/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.29 2007/06/07 23:14:29 dillon Exp $
 */

/*
 * Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, PAGE_SIZE,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Same as kmem_alloc_pageable, except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, align,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
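/*
 * Usage sketch (illustrative only, not part of the original source): a
 * hypothetical caller reserving four pages of pageable KVA from
 * kernel_map and releasing them with kmem_free().  The size and the
 * ENOMEM handling are assumptions made up for this example; a return
 * value of 0 means the map had no space.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(&kernel_map, 4 * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(&kernel_map, va, 4 * PAGE_SIZE);
 */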
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_reference(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      cow);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone
	 *    is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_wire will wire the pages.
	 */
	lwkt_gettoken(&vm_token);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}
	lwkt_reltoken(&vm_token);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, (vm_offset_t)addr, addr + size, kmflags);

	return (addr);
}

/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to be initialized as the submap
 *	size		Size of range to find
 *	min, max	Returned endpoints of the submap
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	lwkt_gettoken(&vm_token);
	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, PAGE_SIZE,
			  TRUE, VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	lwkt_reltoken(&vm_token);
}
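/*
 * Usage sketch (illustrative only, not part of the original source):
 * carving a submap out of kernel_map at boot, in the style used for
 * maps such as buffer_map.  The 32MB size and the minva/maxva locals
 * are assumptions made up for this example.  Afterward the submap
 * manages [minva, maxva) with the kernel pmap, so allocations in that
 * range no longer contend on kernel_map itself.
 *
 *	vm_offset_t minva, maxva;
 *
 *	kmem_suballoc(&kernel_map, &buffer_map, &minva, &maxva,
 *		      32 * 1024 * 1024);
 */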
/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's
		 * lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * Create the kernel_map and insert mappings to cover areas already
 * allocated or reserved thus far.  That is, the areas (KvaStart,start)
 * and (end,KvaEnd) must be marked as allocated.
 *
 * virtual2_start/end is a cutout between KvaStart and start on x86_64,
 * due to the location of KERNBASE (at -2G).
 *
 * We could use a min_offset of 0 instead of KvaStart, but since the
 * min_offset is not used for any calculations other than a bounds check
 * it does not affect readability.  KvaStart is more appropriate.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL,
				      VM_PROT_ALL, VM_PROT_ALL,
				      0);
		}
		addr = virtual2_end;
	}
	if (addr < start) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, start,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	addr = end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
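/*
 * Usage sketch (illustrative only, not part of the original source):
 * the two read-only sysctls above can be queried from userland, e.g.
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 *	vm.kvm_size: 536870912
 *	vm.kvm_free: 398458880
 *
 * The numeric values shown are made up for illustration.
 */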