/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

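/*
 * Convert the KM_CPU_SPEC portion of kmflags into the matching
 * VM_ALLOC_CPU() page-allocation specifier, or 0 if no cpu was encoded.
 */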
static __inline
int
KMVMCPU(int kmflags)
{
	if ((kmflags & KM_CPU_SPEC) == 0)
		return 0;
	return VM_ALLOC_CPU(KM_GETCPU(kmflags));
}

/*
 * Allocate pageable, swap-backed anonymous memory.
 */
void *
kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id)
{
	int error;
	vm_pindex_t npages;

	size = round_page(size);
	npages = size / PAGE_SIZE;

	if (kp->map == NULL)
		kp->map = &kernel_map;
	kp->data = vm_map_min(&kernel_map);
	kp->size = size;
	kp->object = vm_object_allocate(OBJT_DEFAULT, npages);

	error = vm_map_find(kp->map, kp->object, NULL, 0,
			    &kp->data, size,
			    PAGE_SIZE, TRUE,
			    VM_MAPTYPE_NORMAL, id,
			    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error) {
		kprintf("kmem_alloc_swapbacked: %zd bytes failed %d\n",
			size, error);
		kp->data = (vm_offset_t)0;
		kmem_free_swapbacked(kp);
		return NULL;
	}
	return ((void *)(intptr_t)kp->data);
}

void
kmem_free_swapbacked(kmem_anon_desc_t *kp)
{
	if (kp->data) {
		/*
		 * The object will be deallocated by kmem_free().
		 */
		kmem_free(kp->map, kp->data, kp->size);
		kp->data = (vm_offset_t)0;
	} else {
		/*
		 * Failure during allocation, object must be deallocated
		 * manually.
		 */
		vm_object_deallocate(kp->object);
	}
	kp->object = NULL;
}
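
/*
 * Usage sketch (illustrative, not taken from a real caller): the
 * descriptor must be zeroed first so kp->map defaults to kernel_map,
 * and the same descriptor is later handed to kmem_free_swapbacked().
 * VM_SUBSYS_UNKNOWN stands in for the caller's real subsystem id.
 *
 *	kmem_anon_desc_t kp;
 *	void *buf;
 *
 *	bzero(&kp, sizeof(kp));
 *	buf = kmem_alloc_swapbacked(&kp, 1024 * 1024, VM_SUBSYS_UNKNOWN);
 *	if (buf != NULL) {
 *		...
 *		kmem_free_swapbacked(&kp);
 *	}
 */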

/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.  The caller is responsible
 * for populating the range, either by adjusting the map or by entering
 * VM pages itself.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     PAGE_SIZE, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
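
/*
 * Usage sketch (illustrative): reserve pageable KVA and enter the
 * backing pages manually.  pmap_qenter() maps an array of vm_page_t's
 * at the returned address; the page array itself is assumed to come
 * from elsewhere.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(&kernel_map, npages * PAGE_SIZE,
 *				 VM_SUBSYS_UNKNOWN);
 *	if (va)
 *		pmap_qenter(va, pages, npages);
 */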

/*
 * Same as kmem_alloc_pageable(), except that it creates a nofault
 * entry (MAP_NOFAULT) and allows the caller to specify the alignment.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_subsys_t id,
		   vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     align, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
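
/*
 * Usage sketch (illustrative): reserve page-aligned KVA whose mappings
 * the fault code must never try to resolve; the caller enters the
 * pages explicitly, e.g. with pmap_qenter():
 *
 *	va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_UNKNOWN,
 *				PAGE_SIZE);
 */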

/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, vm_subsys_t id, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, NULL,
		      addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, cow);
	vm_object_drop(&kernel_object);

	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc()ing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	vm_object_hold(&kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY | KMVMCPU(kmflags));
		vm_page_unqueue_nowakeup(mem);
		vm_page_wakeup(mem);
	}
	vm_object_drop(&kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, addr, addr + size, kmflags);

	return (addr);
}
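
/*
 * Usage sketch (illustrative): allocate four wired, zero-filled pages
 * from kernel_map and free them again.  VM_SUBSYS_UNKNOWN stands in
 * for the caller's real subsystem id.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc3(&kernel_map, 4 * PAGE_SIZE, VM_SUBSYS_UNKNOWN, 0);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(&kernel_map, va, 4 * PAGE_SIZE);
 */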

/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
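
/*
 * Usage sketch (illustrative): per the warning above, mappings entered
 * with pmap_kenter() must be torn down before the KVA is freed:
 *
 *	for (i = 0; i < size; i += PAGE_SIZE)
 *		pmap_kremove(va + i);
 *	kmem_free(&kernel_map, va, size);
 */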

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to populate as the new submap
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, NULL,
			  (vm_offset_t) 0, min, size,
			  PAGE_SIZE, TRUE,
			  VM_MAPTYPE_UNSPECIFIED, VM_SUBSYS_SYSMAP,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}
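
/*
 * Usage sketch (illustrative): carve a dedicated submap out of
 * kernel_map, in the style used to set up maps such as buffer_map
 * during boot.  The endpoints are returned in minaddr/maxaddr; the
 * 16MB size is an arbitrary example.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	kmem_suballoc(&kernel_map, &buffer_map, &minaddr, &maxaddr,
 *		      16 * 1024 * 1024);
 */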

/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, NULL,
		      (vm_offset_t) 0, addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}

/*
 * Allocates a region from the kernel address map, allocates physical
 * pages within the specified physical address range, and enters them
 * into the kernel object.  Creates a wired mapping from the region to
 * these pages and returns the region's starting virtual address.  The
 * allocated pages are not necessarily physically contiguous.  If M_ZERO
 * is specified through the given flags, then the pages are zeroed
 * before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
		int flags, vm_paddr_t low,
		vm_paddr_t high, vm_memattr_t memattr)
{
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int count;

	size = round_page(size);
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
			     flags, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (0);
	}
	offset = addr - vm_map_min(&kernel_map);
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, NULL,
		      offset, addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	vm_object_drop(&kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc_contig(low, high, PAGE_SIZE, 0,
					 PAGE_SIZE, memattr);
		if (m == NULL) {
			/*
			 * Back out the entire region; any pages already
			 * entered are freed along with the mapping.
			 */
			kmem_free(map, addr, size);
			return (0);
		}
		vm_object_hold(&kernel_object);
		vm_page_insert(m, &kernel_object, OFF_TO_IDX(offset + i));
		vm_object_drop(&kernel_object);
		if (flags & M_ZERO)
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->valid = VM_PAGE_BITS_ALL;
	}
	vm_map_wire(map, addr, addr + size, 0);
	return (addr);
}
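
/*
 * Usage sketch (illustrative): allocate zeroed, wired pages that all
 * reside below 4GB, e.g. for a device limited to 32-bit DMA addresses.
 * VM_SUBSYS_UNKNOWN stands in for the caller's real subsystem id.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(&kernel_map, 8 * PAGE_SIZE, VM_SUBSYS_UNKNOWN,
 *			     M_ZERO, 0, 0xffffffffUL, VM_MEMATTR_DEFAULT);
 */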

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}
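
/*
 * Usage sketch (illustrative): pair kmem_alloc_wait() with
 * kmem_free_wakeup() on the same submap; the wakeup() above is what
 * unblocks an allocator sleeping in kmem_alloc_wait()'s tsleep().
 * "submap" is a hypothetical map created with kmem_suballoc().
 *
 *	va = kmem_alloc_wait(submap, size, VM_SUBSYS_UNKNOWN);
 *	...
 *	kmem_free_wakeup(submap, va, size);
 */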

/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = &kernel_map;
	vm_map_init(m, KvaStart, KvaEnd, &kernel_pmap);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count,
				      NULL, NULL,
				      (vm_offset_t) 0, addr, virtual2_start,
				      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
				      VM_PROT_ALL, VM_PROT_ALL, 0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, addr, virtual_start,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, addr, KvaEnd,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
    0, 0, kvm_free, "LU", "Amount of KVM free");
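
/*
 * Both sysctls are read-only and can be inspected from userland, e.g.:
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 */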