xref: /freebsd/sys/vm/vm_glue.c (revision d0b2dbfa)
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
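
/*
 * Example (hypothetical caller, not part of this file): a consumer
 * wanting a cheap map-level check before touching a kernel buffer
 * might write:
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * A TRUE result reflects only the vm_map_entry protections; it does
 * not guarantee that every page is resident.
 */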

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
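
/*
 * Example (sketch, not part of this file): since useracc() validates
 * only the map entries, a caller would still pair it with a faulting
 * copy primitive such as copyin():
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 */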

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
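
/*
 * Example (sketch, not part of this file): vslock() and vsunlock() are
 * meant to bracket accesses that must not fault, e.g. wiring down a
 * user buffer before operating on it:
 *
 *	error = vslock(uaddr, len);
 *	if (error != 0)
 *		return (error);
 *	... access the wired buffer ...
 *	vsunlock(uaddr, len);
 */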

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset);
	(void)vm_page_grab_valid_unlocked(&m, object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_unwire(m, PQ_ACTIVE);
}
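
/*
 * Example (sketch, not part of this file): an image activator reading
 * header bytes out of an executable's VM object would pair the two
 * calls above:
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), hdr, sizeof(*hdr));
 *	vm_imgact_unmap_page(sf);
 */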

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

vm_object_t kstack_object;
static uma_zone_t kstack_cache;
static int kstack_cache_size;

static int
sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
{
	int error, oldsize;

	oldsize = kstack_cache_size;
	error = sysctl_handle_int(oidp, arg1, arg2, req);
	if (error == 0 && req->newptr && oldsize != kstack_cache_size)
		uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
	return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,
    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &kstack_cache_size, 0,
    sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks");
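
/*
 * The cache size is tunable at runtime, for example (hypothetical
 * administrator command, not part of this file):
 *
 *	# sysctl vm.kstack_cache_size=512
 *
 * A larger cache trades memory for fewer stack constructions on
 * thread-creation-heavy workloads.
 */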

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 */
static vm_offset_t
vm_thread_stack_create(struct domainset *ds, int pages)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_offset_t ks;
	int i;

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0) {
		printf("%s: kstack allocation failed\n", __func__);
		return (0);
	}

	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}

	/*
	 * Allocate physical pages to back the stack.
	 */
	vm_thread_stack_back(ds, ks, ma, pages, VM_ALLOC_NORMAL);
	for (i = 0; i < pages; i++)
		vm_page_valid(ma[i]);
	pmap_qenter(ks, ma, pages);

	return (ks);
}
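
/*
 * Resulting KVA layout (sketch, assuming KSTACK_GUARD_PAGES == 1):
 *
 *	allocation base ->  +----------------+
 *	                    |   guard page   |  unmapped; overflow faults
 *	returned ks ----->  +----------------+
 *	                    |  stack pages   |  pages * PAGE_SIZE
 *	                    +----------------+  <- stack top (grows down)
 */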

static void
vm_thread_stack_dispose(vm_offset_t ks, int pages)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int i;

	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(kstack_object);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(kstack_object, pindex + i);
		if (m == NULL)
			panic("%s: kstack already missing?", __func__);
		vm_page_xbusy_claim(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(kstack_object);
	kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
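
/*
 * Note on the pindex computation above: kstack_object spans the entire
 * kernel virtual address range (see kstack_cache_init() below), so the
 * pages backing a stack live at the object index matching its KVA:
 *
 *	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS)
 *
 * This keeps the KVA-to-page lookup trivial both here and in
 * vm_thread_stack_back().
 */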

/*
 * Allocate the kernel stack for a new thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_offset_t ks;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	ks = 0;
	if (pages == kstack_pages && kstack_cache != NULL)
		ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);

	/*
	 * Ensure that kstack objects can draw pages from any memory
	 * domain.  Otherwise a local memory shortage can block a process
	 * swap-in.
	 */
	if (ks == 0)
		ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
		    pages);
	if (ks == 0)
		return (0);
	td->td_kstack = ks;
	td->td_kstack_pages = pages;
	kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
	kmsan_mark((void *)ks, ptoa(pages), KMSAN_STATE_UNINIT);
	return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_offset_t ks;
	int pages;

	pages = td->td_kstack_pages;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
	if (pages == kstack_pages)
		uma_zfree(kstack_cache, (void *)ks);
	else
		vm_thread_stack_dispose(ks, pages);
}

/*
 * Allocate physical pages, following the specified NUMA policy, to back a
 * kernel stack.
 */
void
vm_thread_stack_back(struct domainset *ds, vm_offset_t ks, vm_page_t ma[],
    int npages, int req_class)
{
	vm_pindex_t pindex;
	int n;

	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

	VM_OBJECT_WLOCK(kstack_object);
	for (n = 0; n < npages;) {
		if (vm_ndomains > 1)
			kstack_object->domain.dr_policy = ds;

		/*
		 * Use WAITFAIL to force a reset of the domain selection policy
		 * if we had to sleep for pages.
		 */
		n += vm_page_grab_pages(kstack_object, pindex + n,
		    req_class | VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL,
		    &ma[n], npages - n);
	}
	VM_OBJECT_WUNLOCK(kstack_object);
}

static int
kstack_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct domainset *ds;
	int i;

	if (domain == UMA_ANYDOMAIN)
		ds = DOMAINSET_RR();
	else
		ds = DOMAINSET_PREF(domain);

	for (i = 0; i < cnt; i++) {
		store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

static void
kstack_release(void *arg, void **store, int cnt)
{
	vm_offset_t ks;
	int i;

	for (i = 0; i < cnt; i++) {
		ks = (vm_offset_t)store[i];
		vm_thread_stack_dispose(ks, kstack_pages);
	}
}
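
/*
 * kstack_import() and kstack_release() implement the import/release
 * contract of a UMA cache zone (see uma_zcache_create() below):
 * import is asked to produce up to cnt fully constructed stacks when
 * the cache runs dry and returns how many it created; release returns
 * excess cached stacks to the VM system.
 */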

static void
kstack_cache_init(void *null)
{
	kstack_object = vm_object_allocate(OBJT_SWAP,
	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
	kstack_cache = uma_zcache_create("kstack_cache",
	    kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
	    kstack_import, kstack_release, NULL,
	    UMA_ZONE_FIRSTTOUCH);
	kstack_cache_size = imax(128, mp_ncpus * 4);
	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
}
SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track the maximum stack depth used by a thread in the kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly needed; it is
	 * an optimization, since interrupts from usermode will have only
	 * the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect whether the interrupt is running on the kernel
	 * thread stack; the hardware could be using a dedicated stack for
	 * interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
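	/*
	 * Lock-free update of the recorded maximum: retry the
	 * compare-and-set until the stored value is already >= used or
	 * our value wins the race.
	 */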
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	struct domainset *dset;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially this
		 * changes memory shared amongst threads into local
		 * copy-on-write.
		 */
		if ((flags & RFMEM) == 0) {
			error = vmspace_unshare(p1);
			if (error)
				return (error);
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		refcount_acquire(&p1->p_vmspace->vm_refcnt);
	}
	dset = td2->td_domain.dr_policy;
	while (vm_page_count_severe_set(&dset->ds_mask)) {
		vm_wait_doms(&dset->ds_mask, 0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}
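
/*
 * Summary of the flag handling above (sketch): without RFPROC no new
 * process is created, so at most the caller's address space is
 * unshared; RFPROC with RFMEM shares the parent's vmspace by reference
 * (vfork(2)-style); RFPROC without RFMEM installs the copied vmspace
 * "vm2" prepared by the caller.
 */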

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}