xref: /freebsd/sys/kern/uipc_shm.c (revision 1d386b48)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * Copyright 2020 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Portions of this software were developed by Konstantin Belousov
14  * under sponsorship from the FreeBSD Foundation.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 /*
39  * Support for shared swap-backed anonymous memory objects via
40  * shm_open(2), shm_rename(2), and shm_unlink(2).
41  * While most of the implementation is here, vm_mmap.c contains
42  * mapping logic changes.
43  *
44  * posixshmcontrol(1) allows users to inspect the state of the memory
45  * objects.  The per-uid swap resource limit controls the total amount
46  * of memory that a user can consume for anonymous objects, including
47  * shared ones.
48  */
49 
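/*
 * For illustration, a typical userland consumer of this facility creates
 * an object with shm_open(2), sizes it with ftruncate(2), maps it with
 * mmap(2), and removes the name with shm_unlink(2).  A rough sketch,
 * with error handling elided and "/example" chosen arbitrarily:
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	p[0] = 1;
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/example");
 *
 * Passing SHM_ANON instead of a path yields an anonymous object that is
 * destroyed once the last descriptor and mapping go away.
 */
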
50 #include <sys/cdefs.h>
51 #include "opt_capsicum.h"
52 #include "opt_ktrace.h"
53 
54 #include <sys/param.h>
55 #include <sys/capsicum.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/file.h>
59 #include <sys/filedesc.h>
60 #include <sys/filio.h>
61 #include <sys/fnv_hash.h>
62 #include <sys/kernel.h>
63 #include <sys/limits.h>
64 #include <sys/uio.h>
65 #include <sys/signal.h>
66 #include <sys/jail.h>
67 #include <sys/ktrace.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mman.h>
71 #include <sys/mutex.h>
72 #include <sys/priv.h>
73 #include <sys/proc.h>
74 #include <sys/refcount.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/stat.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysproto.h>
82 #include <sys/systm.h>
83 #include <sys/sx.h>
84 #include <sys/time.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/user.h>
89 
90 #include <security/audit/audit.h>
91 #include <security/mac/mac_framework.h>
92 
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_pager.h>
103 #include <vm/swap_pager.h>
104 
105 struct shm_mapping {
106 	char		*sm_path;
107 	Fnv32_t		sm_fnv;
108 	struct shmfd	*sm_shmfd;
109 	LIST_ENTRY(shm_mapping) sm_link;
110 };
111 
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
113 static LIST_HEAD(, shm_mapping) *shm_dictionary;
114 static struct sx shm_dict_lock;
115 static struct mtx shm_timestamp_lock;
116 static u_long shm_hash;
117 static struct unrhdr64 shm_ino_unr;
118 static dev_t shm_dev_ino;
119 
120 #define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
121 
122 static void	shm_init(void *arg);
123 static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
125 static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
126 static void	shm_doremove(struct shm_mapping *map);
127 static int	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
128     void *rl_cookie);
129 static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
130     void *rl_cookie);
131 static int	shm_copyin_path(struct thread *td, const char *userpath_in,
132     char **path_out);
133 static int	shm_deallocate(struct shmfd *shmfd, off_t *offset,
134     off_t *length, int flags);
135 
136 static fo_rdwr_t	shm_read;
137 static fo_rdwr_t	shm_write;
138 static fo_truncate_t	shm_truncate;
139 static fo_ioctl_t	shm_ioctl;
140 static fo_stat_t	shm_stat;
141 static fo_close_t	shm_close;
142 static fo_chmod_t	shm_chmod;
143 static fo_chown_t	shm_chown;
144 static fo_seek_t	shm_seek;
145 static fo_fill_kinfo_t	shm_fill_kinfo;
146 static fo_mmap_t	shm_mmap;
147 static fo_get_seals_t	shm_get_seals;
148 static fo_add_seals_t	shm_add_seals;
149 static fo_fallocate_t	shm_fallocate;
150 static fo_fspacectl_t	shm_fspacectl;
151 
152 /* File descriptor operations. */
153 struct fileops shm_ops = {
154 	.fo_read = shm_read,
155 	.fo_write = shm_write,
156 	.fo_truncate = shm_truncate,
157 	.fo_ioctl = shm_ioctl,
158 	.fo_poll = invfo_poll,
159 	.fo_kqfilter = invfo_kqfilter,
160 	.fo_stat = shm_stat,
161 	.fo_close = shm_close,
162 	.fo_chmod = shm_chmod,
163 	.fo_chown = shm_chown,
164 	.fo_sendfile = vn_sendfile,
165 	.fo_seek = shm_seek,
166 	.fo_fill_kinfo = shm_fill_kinfo,
167 	.fo_mmap = shm_mmap,
168 	.fo_get_seals = shm_get_seals,
169 	.fo_add_seals = shm_add_seals,
170 	.fo_fallocate = shm_fallocate,
171 	.fo_fspacectl = shm_fspacectl,
172 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
173 };
174 
175 FEATURE(posix_shm, "POSIX shared memory");
176 
177 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
178     "");
179 
180 static int largepage_reclaim_tries = 1;
181 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
182     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
183     "Number of contig reclaims before giving up for default alloc policy");
184 
185 static int
186 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
187 {
188 	vm_page_t m;
189 	vm_pindex_t idx;
190 	size_t tlen;
191 	int error, offset, rv;
192 
193 	idx = OFF_TO_IDX(uio->uio_offset);
194 	offset = uio->uio_offset & PAGE_MASK;
195 	tlen = MIN(PAGE_SIZE - offset, len);
196 
197 	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
198 	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
199 	if (rv == VM_PAGER_OK)
200 		goto found;
201 
202 	/*
203 	 * Read I/O without either a corresponding resident page or swap
204 	 * page: use zero_region.  This is intended to avoid instantiating
205 	 * pages on read from a sparse region.
206 	 */
207 	VM_OBJECT_WLOCK(obj);
208 	m = vm_page_lookup(obj, idx);
209 	if (uio->uio_rw == UIO_READ && m == NULL &&
210 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
211 		VM_OBJECT_WUNLOCK(obj);
212 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
213 	}
214 
215 	/*
216 	 * Although the tmpfs vnode lock is held here, it is
217 	 * nonetheless safe to sleep waiting for a free page.  The
218 	 * pageout daemon does not need to acquire the tmpfs vnode
219 	 * lock to page out obj's pages because obj is an OBJT_SWAP
220 	 * type object.
221 	 */
222 	rv = vm_page_grab_valid(&m, obj, idx,
223 	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
224 	if (rv != VM_PAGER_OK) {
225 		VM_OBJECT_WUNLOCK(obj);
226 		if (bootverbose) {
227 			printf("uiomove_object: vm_obj %p idx %jd "
228 			    "pager error %d\n", obj, idx, rv);
229 		}
230 		return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
231 	}
232 	VM_OBJECT_WUNLOCK(obj);
233 
234 found:
235 	error = uiomove_fromphys(&m, offset, tlen, uio);
236 	if (uio->uio_rw == UIO_WRITE && error == 0)
237 		vm_page_set_dirty(m);
238 	vm_page_activate(m);
239 	vm_page_sunbusy(m);
240 
241 	return (error);
242 }
243 
244 int
245 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
246 {
247 	ssize_t resid;
248 	size_t len;
249 	int error;
250 
251 	error = 0;
252 	while ((resid = uio->uio_resid) > 0) {
253 		if (obj_size <= uio->uio_offset)
254 			break;
255 		len = MIN(obj_size - uio->uio_offset, resid);
256 		if (len == 0)
257 			break;
258 		error = uiomove_object_page(obj, len, uio);
259 		if (error != 0 || resid == uio->uio_resid)
260 			break;
261 	}
262 	return (error);
263 }
264 
265 static u_long count_largepages[MAXPAGESIZES];
266 
267 static int
268 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
269     int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
270 {
271 	vm_page_t m __diagused;
272 	int psind;
273 
274 	psind = object->un_pager.phys.data_val;
275 	if (psind == 0 || pidx >= object->size)
276 		return (VM_PAGER_FAIL);
277 	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
278 
279 	/*
280 	 * We only busy the first page in the superpage run.  It is
281 	 * useless to busy the whole run since we only remove full
282 	 * superpages, and it takes too long to busy e.g. 512 * 512 ==
283 	 * 262144 pages constituting a 1G amd64 superpage.
284 	 */
285 	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
286 	MPASS(m != NULL);
287 
288 	*last = *first + atop(pagesizes[psind]) - 1;
289 	return (VM_PAGER_OK);
290 }
291 
292 static boolean_t
293 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
294     int *before, int *after)
295 {
296 	int psind;
297 
298 	psind = object->un_pager.phys.data_val;
299 	if (psind == 0 || pindex >= object->size)
300 		return (FALSE);
301 	if (before != NULL) {
302 		*before = pindex - rounddown2(pindex, pagesizes[psind] /
303 		    PAGE_SIZE);
304 	}
305 	if (after != NULL) {
306 		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
307 		    pindex;
308 	}
309 	return (TRUE);
310 }
311 
312 static void
313 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
314     vm_ooffset_t foff, struct ucred *cred)
315 {
316 }
317 
318 static void
319 shm_largepage_phys_dtor(vm_object_t object)
320 {
321 	int psind;
322 
323 	psind = object->un_pager.phys.data_val;
324 	if (psind != 0) {
325 		atomic_subtract_long(&count_largepages[psind],
326 		    object->size / (pagesizes[psind] / PAGE_SIZE));
327 		vm_wire_sub(object->size);
328 	} else {
329 		KASSERT(object->size == 0,
330 		    ("largepage phys obj %p not initialized but size %#jx > 0",
331 		    object, (uintmax_t)object->size));
332 	}
333 }
334 
335 static const struct phys_pager_ops shm_largepage_phys_ops = {
336 	.phys_pg_populate =	shm_largepage_phys_populate,
337 	.phys_pg_haspage =	shm_largepage_phys_haspage,
338 	.phys_pg_ctor =		shm_largepage_phys_ctor,
339 	.phys_pg_dtor =		shm_largepage_phys_dtor,
340 };
341 
342 bool
343 shm_largepage(struct shmfd *shmfd)
344 {
345 	return (shmfd->shm_object->type == OBJT_PHYS);
346 }
347 
348 static void
349 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
350 {
351 	struct shmfd *shm;
352 	vm_size_t c;
353 
354 	swap_pager_freespace(obj, start, size, &c);
355 	if (c == 0)
356 		return;
357 
358 	shm = obj->un_pager.swp.swp_priv;
359 	if (shm == NULL)
360 		return;
361 	KASSERT(shm->shm_pages >= c,
362 	    ("shm %p pages %jd free %jd", shm,
363 	    (uintmax_t)shm->shm_pages, (uintmax_t)c));
364 	shm->shm_pages -= c;
365 }
366 
367 static void
368 shm_page_inserted(vm_object_t obj, vm_page_t m)
369 {
370 	struct shmfd *shm;
371 
372 	shm = obj->un_pager.swp.swp_priv;
373 	if (shm == NULL)
374 		return;
375 	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
376 		shm->shm_pages += 1;
377 }
378 
379 static void
380 shm_page_removed(vm_object_t obj, vm_page_t m)
381 {
382 	struct shmfd *shm;
383 
384 	shm = obj->un_pager.swp.swp_priv;
385 	if (shm == NULL)
386 		return;
387 	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
388 		KASSERT(shm->shm_pages >= 1,
389 		    ("shm %p pages %jd free 1", shm,
390 		    (uintmax_t)shm->shm_pages));
391 		shm->shm_pages -= 1;
392 	}
393 }
394 
395 static struct pagerops shm_swap_pager_ops = {
396 	.pgo_kvme_type = KVME_TYPE_SWAP,
397 	.pgo_freespace = shm_pager_freespace,
398 	.pgo_page_inserted = shm_page_inserted,
399 	.pgo_page_removed = shm_page_removed,
400 };
401 static int shmfd_pager_type = -1;
402 
403 static int
404 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
405 {
406 	struct shmfd *shmfd;
407 	off_t foffset;
408 	int error;
409 
410 	shmfd = fp->f_data;
411 	foffset = foffset_lock(fp, 0);
412 	error = 0;
413 	switch (whence) {
414 	case L_INCR:
415 		if (foffset < 0 ||
416 		    (offset > 0 && foffset > OFF_MAX - offset)) {
417 			error = EOVERFLOW;
418 			break;
419 		}
420 		offset += foffset;
421 		break;
422 	case L_XTND:
423 		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
424 			error = EOVERFLOW;
425 			break;
426 		}
427 		offset += shmfd->shm_size;
428 		break;
429 	case L_SET:
430 		break;
431 	default:
432 		error = EINVAL;
433 	}
434 	if (error == 0) {
435 		if (offset < 0 || offset > shmfd->shm_size)
436 			error = EINVAL;
437 		else
438 			td->td_uretoff.tdu_off = offset;
439 	}
440 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
441 	return (error);
442 }
443 
444 static int
445 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
446     int flags, struct thread *td)
447 {
448 	struct shmfd *shmfd;
449 	void *rl_cookie;
450 	int error;
451 
452 	shmfd = fp->f_data;
453 #ifdef MAC
454 	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
455 	if (error)
456 		return (error);
457 #endif
458 	foffset_lock_uio(fp, uio, flags);
459 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
460 	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
461 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
462 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
463 	foffset_unlock_uio(fp, uio, flags);
464 	return (error);
465 }
466 
467 static int
468 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
469     int flags, struct thread *td)
470 {
471 	struct shmfd *shmfd;
472 	void *rl_cookie;
473 	int error;
474 	off_t size;
475 
476 	shmfd = fp->f_data;
477 #ifdef MAC
478 	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
479 	if (error)
480 		return (error);
481 #endif
482 	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
483 		return (EINVAL);
484 	foffset_lock_uio(fp, uio, flags);
485 	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
486 		/*
487 		 * Overflow is only an error if we're supposed to expand on
488 		 * write.  Otherwise, we'll just truncate the write to the
489 		 * size of the file, which can only grow up to OFF_MAX.
490 		 */
491 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
492 			foffset_unlock_uio(fp, uio, flags);
493 			return (EFBIG);
494 		}
495 
496 		size = shmfd->shm_size;
497 	} else {
498 		size = uio->uio_offset + uio->uio_resid;
499 	}
500 	if ((flags & FOF_OFFSET) == 0) {
501 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
502 		    &shmfd->shm_mtx);
503 	} else {
504 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
505 		    size, &shmfd->shm_mtx);
506 	}
507 	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
508 		error = EPERM;
509 	} else {
510 		error = 0;
511 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
512 		    size > shmfd->shm_size) {
513 			error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
514 		}
515 		if (error == 0)
516 			error = uiomove_object(shmfd->shm_object,
517 			    shmfd->shm_size, uio);
518 	}
519 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
520 	foffset_unlock_uio(fp, uio, flags);
521 	return (error);
522 }
523 
524 static int
525 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
526     struct thread *td)
527 {
528 	struct shmfd *shmfd;
529 #ifdef MAC
530 	int error;
531 #endif
532 
533 	shmfd = fp->f_data;
534 #ifdef MAC
535 	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
536 	if (error)
537 		return (error);
538 #endif
539 	return (shm_dotruncate(shmfd, length));
540 }
541 
542 int
543 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
544     struct thread *td)
545 {
546 	struct shmfd *shmfd;
547 	struct shm_largepage_conf *conf;
548 	void *rl_cookie;
549 
550 	shmfd = fp->f_data;
551 	switch (com) {
552 	case FIONBIO:
553 	case FIOASYNC:
554 		/*
555 		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
556 		 * just like it would on an unlinked regular file
557 		 */
558 		return (0);
559 	case FIOSSHMLPGCNF:
560 		if (!shm_largepage(shmfd))
561 			return (ENOTTY);
562 		conf = data;
563 		if (shmfd->shm_lp_psind != 0 &&
564 		    conf->psind != shmfd->shm_lp_psind)
565 			return (EINVAL);
566 		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
567 		    pagesizes[conf->psind] == 0)
568 			return (EINVAL);
569 		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
570 		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
571 		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
572 			return (EINVAL);
573 
574 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
575 		    &shmfd->shm_mtx);
576 		shmfd->shm_lp_psind = conf->psind;
577 		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
578 		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
579 		rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
580 		return (0);
581 	case FIOGSHMLPGCNF:
582 		if (!shm_largepage(shmfd))
583 			return (ENOTTY);
584 		conf = data;
585 		rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
586 		    &shmfd->shm_mtx);
587 		conf->psind = shmfd->shm_lp_psind;
588 		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
589 		rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
590 		return (0);
591 	default:
592 		return (ENOTTY);
593 	}
594 }
595 
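/*
 * The largepage configuration above is driven from userland through the
 * FIOSSHMLPGCNF/FIOGSHMLPGCNF ioctls.  A rough sketch, assuming fd refers
 * to an object already created in largepage mode (e.g. with
 * shm_create_largepage(3)):
 *
 *	struct shm_largepage_conf slc;
 *
 *	memset(&slc, 0, sizeof(slc));
 *	slc.psind = 1;
 *	slc.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
 *	if (ioctl(fd, FIOSSHMLPGCNF, &slc) == -1)
 *		err(1, "FIOSSHMLPGCNF");
 *	if (ioctl(fd, FIOGSHMLPGCNF, &slc) == 0)
 *		printf("psind %d policy %d\n", slc.psind, slc.alloc_policy);
 *
 * As the handler shows, the page size index may be set only once, must
 * name a supported superpage size, and later attempts to change it fail
 * with EINVAL.
 */
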
596 static int
597 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
598 {
599 	struct shmfd *shmfd;
600 #ifdef MAC
601 	int error;
602 #endif
603 
604 	shmfd = fp->f_data;
605 
606 #ifdef MAC
607 	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
608 	if (error)
609 		return (error);
610 #endif
611 
612 	/*
613 	 * Attempt to return sane-ish values for fstat() on a memory file
614 	 * descriptor.
615 	 */
616 	bzero(sb, sizeof(*sb));
617 	sb->st_blksize = PAGE_SIZE;
618 	sb->st_size = shmfd->shm_size;
619 	mtx_lock(&shm_timestamp_lock);
620 	sb->st_atim = shmfd->shm_atime;
621 	sb->st_ctim = shmfd->shm_ctime;
622 	sb->st_mtim = shmfd->shm_mtime;
623 	sb->st_birthtim = shmfd->shm_birthtime;
624 	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
625 	sb->st_uid = shmfd->shm_uid;
626 	sb->st_gid = shmfd->shm_gid;
627 	mtx_unlock(&shm_timestamp_lock);
628 	sb->st_dev = shm_dev_ino;
629 	sb->st_ino = shmfd->shm_ino;
630 	sb->st_nlink = shmfd->shm_object->ref_count;
631 	if (shm_largepage(shmfd)) {
632 		sb->st_blocks = shmfd->shm_object->size /
633 		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
634 	} else {
635 		sb->st_blocks = shmfd->shm_pages;
636 	}
637 
638 	return (0);
639 }
640 
641 static int
642 shm_close(struct file *fp, struct thread *td)
643 {
644 	struct shmfd *shmfd;
645 
646 	shmfd = fp->f_data;
647 	fp->f_data = NULL;
648 	shm_drop(shmfd);
649 
650 	return (0);
651 }
652 
653 static int
654 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) {
655 	int error;
656 	char *path;
657 	const char *pr_path;
658 	size_t pr_pathlen;
659 
660 	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
661 	pr_path = td->td_ucred->cr_prison->pr_path;
662 
663 	/* Construct a full pathname for jailed callers. */
664 	pr_pathlen = strcmp(pr_path, "/") ==
665 	    0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
666 	error = copyinstr(userpath_in, path + pr_pathlen,
667 	    MAXPATHLEN - pr_pathlen, NULL);
668 	if (error != 0)
669 		goto out;
670 
671 #ifdef KTRACE
672 	if (KTRPOINT(curthread, KTR_NAMEI))
673 		ktrnamei(path);
674 #endif
675 
676 	/* Require paths to start with a '/' character. */
677 	if (path[pr_pathlen] != '/') {
678 		error = EINVAL;
679 		goto out;
680 	}
681 
682 	*path_out = path;
683 
684 out:
685 	if (error != 0)
686 		free(path, M_SHMFD);
687 
688 	return (error);
689 }
690 
691 static int
692 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
693     int end)
694 {
695 	vm_page_t m;
696 	int rv;
697 
698 	VM_OBJECT_ASSERT_WLOCKED(object);
699 	KASSERT(base >= 0, ("%s: base %d", __func__, base));
700 	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
701 	    end));
702 
703 retry:
704 	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
705 	if (m != NULL) {
706 		MPASS(vm_page_all_valid(m));
707 	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
708 		m = vm_page_alloc(object, idx,
709 		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
710 		if (m == NULL)
711 			goto retry;
712 		vm_object_pip_add(object, 1);
713 		VM_OBJECT_WUNLOCK(object);
714 		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
715 		VM_OBJECT_WLOCK(object);
716 		vm_object_pip_wakeup(object);
717 		if (rv == VM_PAGER_OK) {
718 			/*
719 			 * Since the page was not resident, and therefore not
720 			 * recently accessed, immediately enqueue it for
721 			 * asynchronous laundering.  The current operation is
722 			 * not regarded as an access.
723 			 */
724 			vm_page_launder(m);
725 		} else {
726 			vm_page_free(m);
727 			VM_OBJECT_WUNLOCK(object);
728 			return (EIO);
729 		}
730 	}
731 	if (m != NULL) {
732 		pmap_zero_page_area(m, base, end - base);
733 		KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
734 		    __func__, m));
735 		vm_page_set_dirty(m);
736 		vm_page_xunbusy(m);
737 	}
738 
739 	return (0);
740 }
741 
742 static int
743 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
744 {
745 	vm_object_t object;
746 	vm_pindex_t nobjsize;
747 	vm_ooffset_t delta;
748 	int base, error;
749 
750 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
751 	object = shmfd->shm_object;
752 	VM_OBJECT_ASSERT_WLOCKED(object);
753 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
754 	if (length == shmfd->shm_size)
755 		return (0);
756 	nobjsize = OFF_TO_IDX(length + PAGE_MASK);
757 
758 	/* Are we shrinking?  If so, trim the end. */
759 	if (length < shmfd->shm_size) {
760 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
761 			return (EPERM);
762 
763 		/*
764 		 * Disallow any requests to shrink the size if this
765 		 * object is mapped into the kernel.
766 		 */
767 		if (shmfd->shm_kmappings > 0)
768 			return (EBUSY);
769 
770 		/*
771 		 * Zero the truncated part of the last page.
772 		 */
773 		base = length & PAGE_MASK;
774 		if (base != 0) {
775 			error = shm_partial_page_invalidate(object,
776 			    OFF_TO_IDX(length), base, PAGE_SIZE);
777 			if (error)
778 				return (error);
779 		}
780 		delta = IDX_TO_OFF(object->size - nobjsize);
781 
782 		if (nobjsize < object->size)
783 			vm_object_page_remove(object, nobjsize, object->size,
784 			    0);
785 
786 		/* Free the swap accounted for shm */
787 		swap_release_by_cred(delta, object->cred);
788 		object->charge -= delta;
789 	} else {
790 		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
791 			return (EPERM);
792 
793 		/* Try to reserve additional swap space. */
794 		delta = IDX_TO_OFF(nobjsize - object->size);
795 		if (!swap_reserve_by_cred(delta, object->cred))
796 			return (ENOMEM);
797 		object->charge += delta;
798 	}
799 	shmfd->shm_size = length;
800 	mtx_lock(&shm_timestamp_lock);
801 	vfs_timestamp(&shmfd->shm_ctime);
802 	shmfd->shm_mtime = shmfd->shm_ctime;
803 	mtx_unlock(&shm_timestamp_lock);
804 	object->size = nobjsize;
805 	return (0);
806 }
807 
808 static int
809 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
810 {
811 	vm_object_t object;
812 	vm_page_t m;
813 	vm_pindex_t newobjsz;
814 	vm_pindex_t oldobjsz __unused;
815 	int aflags, error, i, psind, try;
816 
817 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
818 	object = shmfd->shm_object;
819 	VM_OBJECT_ASSERT_WLOCKED(object);
820 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
821 
822 	oldobjsz = object->size;
823 	newobjsz = OFF_TO_IDX(length);
824 	if (length == shmfd->shm_size)
825 		return (0);
826 	psind = shmfd->shm_lp_psind;
827 	if (psind == 0 && length != 0)
828 		return (EINVAL);
829 	if ((length & (pagesizes[psind] - 1)) != 0)
830 		return (EINVAL);
831 
832 	if (length < shmfd->shm_size) {
833 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
834 			return (EPERM);
835 		if (shmfd->shm_kmappings > 0)
836 			return (EBUSY);
837 		return (ENOTSUP);	/* Pages are unmanaged. */
838 #if 0
839 		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
840 		object->size = newobjsz;
841 		shmfd->shm_size = length;
842 		return (0);
843 #endif
844 	}
845 
846 	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
847 		return (EPERM);
848 
849 	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
850 	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
851 		aflags |= VM_ALLOC_WAITFAIL;
852 	try = 0;
853 
854 	/*
855 	 * Extend shmfd and object, keeping all already fully
856 	 * allocated large pages intact even on error, because the dropped
857 	 * object lock might have allowed them to be mapped.
858 	 */
859 	while (object->size < newobjsz) {
860 		m = vm_page_alloc_contig(object, object->size, aflags,
861 		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
862 		    pagesizes[psind], 0,
863 		    VM_MEMATTR_DEFAULT);
864 		if (m == NULL) {
865 			VM_OBJECT_WUNLOCK(object);
866 			if (shmfd->shm_lp_alloc_policy ==
867 			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
868 			    (shmfd->shm_lp_alloc_policy ==
869 			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
870 			    try >= largepage_reclaim_tries)) {
871 				VM_OBJECT_WLOCK(object);
872 				return (ENOMEM);
873 			}
874 			error = vm_page_reclaim_contig(aflags,
875 			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
876 			    pagesizes[psind], 0) ? 0 :
877 			    vm_wait_intr(object);
878 			if (error != 0) {
879 				VM_OBJECT_WLOCK(object);
880 				return (error);
881 			}
882 			try++;
883 			VM_OBJECT_WLOCK(object);
884 			continue;
885 		}
886 		try = 0;
887 		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
888 			if ((m[i].flags & PG_ZERO) == 0)
889 				pmap_zero_page(&m[i]);
890 			vm_page_valid(&m[i]);
891 			vm_page_xunbusy(&m[i]);
892 		}
893 		object->size += OFF_TO_IDX(pagesizes[psind]);
894 		shmfd->shm_size += pagesizes[psind];
895 		atomic_add_long(&count_largepages[psind], 1);
896 		vm_wire_add(atop(pagesizes[psind]));
897 	}
898 	return (0);
899 }
900 
901 static int
902 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
903 {
904 	int error;
905 
906 	VM_OBJECT_WLOCK(shmfd->shm_object);
907 	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
908 	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
909 	    rl_cookie);
910 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
911 	return (error);
912 }
913 
914 int
915 shm_dotruncate(struct shmfd *shmfd, off_t length)
916 {
917 	void *rl_cookie;
918 	int error;
919 
920 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
921 	    &shmfd->shm_mtx);
922 	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
923 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
924 	return (error);
925 }
926 
927 /*
928  * shmfd object management including creation and reference counting
929  * routines.
930  */
931 struct shmfd *
932 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
933 {
934 	struct shmfd *shmfd;
935 	vm_object_t obj;
936 
937 	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
938 	shmfd->shm_size = 0;
939 	shmfd->shm_uid = ucred->cr_uid;
940 	shmfd->shm_gid = ucred->cr_gid;
941 	shmfd->shm_mode = mode;
942 	if (largepage) {
943 		shmfd->shm_object = phys_pager_allocate(NULL,
944 		    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
945 		    VM_PROT_DEFAULT, 0, ucred);
946 		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
947 	} else {
948 		obj = vm_pager_allocate(shmfd_pager_type, NULL,
949 		    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
950 		VM_OBJECT_WLOCK(obj);
951 		obj->un_pager.swp.swp_priv = shmfd;
952 		VM_OBJECT_WUNLOCK(obj);
953 		shmfd->shm_object = obj;
954 	}
955 	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
956 	vfs_timestamp(&shmfd->shm_birthtime);
957 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
958 	    shmfd->shm_birthtime;
959 	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
960 	refcount_init(&shmfd->shm_refs, 1);
961 	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
962 	rangelock_init(&shmfd->shm_rl);
963 #ifdef MAC
964 	mac_posixshm_init(shmfd);
965 	mac_posixshm_create(ucred, shmfd);
966 #endif
967 
968 	return (shmfd);
969 }
970 
971 struct shmfd *
972 shm_hold(struct shmfd *shmfd)
973 {
974 
975 	refcount_acquire(&shmfd->shm_refs);
976 	return (shmfd);
977 }
978 
979 void
980 shm_drop(struct shmfd *shmfd)
981 {
982 	vm_object_t obj;
983 
984 	if (refcount_release(&shmfd->shm_refs)) {
985 #ifdef MAC
986 		mac_posixshm_destroy(shmfd);
987 #endif
988 		rangelock_destroy(&shmfd->shm_rl);
989 		mtx_destroy(&shmfd->shm_mtx);
990 		obj = shmfd->shm_object;
991 		if (!shm_largepage(shmfd)) {
992 			VM_OBJECT_WLOCK(obj);
993 			obj->un_pager.swp.swp_priv = NULL;
994 			VM_OBJECT_WUNLOCK(obj);
995 		}
996 		vm_object_deallocate(obj);
997 		free(shmfd, M_SHMFD);
998 	}
999 }
1000 
1001 /*
1002  * Determine if the credentials have sufficient permissions for a
1003  * specified combination of FREAD and FWRITE.
1004  */
1005 int
1006 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
1007 {
1008 	accmode_t accmode;
1009 	int error;
1010 
1011 	accmode = 0;
1012 	if (flags & FREAD)
1013 		accmode |= VREAD;
1014 	if (flags & FWRITE)
1015 		accmode |= VWRITE;
1016 	mtx_lock(&shm_timestamp_lock);
1017 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1018 	    accmode, ucred);
1019 	mtx_unlock(&shm_timestamp_lock);
1020 	return (error);
1021 }
1022 
1023 static void
1024 shm_init(void *arg)
1025 {
1026 	char name[32];
1027 	int i;
1028 
1029 	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1030 	sx_init(&shm_dict_lock, "shm dictionary");
1031 	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1032 	new_unrhdr64(&shm_ino_unr, 1);
1033 	shm_dev_ino = devfs_alloc_cdp_inode();
1034 	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1035 	shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1036 	    OBJT_SWAP);
1037 	MPASS(shmfd_pager_type != -1);
1038 
1039 	for (i = 1; i < MAXPAGESIZES; i++) {
1040 		if (pagesizes[i] == 0)
1041 			break;
1042 #define	M	(1024 * 1024)
1043 #define	G	(1024 * M)
1044 		if (pagesizes[i] >= G)
1045 			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1046 		else if (pagesizes[i] >= M)
1047 			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1048 		else
1049 			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1050 #undef G
1051 #undef M
1052 		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1053 		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1054 		    "number of non-transient largepages allocated");
1055 	}
1056 }
1057 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1058 
1059 /*
1060  * Remove all shared memory objects that belong to a prison.
1061  */
1062 void
1063 shm_remove_prison(struct prison *pr)
1064 {
1065 	struct shm_mapping *shmm, *tshmm;
1066 	u_long i;
1067 
1068 	sx_xlock(&shm_dict_lock);
1069 	for (i = 0; i < shm_hash + 1; i++) {
1070 		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1071 			if (shmm->sm_shmfd->shm_object->cred &&
1072 			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1073 				shm_doremove(shmm);
1074 		}
1075 	}
1076 	sx_xunlock(&shm_dict_lock);
1077 }
1078 
1079 /*
1080  * Dictionary management.  We maintain an in-kernel dictionary to map
1081  * paths to shmfd objects.  We use the FNV hash on the path to store
1082  * the mappings in a hash table.
1083  */
1084 static struct shmfd *
1085 shm_lookup(char *path, Fnv32_t fnv)
1086 {
1087 	struct shm_mapping *map;
1088 
1089 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1090 		if (map->sm_fnv != fnv)
1091 			continue;
1092 		if (strcmp(map->sm_path, path) == 0)
1093 			return (map->sm_shmfd);
1094 	}
1095 
1096 	return (NULL);
1097 }
1098 
1099 static void
1100 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1101 {
1102 	struct shm_mapping *map;
1103 
1104 	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1105 	map->sm_path = path;
1106 	map->sm_fnv = fnv;
1107 	map->sm_shmfd = shm_hold(shmfd);
1108 	shmfd->shm_path = path;
1109 	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1110 }
1111 
1112 static int
1113 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1114 {
1115 	struct shm_mapping *map;
1116 	int error;
1117 
1118 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1119 		if (map->sm_fnv != fnv)
1120 			continue;
1121 		if (strcmp(map->sm_path, path) == 0) {
1122 #ifdef MAC
1123 			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1124 			if (error)
1125 				return (error);
1126 #endif
1127 			error = shm_access(map->sm_shmfd, ucred,
1128 			    FREAD | FWRITE);
1129 			if (error)
1130 				return (error);
1131 			shm_doremove(map);
1132 			return (0);
1133 		}
1134 	}
1135 
1136 	return (ENOENT);
1137 }
1138 
1139 static void
1140 shm_doremove(struct shm_mapping *map)
1141 {
1142 	map->sm_shmfd->shm_path = NULL;
1143 	LIST_REMOVE(map, sm_link);
1144 	shm_drop(map->sm_shmfd);
1145 	free(map->sm_path, M_SHMFD);
1146 	free(map, M_SHMFD);
1147 }
1148 
1149 int
1150 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1151     int shmflags, struct filecaps *fcaps, const char *name __unused)
1152 {
1153 	struct pwddesc *pdp;
1154 	struct shmfd *shmfd;
1155 	struct file *fp;
1156 	char *path;
1157 	void *rl_cookie;
1158 	Fnv32_t fnv;
1159 	mode_t cmode;
1160 	int error, fd, initial_seals;
1161 	bool largepage;
1162 
1163 	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1164 	    SHM_LARGEPAGE)) != 0)
1165 		return (EINVAL);
1166 
1167 	initial_seals = F_SEAL_SEAL;
1168 	if ((shmflags & SHM_ALLOW_SEALING) != 0)
1169 		initial_seals &= ~F_SEAL_SEAL;
1170 
1171 #ifdef CAPABILITY_MODE
1172 	/*
1173 	 * shm_open(2) is only allowed for anonymous objects.
1174 	 */
1175 	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1176 		return (ECAPMODE);
1177 #endif
1178 
1179 	AUDIT_ARG_FFLAGS(flags);
1180 	AUDIT_ARG_MODE(mode);
1181 
1182 	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1183 		return (EINVAL);
1184 
1185 	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1186 		return (EINVAL);
1187 
1188 	largepage = (shmflags & SHM_LARGEPAGE) != 0;
1189 	if (largepage && !PMAP_HAS_LARGEPAGES)
1190 		return (ENOTTY);
1191 
1192 	/*
1193 	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1194 	 * If the decision is made later to allow additional seals, care must be
1195 	 * taken below to ensure that the seals are properly set if the shmfd
1196 	 * already existed -- this currently assumes that only F_SEAL_SEAL can
1197 	 * be set and doesn't take further precautions to ensure the validity of
1198 	 * the seals being added with respect to current mappings.
1199 	 */
1200 	if ((initial_seals & ~F_SEAL_SEAL) != 0)
1201 		return (EINVAL);
1202 
1203 	pdp = td->td_proc->p_pd;
1204 	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1205 
1206 	/*
1207 	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1208 	 * by POSIX.  We allow it to be unset here so that an in-kernel
1209 	 * interface may be written as a thin layer around shm, optionally not
1210 	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
1211 	 * in sys_shm_open() to keep this implementation compliant.
1212 	 */
1213 	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1214 	if (error)
1215 		return (error);
1216 
1217 	/* A SHM_ANON path pointer creates an anonymous object. */
1218 	if (userpath == SHM_ANON) {
1219 		/* A read-only anonymous object is pointless. */
1220 		if ((flags & O_ACCMODE) == O_RDONLY) {
1221 			fdclose(td, fp, fd);
1222 			fdrop(fp, td);
1223 			return (EINVAL);
1224 		}
1225 		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1226 		shmfd->shm_seals = initial_seals;
1227 		shmfd->shm_flags = shmflags;
1228 	} else {
1229 		error = shm_copyin_path(td, userpath, &path);
1230 		if (error != 0) {
1231 			fdclose(td, fp, fd);
1232 			fdrop(fp, td);
1233 			return (error);
1234 		}
1235 
1236 		AUDIT_ARG_UPATH1_CANON(path);
1237 		fnv = fnv_32_str(path, FNV1_32_INIT);
1238 		sx_xlock(&shm_dict_lock);
1239 		shmfd = shm_lookup(path, fnv);
1240 		if (shmfd == NULL) {
1241 			/* Object does not yet exist, create it if requested. */
1242 			if (flags & O_CREAT) {
1243 #ifdef MAC
1244 				error = mac_posixshm_check_create(td->td_ucred,
1245 				    path);
1246 				if (error == 0) {
1247 #endif
1248 					shmfd = shm_alloc(td->td_ucred, cmode,
1249 					    largepage);
1250 					shmfd->shm_seals = initial_seals;
1251 					shmfd->shm_flags = shmflags;
1252 					shm_insert(path, fnv, shmfd);
1253 #ifdef MAC
1254 				}
1255 #endif
1256 			} else {
1257 				free(path, M_SHMFD);
1258 				error = ENOENT;
1259 			}
1260 		} else {
1261 			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1262 			    &shmfd->shm_mtx);
1263 
1264 			/*
1265 			 * kern_shm_open() likely shouldn't ever error out on
1266 			 * trying to set a seal that already exists, unlike
1267 			 * F_ADD_SEALS.  This would break terribly as
1268 			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
1269 			 * historical behavior where the underlying file could
1270 			 * not be sealed.
1271 			 */
1272 			initial_seals &= ~shmfd->shm_seals;
1273 
1274 			/*
1275 			 * Object already exists, obtain a new
1276 			 * reference if requested and permitted.
1277 			 */
1278 			free(path, M_SHMFD);
1279 
1280 			/*
1281 			 * initial_seals can't add seals if F_SEAL_SEAL has
1282 			 * already been set.  If F_SEAL_SEAL is set,
1283 			 * then we've already removed that one from
1284 			 * initial_seals.  This is currently redundant as we
1285 			 * only allow setting F_SEAL_SEAL at creation time, but
1286 			 * it's cheap to check and decreases the effort required
1287 			 * to allow additional seals.
1288 			 */
1289 			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1290 			    initial_seals != 0)
1291 				error = EPERM;
1292 			else if ((flags & (O_CREAT | O_EXCL)) ==
1293 			    (O_CREAT | O_EXCL))
1294 				error = EEXIST;
1295 			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1296 				error = EINVAL;
1297 			else {
1298 #ifdef MAC
1299 				error = mac_posixshm_check_open(td->td_ucred,
1300 				    shmfd, FFLAGS(flags & O_ACCMODE));
1301 				if (error == 0)
1302 #endif
1303 				error = shm_access(shmfd, td->td_ucred,
1304 				    FFLAGS(flags & O_ACCMODE));
1305 			}
1306 
1307 			/*
1308 			 * Truncate the file back to zero length if
1309 			 * O_TRUNC was specified and the object was
1310 			 * opened with read/write.
1311 			 */
1312 			if (error == 0 &&
1313 			    (flags & (O_ACCMODE | O_TRUNC)) ==
1314 			    (O_RDWR | O_TRUNC)) {
1315 				VM_OBJECT_WLOCK(shmfd->shm_object);
1316 #ifdef MAC
1317 				error = mac_posixshm_check_truncate(
1318 					td->td_ucred, fp->f_cred, shmfd);
1319 				if (error == 0)
1320 #endif
1321 					error = shm_dotruncate_locked(shmfd, 0,
1322 					    rl_cookie);
1323 				VM_OBJECT_WUNLOCK(shmfd->shm_object);
1324 			}
1325 			if (error == 0) {
1326 				/*
1327 				 * Currently we only allow F_SEAL_SEAL to be
1328 				 * set initially.  As noted above, this would
1329 				 * need to be reworked should that change.
1330 				 */
1331 				shmfd->shm_seals |= initial_seals;
1332 				shm_hold(shmfd);
1333 			}
1334 			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
1335 			    &shmfd->shm_mtx);
1336 		}
1337 		sx_xunlock(&shm_dict_lock);
1338 
1339 		if (error) {
1340 			fdclose(td, fp, fd);
1341 			fdrop(fp, td);
1342 			return (error);
1343 		}
1344 	}
1345 
1346 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1347 
1348 	td->td_retval[0] = fd;
1349 	fdrop(fp, td);
1350 
1351 	return (0);
1352 }
1353 
1354 /* System calls. */
1355 #ifdef COMPAT_FREEBSD12
1356 int
1357 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1358 {
1359 
1360 	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1361 	    uap->mode, NULL));
1362 }
1363 #endif
1364 
1365 int
1366 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1367 {
1368 	char *path;
1369 	Fnv32_t fnv;
1370 	int error;
1371 
1372 	error = shm_copyin_path(td, uap->path, &path);
1373 	if (error != 0)
1374 		return (error);
1375 
1376 	AUDIT_ARG_UPATH1_CANON(path);
1377 	fnv = fnv_32_str(path, FNV1_32_INIT);
1378 	sx_xlock(&shm_dict_lock);
1379 	error = shm_remove(path, fnv, td->td_ucred);
1380 	sx_xunlock(&shm_dict_lock);
1381 	free(path, M_SHMFD);
1382 
1383 	return (error);
1384 }
1385 
1386 int
1387 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1388 {
1389 	char *path_from = NULL, *path_to = NULL;
1390 	Fnv32_t fnv_from, fnv_to;
1391 	struct shmfd *fd_from;
1392 	struct shmfd *fd_to;
1393 	int error;
1394 	int flags;
1395 
1396 	flags = uap->flags;
1397 	AUDIT_ARG_FFLAGS(flags);
1398 
1399 	/*
1400 	 * Make sure the user passed only valid flags.
1401 	 * If you add a new flag, please add a new term here.
1402 	 */
1403 	if ((flags & ~(
1404 	    SHM_RENAME_NOREPLACE |
1405 	    SHM_RENAME_EXCHANGE
1406 	    )) != 0) {
1407 		error = EINVAL;
1408 		goto out;
1409 	}
1410 
1411 	/*
1412 	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1413 	 * force the user to choose one or the other.
1414 	 */
1415 	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1416 	    (flags & SHM_RENAME_EXCHANGE) != 0) {
1417 		error = EINVAL;
1418 		goto out;
1419 	}
1420 
1421 	/* Renaming to or from anonymous makes no sense */
1422 	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1423 		error = EINVAL;
1424 		goto out;
1425 	}
1426 
1427 	error = shm_copyin_path(td, uap->path_from, &path_from);
1428 	if (error != 0)
1429 		goto out;
1430 
1431 	error = shm_copyin_path(td, uap->path_to, &path_to);
1432 	if (error != 0)
1433 		goto out;
1434 
1435 	AUDIT_ARG_UPATH1_CANON(path_from);
1436 	AUDIT_ARG_UPATH2_CANON(path_to);
1437 
1438 	/* Rename with from/to equal is a no-op */
1439 	if (strcmp(path_from, path_to) == 0)
1440 		goto out;
1441 
1442 	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1443 	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1444 
1445 	sx_xlock(&shm_dict_lock);
1446 
1447 	fd_from = shm_lookup(path_from, fnv_from);
1448 	if (fd_from == NULL) {
1449 		error = ENOENT;
1450 		goto out_locked;
1451 	}
1452 
1453 	fd_to = shm_lookup(path_to, fnv_to);
1454 	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1455 		error = EEXIST;
1456 		goto out_locked;
1457 	}
1458 
1459 	/*
1460 	 * Unconditionally prevents shm_remove from invalidating the 'from'
1461 	 * shm's state.
1462 	 */
1463 	shm_hold(fd_from);
1464 	error = shm_remove(path_from, fnv_from, td->td_ucred);
1465 
1466 	/*
1467 	 * One of my assumptions failed if ENOENT (e.g. locking didn't
1468 	 * protect us)
1469 	 */
1470 	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1471 	    path_from));
1472 	if (error != 0) {
1473 		shm_drop(fd_from);
1474 		goto out_locked;
1475 	}
1476 
1477 	/*
1478 	 * If we are exchanging, we need to ensure the shm_remove below
1479 	 * doesn't invalidate the dest shm's state.
1480 	 */
1481 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1482 		shm_hold(fd_to);
1483 
1484 	/*
1485 	 * NOTE: if path_to is not already in the hash, c'est la vie;
1486 	 * it simply means we have nothing already at path_to to unlink.
1487 	 * That is the ENOENT case.
1488 	 *
1489 	 * If we somehow don't have access to unlink this guy, but
1490 	 * did for the shm at path_from, then relink the shm to path_from
1491 	 * and abort with EACCES.
1492 	 *
1493 	 * All other errors: that is weird; let's relink and abort the
1494 	 * operation.
1495 	 */
1496 	error = shm_remove(path_to, fnv_to, td->td_ucred);
1497 	if (error != 0 && error != ENOENT) {
1498 		shm_insert(path_from, fnv_from, fd_from);
1499 		shm_drop(fd_from);
1500 		/* Don't free path_from now, since the hash references it */
1501 		path_from = NULL;
1502 		goto out_locked;
1503 	}
1504 
1505 	error = 0;
1506 
1507 	shm_insert(path_to, fnv_to, fd_from);
1508 
1509 	/* Don't free path_to now, since the hash references it */
1510 	path_to = NULL;
1511 
1512 	/* We kept a ref when we removed, and incremented again in insert */
1513 	shm_drop(fd_from);
1514 	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1515 	    fd_from->shm_refs));
1516 
1517 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1518 		shm_insert(path_from, fnv_from, fd_to);
1519 		path_from = NULL;
1520 		shm_drop(fd_to);
1521 		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1522 		    fd_to->shm_refs));
1523 	}
1524 
1525 out_locked:
1526 	sx_xunlock(&shm_dict_lock);
1527 
1528 out:
1529 	free(path_from, M_SHMFD);
1530 	free(path_to, M_SHMFD);
1531 	return (error);
1532 }
1533 
1534 static int
1535 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1536     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1537     vm_ooffset_t foff, struct thread *td)
1538 {
1539 	struct vmspace *vms;
1540 	vm_map_entry_t next_entry, prev_entry;
1541 	vm_offset_t align, mask, maxaddr;
1542 	int docow, error, rv, try;
1543 	bool curmap;
1544 
1545 	if (shmfd->shm_lp_psind == 0)
1546 		return (EINVAL);
1547 
1548 	/* MAP_PRIVATE is disabled */
1549 	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1550 	    MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1551 		return (EINVAL);
1552 
1553 	vms = td->td_proc->p_vmspace;
1554 	curmap = map == &vms->vm_map;
1555 	if (curmap) {
1556 		error = kern_mmap_racct_check(td, map, size);
1557 		if (error != 0)
1558 			return (error);
1559 	}
1560 
1561 	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1562 	docow |= MAP_INHERIT_SHARE;
1563 	if ((flags & MAP_NOCORE) != 0)
1564 		docow |= MAP_DISABLE_COREDUMP;
1565 
1566 	mask = pagesizes[shmfd->shm_lp_psind] - 1;
1567 	if ((foff & mask) != 0)
1568 		return (EINVAL);
1569 	maxaddr = vm_map_max(map);
1570 	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1571 		maxaddr = MAP_32BIT_MAX_ADDR;
1572 	if (size == 0 || (size & mask) != 0 ||
1573 	    (*addr != 0 && ((*addr & mask) != 0 ||
1574 	    *addr + size < *addr || *addr + size > maxaddr)))
1575 		return (EINVAL);
1576 
1577 	align = flags & MAP_ALIGNMENT_MASK;
1578 	if (align == 0) {
1579 		align = pagesizes[shmfd->shm_lp_psind];
1580 	} else if (align == MAP_ALIGNED_SUPER) {
1581 		if (shmfd->shm_lp_psind != 1)
1582 			return (EINVAL);
1583 		align = pagesizes[1];
1584 	} else {
1585 		align >>= MAP_ALIGNMENT_SHIFT;
1586 		align = 1ULL << align;
1587 		/* Also handles overflow. */
1588 		if (align < pagesizes[shmfd->shm_lp_psind])
1589 			return (EINVAL);
1590 	}
1591 
1592 	vm_map_lock(map);
1593 	if ((flags & MAP_FIXED) == 0) {
1594 		try = 1;
1595 		if (curmap && (*addr == 0 ||
1596 		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1597 		    *addr < round_page((vm_offset_t)vms->vm_daddr +
1598 		    lim_max(td, RLIMIT_DATA))))) {
1599 			*addr = roundup2((vm_offset_t)vms->vm_daddr +
1600 			    lim_max(td, RLIMIT_DATA),
1601 			    pagesizes[shmfd->shm_lp_psind]);
1602 		}
1603 again:
1604 		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1605 		if (rv != KERN_SUCCESS) {
1606 			if (try == 1) {
1607 				try = 2;
1608 				*addr = vm_map_min(map);
1609 				if ((*addr & mask) != 0)
1610 					*addr = (*addr + mask) & ~mask;
1611 				goto again;
1612 			}
1613 			goto fail1;
1614 		}
1615 	} else if ((flags & MAP_EXCL) == 0) {
1616 		rv = vm_map_delete(map, *addr, *addr + size);
1617 		if (rv != KERN_SUCCESS)
1618 			goto fail1;
1619 	} else {
1620 		error = ENOSPC;
1621 		if (vm_map_lookup_entry(map, *addr, &prev_entry))
1622 			goto fail;
1623 		next_entry = vm_map_entry_succ(prev_entry);
1624 		if (next_entry->start < *addr + size)
1625 			goto fail;
1626 	}
1627 
1628 	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1629 	    prot, max_prot, docow);
1630 fail1:
1631 	error = vm_mmap_to_errno(rv);
1632 fail:
1633 	vm_map_unlock(map);
1634 	return (error);
1635 }
1636 
1637 static int
1638 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1639     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1640     vm_ooffset_t foff, struct thread *td)
1641 {
1642 	struct shmfd *shmfd;
1643 	vm_prot_t maxprot;
1644 	int error;
1645 	bool writecnt;
1646 	void *rl_cookie;
1647 
1648 	shmfd = fp->f_data;
1649 	maxprot = VM_PROT_NONE;
1650 
1651 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1652 	    &shmfd->shm_mtx);
1653 	/* FREAD should always be set. */
1654 	if ((fp->f_flag & FREAD) != 0)
1655 		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1656 
1657 	/*
1658 	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1659 	 * mapping with a write seal applied.  Private mappings are always
1660 	 * writeable.
1661 	 */
1662 	if ((flags & MAP_SHARED) == 0) {
1663 		cap_maxprot |= VM_PROT_WRITE;
1664 		maxprot |= VM_PROT_WRITE;
1665 		writecnt = false;
1666 	} else {
1667 		if ((fp->f_flag & FWRITE) != 0 &&
1668 		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1669 			maxprot |= VM_PROT_WRITE;
1670 
1671 		/*
1672 		 * Any mappings from a writable descriptor may be upgraded to
1673 		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1674 		 * applied between the open and subsequent mmap(2).  We want to
1675 		 * reject application of a write seal as long as any such
1676 		 * mapping exists so that the seal cannot be trivially bypassed.
1677 		 */
1678 		writecnt = (maxprot & VM_PROT_WRITE) != 0;
1679 		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1680 			error = EACCES;
1681 			goto out;
1682 		}
1683 	}
1684 	maxprot &= cap_maxprot;
1685 
1686 	/* See comment in vn_mmap(). */
1687 	if (
1688 #ifdef _LP64
1689 	    objsize > OFF_MAX ||
1690 #endif
1691 	    foff > OFF_MAX - objsize) {
1692 		error = EINVAL;
1693 		goto out;
1694 	}
1695 
1696 #ifdef MAC
1697 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1698 	if (error != 0)
1699 		goto out;
1700 #endif
1701 
1702 	mtx_lock(&shm_timestamp_lock);
1703 	vfs_timestamp(&shmfd->shm_atime);
1704 	mtx_unlock(&shm_timestamp_lock);
1705 	vm_object_reference(shmfd->shm_object);
1706 
1707 	if (shm_largepage(shmfd)) {
1708 		writecnt = false;
1709 		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1710 		    maxprot, flags, foff, td);
1711 	} else {
1712 		if (writecnt) {
1713 			vm_pager_update_writecount(shmfd->shm_object, 0,
1714 			    objsize);
1715 		}
1716 		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1717 		    shmfd->shm_object, foff, writecnt, td);
1718 	}
1719 	if (error != 0) {
1720 		if (writecnt)
1721 			vm_pager_release_writecount(shmfd->shm_object, 0,
1722 			    objsize);
1723 		vm_object_deallocate(shmfd->shm_object);
1724 	}
1725 out:
1726 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1727 	return (error);
1728 }
1729 
1730 static int
1731 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1732     struct thread *td)
1733 {
1734 	struct shmfd *shmfd;
1735 	int error;
1736 
1737 	error = 0;
1738 	shmfd = fp->f_data;
1739 	mtx_lock(&shm_timestamp_lock);
1740 	/*
1741 	 * SUSv4 says that x bits of permission need not be affected.
1742 	 * Be consistent with our shm_open there.
1743 	 */
1744 #ifdef MAC
1745 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1746 	if (error != 0)
1747 		goto out;
1748 #endif
1749 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1750 	    VADMIN, active_cred);
1751 	if (error != 0)
1752 		goto out;
1753 	shmfd->shm_mode = mode & ACCESSPERMS;
1754 out:
1755 	mtx_unlock(&shm_timestamp_lock);
1756 	return (error);
1757 }
1758 
1759 static int
1760 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1761     struct thread *td)
1762 {
1763 	struct shmfd *shmfd;
1764 	int error;
1765 
1766 	error = 0;
1767 	shmfd = fp->f_data;
1768 	mtx_lock(&shm_timestamp_lock);
1769 #ifdef MAC
1770 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1771 	if (error != 0)
1772 		goto out;
1773 #endif
1774 	if (uid == (uid_t)-1)
1775 		uid = shmfd->shm_uid;
1776 	if (gid == (gid_t)-1)
1777 		gid = shmfd->shm_gid;
1778 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1779 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1780 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1781 		goto out;
1782 	shmfd->shm_uid = uid;
1783 	shmfd->shm_gid = gid;
1784 out:
1785 	mtx_unlock(&shm_timestamp_lock);
1786 	return (error);
1787 }
1788 
1789 /*
1790  * Helper routines to allow the backing object of a shared memory file
1791  * descriptor to be mapped in the kernel.
1792  */
1793 int
1794 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1795 {
1796 	struct shmfd *shmfd;
1797 	vm_offset_t kva, ofs;
1798 	vm_object_t obj;
1799 	int rv;
1800 
1801 	if (fp->f_type != DTYPE_SHM)
1802 		return (EINVAL);
1803 	shmfd = fp->f_data;
1804 	obj = shmfd->shm_object;
1805 	VM_OBJECT_WLOCK(obj);
1806 	/*
1807 	 * XXXRW: This validation is probably insufficient, and subject to
1808 	 * sign errors.  It should be fixed.
1809 	 */
1810 	if (offset >= shmfd->shm_size ||
1811 	    offset + size > round_page(shmfd->shm_size)) {
1812 		VM_OBJECT_WUNLOCK(obj);
1813 		return (EINVAL);
1814 	}
1815 
1816 	shmfd->shm_kmappings++;
1817 	vm_object_reference_locked(obj);
1818 	VM_OBJECT_WUNLOCK(obj);
1819 
1820 	/* Map the object into the kernel_map and wire it. */
1821 	kva = vm_map_min(kernel_map);
1822 	ofs = offset & PAGE_MASK;
1823 	offset = trunc_page(offset);
1824 	size = round_page(size + ofs);
1825 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1826 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1827 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1828 	if (rv == KERN_SUCCESS) {
1829 		rv = vm_map_wire(kernel_map, kva, kva + size,
1830 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1831 		if (rv == KERN_SUCCESS) {
1832 			*memp = (void *)(kva + ofs);
1833 			return (0);
1834 		}
1835 		vm_map_remove(kernel_map, kva, kva + size);
1836 	} else
1837 		vm_object_deallocate(obj);
1838 
1839 	/* On failure, drop our mapping reference. */
1840 	VM_OBJECT_WLOCK(obj);
1841 	shmfd->shm_kmappings--;
1842 	VM_OBJECT_WUNLOCK(obj);
1843 
1844 	return (vm_mmap_to_errno(rv));
1845 }
1846 
1847 /*
1848  * We require the caller to unmap the entire entry.  This allows us to
1849  * safely decrement shm_kmappings when a mapping is removed.
1850  */
1851 int
1852 shm_unmap(struct file *fp, void *mem, size_t size)
1853 {
1854 	struct shmfd *shmfd;
1855 	vm_map_entry_t entry;
1856 	vm_offset_t kva, ofs;
1857 	vm_object_t obj;
1858 	vm_pindex_t pindex;
1859 	vm_prot_t prot;
1860 	boolean_t wired;
1861 	vm_map_t map;
1862 	int rv;
1863 
1864 	if (fp->f_type != DTYPE_SHM)
1865 		return (EINVAL);
1866 	shmfd = fp->f_data;
1867 	kva = (vm_offset_t)mem;
1868 	ofs = kva & PAGE_MASK;
1869 	kva = trunc_page(kva);
1870 	size = round_page(size + ofs);
1871 	map = kernel_map;
1872 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1873 	    &obj, &pindex, &prot, &wired);
1874 	if (rv != KERN_SUCCESS)
1875 		return (EINVAL);
1876 	if (entry->start != kva || entry->end != kva + size) {
1877 		vm_map_lookup_done(map, entry);
1878 		return (EINVAL);
1879 	}
1880 	vm_map_lookup_done(map, entry);
1881 	if (obj != shmfd->shm_object)
1882 		return (EINVAL);
1883 	vm_map_remove(map, kva, kva + size);
1884 	VM_OBJECT_WLOCK(obj);
1885 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1886 	shmfd->shm_kmappings--;
1887 	VM_OBJECT_WUNLOCK(obj);
1888 	return (0);
1889 }
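
/*
 * Illustrative sketch (hypothetical consumer, not part of the build): a
 * kernel component holding a DTYPE_SHM file pointer, e.g. obtained via
 * fget(), could map and later unmap a range as follows; 'fp', 'off' and
 * 'len' are placeholder names.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error != 0)
 *		return (error);
 *	... access up to 'len' bytes starting at 'mem' ...
 *	error = shm_unmap(fp, mem, len);
 *
 * shm_unmap() must be given the same address and size returned by
 * shm_map(); the wired kernel entry is removed as a whole so that
 * shm_kmappings is decremented exactly once per successful shm_map().
 */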
1890 
1891 static int
1892 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1893 {
1894 	const char *path, *pr_path;
1895 	size_t pr_pathlen;
1896 	bool visible;
1897 
1898 	sx_assert(&shm_dict_lock, SA_LOCKED);
1899 	kif->kf_type = KF_TYPE_SHM;
1900 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1901 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1902 	if (shmfd->shm_path != NULL) {
1904 		path = shmfd->shm_path;
1905 		pr_path = curthread->td_ucred->cr_prison->pr_path;
1906 		if (strcmp(pr_path, "/") != 0) {
1907 			/* Return the jail-rooted pathname. */
1908 			pr_pathlen = strlen(pr_path);
1909 			visible = strncmp(path, pr_path, pr_pathlen)
1910 			    == 0 && path[pr_pathlen] == '/';
1911 			if (list && !visible)
1912 				return (EPERM);
1913 			if (visible)
1914 				path += pr_pathlen;
1915 		}
1916 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1917 	}
1919 	return (0);
1920 }
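
/*
 * Worked example of the jail-rooted pathname logic above, using
 * hypothetical values: with pr_path "/jails/a" and shm_path
 * "/jails/a/foo", the prefix matches and is followed by '/', so the
 * object is visible and reported as "/foo".  With shm_path "/other/foo"
 * the prefix does not match; a listing request returns EPERM, while a
 * query on an open descriptor still reports the full path.
 */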
1921 
1922 static int
1923 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1924     struct filedesc *fdp __unused)
1925 {
1926 	int res;
1927 
1928 	sx_slock(&shm_dict_lock);
1929 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1930 	sx_sunlock(&shm_dict_lock);
1931 	return (res);
1932 }
1933 
1934 static int
1935 shm_add_seals(struct file *fp, int seals)
1936 {
1937 	struct shmfd *shmfd;
1938 	void *rl_cookie;
1939 	vm_ooffset_t writemappings;
1940 	int error, nseals;
1941 
1942 	error = 0;
1943 	shmfd = fp->f_data;
1944 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1945 	    &shmfd->shm_mtx);
1946 
1947 	/* Even already-set seals should result in EPERM. */
1948 	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1949 		error = EPERM;
1950 		goto out;
1951 	}
1952 	nseals = seals & ~shmfd->shm_seals;
1953 	if ((nseals & F_SEAL_WRITE) != 0) {
1954 		if (shm_largepage(shmfd)) {
1955 			error = ENOTSUP;
1956 			goto out;
1957 		}
1958 
1959 		/*
1960 		 * The rangelock above prevents writable mappings from being
1961 		 * added after we've started applying seals.  The RLOCK here
1962 		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1963 		 * writemappings will be done without a rangelock.
1964 		 */
1965 		VM_OBJECT_RLOCK(shmfd->shm_object);
1966 		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1967 		VM_OBJECT_RUNLOCK(shmfd->shm_object);
1968 		/* kmappings are also writable */
1969 		if (writemappings > 0) {
1970 			error = EBUSY;
1971 			goto out;
1972 		}
1973 	}
1974 	shmfd->shm_seals |= nseals;
1975 out:
1976 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1977 	return (error);
1978 }
1979 
1980 static int
1981 shm_get_seals(struct file *fp, int *seals)
1982 {
1983 	struct shmfd *shmfd;
1984 
1985 	shmfd = fp->f_data;
1986 	*seals = shmfd->shm_seals;
1987 	return (0);
1988 }
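
/*
 * Userspace sketch (illustrative): seals are manipulated with fcntl(2)
 * on a descriptor created with sealing enabled, for instance via
 * memfd_create() with MFD_ALLOW_SEALING; 'fd' is a placeholder.
 *
 *	int seals;
 *
 *	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) == -1)
 *		err(1, "F_ADD_SEALS");
 *	seals = fcntl(fd, F_GET_SEALS);
 *
 * F_ADD_SEALS with F_SEAL_WRITE fails with EBUSY while writable mappings
 * of the object remain, and any F_ADD_SEALS fails with EPERM once
 * F_SEAL_SEAL has been applied, matching shm_add_seals() above.
 */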
1989 
1990 static int
1991 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1992 {
1993 	vm_object_t object;
1994 	vm_pindex_t pistart, pi, piend;
1995 	vm_ooffset_t off, len;
1996 	int startofs, endofs, end;
1997 	int error;
1998 
1999 	off = *offset;
2000 	len = *length;
2001 	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2002 	if (off + len > shmfd->shm_size)
2003 		len = shmfd->shm_size - off;
2004 	object = shmfd->shm_object;
2005 	startofs = off & PAGE_MASK;
2006 	endofs = (off + len) & PAGE_MASK;
2007 	pistart = OFF_TO_IDX(off);
2008 	piend = OFF_TO_IDX(off + len);
2009 	pi = OFF_TO_IDX(off + PAGE_MASK);
2010 	error = 0;
2011 
2012 	/* Handle the case when offset is on or beyond shm size. */
2013 	if ((off_t)len <= 0) {
2014 		*length = 0;
2015 		return (0);
2016 	}
2017 
2018 	VM_OBJECT_WLOCK(object);
2019 
2020 	if (startofs != 0) {
2021 		end = pistart != piend ? PAGE_SIZE : endofs;
2022 		error = shm_partial_page_invalidate(object, pistart, startofs,
2023 		    end);
2024 		if (error)
2025 			goto out;
2026 		off += end - startofs;
2027 		len -= end - startofs;
2028 	}
2029 
2030 	if (pi < piend) {
2031 		vm_object_page_remove(object, pi, piend, 0);
2032 		off += IDX_TO_OFF(piend - pi);
2033 		len -= IDX_TO_OFF(piend - pi);
2034 	}
2035 
2036 	if (endofs != 0 && pistart != piend) {
2037 		error = shm_partial_page_invalidate(object, piend, 0, endofs);
2038 		if (error)
2039 			goto out;
2040 		off += endofs;
2041 		len -= endofs;
2042 	}
2043 
2044 out:
2045 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
2046 	*offset = off;
2047 	*length = len;
2048 	return (error);
2049 }
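
/*
 * Worked example of the decomposition above, assuming a 4096-byte page
 * and hypothetical arguments off = 6000, len = 10000: startofs = 1904,
 * endofs = 3712, pistart = 1, pi = 2, piend = 3.  The tail of page 1
 * (bytes 1904-4095) is invalidated, page 2 is removed whole, and the
 * head of page 3 (bytes 0-3711) is invalidated, leaving off = 16000 and
 * len = 0 on return.
 */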
2050 
2051 static int
2052 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2053     struct ucred *active_cred, struct thread *td)
2054 {
2055 	void *rl_cookie;
2056 	struct shmfd *shmfd;
2057 	off_t off, len;
2058 	int error;
2059 
2060 	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2061 	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2062 	    ("shm_fspacectl: non-zero flags"));
2063 	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2064 	    ("shm_fspacectl: offset/length overflow or underflow"));
2065 	error = EINVAL;
2066 	shmfd = fp->f_data;
2067 	off = *offset;
2068 	len = *length;
2069 
2070 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len,
2071 	    &shmfd->shm_mtx);
2072 	switch (cmd) {
2073 	case SPACECTL_DEALLOC:
2074 		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2075 			error = EPERM;
2076 			break;
2077 		}
2078 		error = shm_deallocate(shmfd, &off, &len, flags);
2079 		*offset = off;
2080 		*length = len;
2081 		break;
2082 	default:
2083 		__assert_unreachable();
2084 	}
2085 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2086 	return (error);
2087 }
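
/*
 * Userspace sketch (illustrative): this handler is reached through
 * fspacectl(2) with SPACECTL_DEALLOC; 'fd', 'off' and 'len' are
 * placeholders.
 *
 *	struct spacectl_range range = { .r_offset = off, .r_len = len };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &range, 0, &range) == -1)
 *		err(1, "fspacectl");
 *
 * On return the range describes whatever was left unprocessed; the call
 * fails with EPERM once F_SEAL_WRITE has been applied to the object.
 */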
2088 
2090 static int
2091 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2092 {
2093 	void *rl_cookie;
2094 	struct shmfd *shmfd;
2095 	size_t size;
2096 	int error;
2097 
2098 	/* This assumes that the caller already checked for overflow. */
2099 	error = 0;
2100 	shmfd = fp->f_data;
2101 	size = offset + len;
2102 
2103 	/*
2104 	 * Just grab the rangelock for the range that we may be attempting to
2105 	 * grow, rather than blocking read/write for regions we won't be
2106 	 * touching while this (potential) resize is in progress.  Other
2107 	 * attempts to resize the shmfd will have to take a write lock from 0 to
2108 	 * OFF_MAX, so this being potentially beyond the current usable range of
2109 	 * the shmfd is not necessarily a concern.  If other mechanisms are
2110 	 * added to grow a shmfd, this may need to be re-evaluated.
2111 	 */
2112 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
2113 	    &shmfd->shm_mtx);
2114 	if (size > shmfd->shm_size)
2115 		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2116 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2117 	/* Translate to posix_fallocate(2) return value as needed. */
2118 	if (error == ENOMEM)
2119 		error = ENOSPC;
2120 	return (error);
2121 }
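
/*
 * Userspace sketch (illustrative): this handler is reached through
 * posix_fallocate(2) on a shared memory descriptor; 'len' is a
 * placeholder.
 *
 *	int fd, error;
 *
 *	fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	if (fd == -1)
 *		err(1, "shm_open");
 *	error = posix_fallocate(fd, 0, len);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 *
 * posix_fallocate() returns the error number directly rather than
 * setting errno, and ENOMEM from the resize path is reported as ENOSPC,
 * per the translation above.
 */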
2122 
2123 static int
2124 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2125 {
2126 	struct shm_mapping *shmm;
2127 	struct sbuf sb;
2128 	struct kinfo_file kif;
2129 	u_long i;
2130 	int error, error2;
2131 
2132 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2133 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2134 	error = 0;
2135 	sx_slock(&shm_dict_lock);
2136 	for (i = 0; i < shm_hash + 1; i++) {
2137 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2138 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2139 			    &kif, true);
2140 			if (error == EPERM) {
2141 				error = 0;
2142 				continue;
2143 			}
2144 			if (error != 0)
2145 				break;
2146 			pack_kinfo(&kif);
2147 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2148 			    0 : ENOMEM;
2149 			if (error != 0)
2150 				break;
2151 		}
2152 	}
2153 	sx_sunlock(&shm_dict_lock);
2154 	error2 = sbuf_finish(&sb);
2155 	sbuf_delete(&sb);
2156 	return (error != 0 ? error : error2);
2157 }
2158 
2159 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2160     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2161     NULL, 0, sysctl_posix_shm_list, "",
2162     "POSIX SHM list");
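
/*
 * Userspace sketch (illustrative): the list is exported as a sequence of
 * variable-length struct kinfo_file records that are walked using
 * kf_structsize; buffer sizing and error handling are abbreviated.
 *
 *	size_t olen;
 *	char *buf, *bp;
 *
 *	if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &olen, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((buf = malloc(olen)) == NULL)
 *		err(1, "malloc");
 *	if (sysctlbyname("kern.ipc.posix_shm_list", buf, &olen, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	for (bp = buf; bp < buf + olen;
 *	    bp += ((struct kinfo_file *)bp)->kf_structsize)
 *		printf("%s\n", ((struct kinfo_file *)bp)->kf_path);
 *
 * posixshmcontrol(1) presents the same information in a friendlier form.
 */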
2163 
2164 int
2165 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2166     struct filecaps *caps)
2167 {
2168 
2169 	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2170 }
2171 
2172 /*
2173  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2174  * caller, and libc will enforce it for the traditional shm_open() call.  This
2175  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2176  * interface also includes a 'name' argument that is currently unused, but could
2177  * potentially be exported later via some interface for debugging purposes.
2178  * From the kernel's perspective, it is optional.  Individual consumers like
2179  * memfd_create() may require it in order to be compatible with other systems
2180  * implementing the same function.
2181  */
2182 int
2183 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2184 {
2185 
2186 	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2187 	    uap->shmflags, NULL, uap->name));
2188 }
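
/*
 * Userspace sketch (illustrative): memfd_create() is one libc consumer
 * of this entry point; it forwards its name argument and opts into
 * CLOEXEC and sealing explicitly, whereas the traditional shm_open()
 * wrapper always sets the close-on-exec flag itself, as noted above.
 *
 *	int fd;
 *
 *	fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	if (fd == -1)
 *		err(1, "memfd_create");
 */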
2189