/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).  While most of the
 * implementation is here, vm_mmap.c contains mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
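
/*
 * Usage sketch (userland, not compiled here; see shm_open(2)): a typical
 * consumer creates an object, sizes it, and maps it shared:
 *
 *	fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	ftruncate(fd, len);
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */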

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void	shm_doremove(struct shm_mapping *map);
static int	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static int	shm_deallocate(struct shmfd *shmfd, off_t *offset,
    off_t *length, int flags);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;
static fo_fallocate_t	shm_fallocate;
static fo_fspacectl_t	shm_fspacectl;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_fspacectl = shm_fspacectl,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

#define	shm_rangelock_unlock(shmfd, cookie)				\
	rangelock_unlock(&(shmfd)->shm_rl, (cookie), &(shmfd)->shm_mtx)
#define	shm_rangelock_rlock(shmfd, start, end)				\
	rangelock_rlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
#define	shm_rangelock_tryrlock(shmfd, start, end)			\
	rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
#define	shm_rangelock_wlock(shmfd, start, end)				\
	rangelock_wlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)

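/*
 * Locking sketch: the wrappers above pair a rangelock with the per-shmfd
 * mutex.  The canonical pattern used throughout this file is:
 *
 *	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 *	... operate on the shmfd ...
 *	shm_rangelock_unlock(shmfd, rl_cookie);
 */
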
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out obj's pages because obj is an OBJT_SWAP
	 * type object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		if (bootverbose) {
			printf("uiomove_object: vm_obj %p idx %jd "
			    "pager error %d\n", obj, idx, rv);
		}
		return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	vm_page_t m __diagused;
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pidx >= object->size)
		return (VM_PAGER_FAIL);
	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

	/*
	 * We only busy the first page in the superpage run.  It is
	 * useless to busy the whole run since we only remove a full
	 * superpage, and it takes too long to busy e.g. 512 * 512 ==
	 * 262144 pages constituting a 1G amd64 superpage.
	 */
	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
	MPASS(m != NULL);

	*last = *first + atop(pagesizes[psind]) - 1;
	return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pindex >= object->size)
		return (FALSE);
	if (before != NULL) {
		*before = pindex - rounddown2(pindex, pagesizes[psind] /
		    PAGE_SIZE);
	}
	if (after != NULL) {
		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
		    pindex;
	}
	return (TRUE);
}

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind != 0) {
		atomic_subtract_long(&count_largepages[psind],
		    object->size / (pagesizes[psind] / PAGE_SIZE));
		vm_wire_sub(object->size);
	} else {
		KASSERT(object->size == 0,
339 		    ("largepage phys obj %p not initialized bit size %#jx > 0",
		    object, (uintmax_t)object->size));
	}
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
	.phys_pg_populate =	shm_largepage_phys_populate,
	.phys_pg_haspage =	shm_largepage_phys_haspage,
	.phys_pg_ctor =		shm_largepage_phys_ctor,
	.phys_pg_dtor =		shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
	return (shmfd->shm_object->type == OBJT_PHYS);
}

static void
shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
{
	struct shmfd *shm;
	vm_size_t c;

	swap_pager_freespace(obj, start, size, &c);
	if (c == 0)
		return;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	KASSERT(shm->shm_pages >= c,
	    ("shm %p pages %jd free %jd", shm,
	    (uintmax_t)shm->shm_pages, (uintmax_t)c));
	shm->shm_pages -= c;
}

static void
shm_page_inserted(vm_object_t obj, vm_page_t m)
{
	struct shmfd *shm;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
		shm->shm_pages += 1;
}

static void
shm_page_removed(vm_object_t obj, vm_page_t m)
{
	struct shmfd *shm;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
		KASSERT(shm->shm_pages >= 1,
		    ("shm %p pages %jd free 1", shm,
		    (uintmax_t)shm->shm_pages));
		shm->shm_pages -= 1;
	}
}

static struct pagerops shm_swap_pager_ops = {
	.pgo_kvme_type = KVME_TYPE_SWAP,
	.pgo_freespace = shm_pager_freespace,
	.pgo_page_inserted = shm_page_inserted,
	.pgo_page_removed = shm_page_removed,
};
static int shmfd_pager_type = -1;

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	shm_rangelock_unlock(shmfd, rl_cookie);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;
	off_t size;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
		return (EINVAL);
	foffset_lock_uio(fp, uio, flags);
	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
		/*
		 * Overflow is only an error if we're supposed to expand on
		 * write.  Otherwise, we'll just truncate the write to the
		 * size of the file, which can only grow up to OFF_MAX.
		 */
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
			foffset_unlock_uio(fp, uio, flags);
			return (EFBIG);
		}

		size = shmfd->shm_size;
	} else {
		size = uio->uio_offset + uio->uio_resid;
	}
	if ((flags & FOF_OFFSET) == 0)
		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
	else
		rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
	} else {
		error = 0;
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
		    size > shmfd->shm_size) {
			error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
		}
		if (error == 0)
			error = uiomove_object(shmfd->shm_object,
			    shmfd->shm_size, uio);
	}
	shm_rangelock_unlock(shmfd, rl_cookie);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	struct shm_largepage_conf *conf;
	void *rl_cookie;

	shmfd = fp->f_data;
	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
		 * just like it would on an unlinked regular file
		 */
		return (0);
	case FIOSSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		if (shmfd->shm_lp_psind != 0 &&
		    conf->psind != shmfd->shm_lp_psind)
			return (EINVAL);
		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
		    pagesizes[conf->psind] == 0)
			return (EINVAL);
		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
			return (EINVAL);

		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
		shmfd->shm_lp_psind = conf->psind;
		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
		shm_rangelock_unlock(shmfd, rl_cookie);
		return (0);
	case FIOGSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
		conf->psind = shmfd->shm_lp_psind;
		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
		shm_rangelock_unlock(shmfd, rl_cookie);
		return (0);
	default:
		return (ENOTTY);
	}
}
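
/*
 * Userland sketch (assumed flow; error handling omitted): configure a
 * largepage object before it is sized.  psind 1 is typically the 2M page
 * size on amd64:
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 */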

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;
	if (shm_largepage(shmfd)) {
		sb->st_blocks = shmfd->shm_object->size /
		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
	} else {
		sb->st_blocks = shmfd->shm_pages;
	}

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(base >= 0, ("%s: base %d", __func__, base));
	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
	    end));

retry:
	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
	if (m != NULL) {
		MPASS(vm_page_all_valid(m));
	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
		m = vm_page_alloc(object, idx,
		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
		if (m == NULL)
			goto retry;
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(object);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
		if (rv == VM_PAGER_OK) {
			/*
			 * Since the page was not resident, and therefore not
			 * recently accessed, immediately enqueue it for
			 * asynchronous laundering.  The current operation is
			 * not regarded as an access.
			 */
			vm_page_launder(m);
		} else {
			vm_page_free(m);
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
	}
	if (m != NULL) {
		pmap_zero_page_area(m, base, end - base);
		KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
		    __func__, m));
		vm_page_set_dirty(m);
		vm_page_xunbusy(m);
	}

	return (0);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_pindex_t nobjsize;
	vm_ooffset_t delta;
	int base, error;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			error = shm_partial_page_invalidate(object,
			    OFF_TO_IDX(length), base, PAGE_SIZE);
			if (error)
				return (error);
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t newobjsz;
	vm_pindex_t oldobjsz __unused;
	int aflags, error, i, psind, try;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

	oldobjsz = object->size;
	newobjsz = OFF_TO_IDX(length);
	if (length == shmfd->shm_size)
		return (0);
	psind = shmfd->shm_lp_psind;
	if (psind == 0 && length != 0)
		return (EINVAL);
	if ((length & (pagesizes[psind] - 1)) != 0)
		return (EINVAL);

	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);
		return (ENOTSUP);	/* Pages are unmanaged. */
#if 0
		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
		object->size = newobjsz;
		shmfd->shm_size = length;
		return (0);
#endif
	}

	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
		return (EPERM);

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
		aflags |= VM_ALLOC_WAITFAIL;
	try = 0;

	/*
	 * Extend the shmfd and object, keeping all already fully
	 * allocated large pages intact even on error, because the
	 * dropped object lock might have allowed them to be mapped.
	 */
	while (object->size < newobjsz) {
		m = vm_page_alloc_contig(object, object->size, aflags,
		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
		    pagesizes[psind], 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
			    (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
			    try >= largepage_reclaim_tries)) {
				VM_OBJECT_WLOCK(object);
				return (ENOMEM);
			}
			error = vm_page_reclaim_contig(aflags,
			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
			    pagesizes[psind], 0) ? 0 :
			    vm_wait_intr(object);
			if (error != 0) {
				VM_OBJECT_WLOCK(object);
				return (error);
			}
			try++;
			VM_OBJECT_WLOCK(object);
			continue;
		}
		try = 0;
		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
			if ((m[i].flags & PG_ZERO) == 0)
				pmap_zero_page(&m[i]);
			vm_page_valid(&m[i]);
			vm_page_xunbusy(&m[i]);
		}
		object->size += OFF_TO_IDX(pagesizes[psind]);
		shmfd->shm_size += pagesizes[psind];
		atomic_add_long(&count_largepages[psind], 1);
		vm_wire_add(atop(pagesizes[psind]));
	}
	return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	int error;

	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
	    rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}
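
/*
 * Call-path sketch (within this file): ftruncate(2) on a shm descriptor
 * arrives via shm_truncate() and ends up here holding the full-range
 * write rangelock:
 *
 *	shm_truncate() -> shm_dotruncate() -> shm_dotruncate_cookie()
 */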

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
	struct shmfd *shmfd;
	vm_object_t obj;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	if (largepage) {
		shmfd->shm_object = phys_pager_allocate(NULL,
		    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
		    VM_PROT_DEFAULT, 0, ucred);
		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
	} else {
		obj = vm_pager_allocate(shmfd_pager_type, NULL,
		    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
		VM_OBJECT_WLOCK(obj);
		obj->un_pager.swp.swp_priv = shmfd;
		VM_OBJECT_WUNLOCK(obj);
		shmfd->shm_object = obj;
	}
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{
	vm_object_t obj;

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		obj = shmfd->shm_object;
		if (!shm_largepage(shmfd)) {
			VM_OBJECT_WLOCK(obj);
			obj->un_pager.swp.swp_priv = NULL;
			VM_OBJECT_WUNLOCK(obj);
		}
		vm_object_deallocate(obj);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static void
shm_init(void *arg)
{
	char name[32];
	int i;

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
	shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
	    OBJT_SWAP);
	MPASS(shmfd_pager_type != -1);

	for (i = 1; i < MAXPAGESIZES; i++) {
		if (pagesizes[i] == 0)
			break;
#define	M	(1024 * 1024)
#define	G	(1024 * M)
		if (pagesizes[i] >= G)
			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
		else if (pagesizes[i] >= M)
			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
		else
			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
		    "number of non-transient largepages allocated");
	}
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
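
/*
 * The counters registered above surface as read-only sysctls under
 * vm.largepages; the leaf names come from pagesizes[].  A sketch of the
 * result on amd64 (actual names and values depend on the machine):
 *
 *	$ sysctl vm.largepages
 *	vm.largepages.2M: 0
 *	vm.largepages.1G: 0
 */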

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
	struct shm_mapping *shmm, *tshmm;
	u_long i;

	sx_xlock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
			if (shmm->sm_shmfd->shm_object->cred &&
			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
				shm_doremove(shmm);
		}
	}
	sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			shm_doremove(map);
			return (0);
		}
	}

	return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
	map->sm_shmfd->shm_path = NULL;
	LIST_REMOVE(map, sm_link);
	shm_drop(map->sm_shmfd);
	free(map->sm_path, M_SHMFD);
	free(map, M_SHMFD);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
	struct pwddesc *pdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;
	bool largepage;

	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
	    SHM_LARGEPAGE)) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	largepage = (shmflags & SHM_LARGEPAGE) != 0;
	if (largepage && !PMAP_HAS_LARGEPAGES)
		return (ENOTTY);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	pdp = td->td_proc->p_pd;
	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
	 * by POSIX.  We allow it to be unset here so that an in-kernel
	 * interface may be written as a thin layer around shm, optionally not
	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
	 * in sys_shm_open() to keep this implementation compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
		shmfd->shm_seals = initial_seals;
		shmfd->shm_flags = shmflags;
	} else {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode,
					    largepage);
					shmfd->shm_seals = initial_seals;
					shmfd->shm_flags = shmflags;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals can't set additional seals if
			 * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
			 * set, then we've already removed that one from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap to check and decreases the effort required
			 * to allow additional seals.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
				error = EINVAL;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			shm_rangelock_unlock(shmfd, rl_cookie);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}
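
/*
 * Userland sketch (assumed wrappers; see shm_open(2)): this function backs
 * the shm_open(3) and memfd_create(3) interfaces, e.g.:
 *
 *	fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	fd = memfd_create("ring", MFD_ALLOW_SEALING);
 */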

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * One of my assumptions failed if ENOENT (e.g. locking didn't
	 * protect us)
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}
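
/*
 * Userland sketch (see shm_rename(2)): atomically replace the object at
 * path_to, or swap two objects:
 *
 *	shm_rename("/stage", "/live", 0);
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);
 */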

static int
shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
    vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct vmspace *vms;
	vm_map_entry_t next_entry, prev_entry;
	vm_offset_t align, mask, maxaddr;
	int docow, error, rv, try;
	bool curmap;

	if (shmfd->shm_lp_psind == 0)
		return (EINVAL);

	/* MAP_PRIVATE is disabled */
	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
	    MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);

	vms = td->td_proc->p_vmspace;
	curmap = map == &vms->vm_map;
	if (curmap) {
		error = kern_mmap_racct_check(td, map, size);
		if (error != 0)
			return (error);
	}

	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
	docow |= MAP_INHERIT_SHARE;
	if ((flags & MAP_NOCORE) != 0)
		docow |= MAP_DISABLE_COREDUMP;

	mask = pagesizes[shmfd->shm_lp_psind] - 1;
	if ((foff & mask) != 0)
		return (EINVAL);
	maxaddr = vm_map_max(map);
	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
		maxaddr = MAP_32BIT_MAX_ADDR;
	if (size == 0 || (size & mask) != 0 ||
	    (*addr != 0 && ((*addr & mask) != 0 ||
	    *addr + size < *addr || *addr + size > maxaddr)))
		return (EINVAL);

	align = flags & MAP_ALIGNMENT_MASK;
	if (align == 0) {
		align = pagesizes[shmfd->shm_lp_psind];
	} else if (align == MAP_ALIGNED_SUPER) {
		if (shmfd->shm_lp_psind != 1)
			return (EINVAL);
		align = pagesizes[1];
	} else {
		align >>= MAP_ALIGNMENT_SHIFT;
		align = 1ULL << align;
		/* Also handles overflow. */
		if (align < pagesizes[shmfd->shm_lp_psind])
			return (EINVAL);
	}

	vm_map_lock(map);
	if ((flags & MAP_FIXED) == 0) {
		try = 1;
		if (curmap && (*addr == 0 ||
		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    *addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))) {
			*addr = roundup2((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA),
			    pagesizes[shmfd->shm_lp_psind]);
		}
again:
		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
		if (rv != KERN_SUCCESS) {
			if (try == 1) {
				try = 2;
				*addr = vm_map_min(map);
1610 				if ((*addr & mask) != 0)
1611 					*addr = (*addr + mask) & mask;
				goto again;
			}
			goto fail1;
		}
	} else if ((flags & MAP_EXCL) == 0) {
		rv = vm_map_delete(map, *addr, *addr + size);
		if (rv != KERN_SUCCESS)
			goto fail1;
	} else {
		error = ENOSPC;
		if (vm_map_lookup_entry(map, *addr, &prev_entry))
			goto fail;
		next_entry = vm_map_entry_succ(prev_entry);
		if (next_entry->start < *addr + size)
			goto fail;
	}

	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
	    prot, max_prot, docow);
fail1:
	error = vm_mmap_to_errno(rv);
fail:
	vm_map_unlock(map);
	return (error);
}

static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE is set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writable.
	 */
1662 	if ((flags & MAP_SHARED) == 0) {
1663 		cap_maxprot |= VM_PROT_WRITE;
1664 		maxprot |= VM_PROT_WRITE;
1665 		writecnt = false;
1666 	} else {
1667 		if ((fp->f_flag & FWRITE) != 0 &&
1668 		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1669 			maxprot |= VM_PROT_WRITE;
1670 
1671 		/*
1672 		 * Any mappings from a writable descriptor may be upgraded to
1673 		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1674 		 * applied between the open and subsequent mmap(2).  We want to
1675 		 * reject application of a write seal as long as any such
1676 		 * mapping exists so that the seal cannot be trivially bypassed.
1677 		 */
1678 		writecnt = (maxprot & VM_PROT_WRITE) != 0;
1679 		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1680 			error = EACCES;
1681 			goto out;
1682 		}
1683 	}
1684 	maxprot &= cap_maxprot;
1685 
1686 	/* See comment in vn_mmap(). */
1687 	if (
1688 #ifdef _LP64
1689 	    objsize > OFF_MAX ||
1690 #endif
1691 	    foff > OFF_MAX - objsize) {
1692 		error = EINVAL;
1693 		goto out;
1694 	}
1695 
1696 #ifdef MAC
1697 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1698 	if (error != 0)
1699 		goto out;
1700 #endif
1701 
1702 	mtx_lock(&shm_timestamp_lock);
1703 	vfs_timestamp(&shmfd->shm_atime);
1704 	mtx_unlock(&shm_timestamp_lock);
1705 	vm_object_reference(shmfd->shm_object);
1706 
1707 	if (shm_largepage(shmfd)) {
1708 		writecnt = false;
1709 		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1710 		    maxprot, flags, foff, td);
1711 	} else {
1712 		if (writecnt) {
1713 			vm_pager_update_writecount(shmfd->shm_object, 0,
1714 			    objsize);
1715 		}
1716 		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1717 		    shmfd->shm_object, foff, writecnt, td);
1718 	}
1719 	if (error != 0) {
1720 		if (writecnt)
1721 			vm_pager_release_writecount(shmfd->shm_object, 0,
1722 			    objsize);
1723 		vm_object_deallocate(shmfd->shm_object);
1724 	}
1725 out:
1726 	shm_rangelock_unlock(shmfd, rl_cookie);
1727 	return (error);
1728 }
1729 
1730 static int
1731 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1732     struct thread *td)
1733 {
1734 	struct shmfd *shmfd;
1735 	int error;
1736 
1737 	error = 0;
1738 	shmfd = fp->f_data;
1739 	mtx_lock(&shm_timestamp_lock);
1740 	/*
1741 	 * SUSv4 says that x bits of permission need not be affected.
1742 	 * Be consistent with our shm_open there.
1743 	 */
1744 #ifdef MAC
1745 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1746 	if (error != 0)
1747 		goto out;
1748 #endif
1749 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1750 	    VADMIN, active_cred);
1751 	if (error != 0)
1752 		goto out;
1753 	shmfd->shm_mode = mode & ACCESSPERMS;
1754 out:
1755 	mtx_unlock(&shm_timestamp_lock);
1756 	return (error);
1757 }
1758 
1759 static int
1760 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1761     struct thread *td)
1762 {
1763 	struct shmfd *shmfd;
1764 	int error;
1765 
1766 	error = 0;
1767 	shmfd = fp->f_data;
1768 	mtx_lock(&shm_timestamp_lock);
1769 #ifdef MAC
1770 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1771 	if (error != 0)
1772 		goto out;
1773 #endif
1774 	if (uid == (uid_t)-1)
1775 		uid = shmfd->shm_uid;
1776 	if (gid == (gid_t)-1)
1777                  gid = shmfd->shm_gid;
1778 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1779 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1780 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1781 		goto out;
1782 	shmfd->shm_uid = uid;
1783 	shmfd->shm_gid = gid;
1784 out:
1785 	mtx_unlock(&shm_timestamp_lock);
1786 	return (error);
1787 }

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}
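
/*
 * In-kernel sketch (assumed caller; error handling omitted): map a range
 * of a shm file into the kernel, use it, then unmap the same extent:
 *
 *	error = shm_map(fp, len, off, &mem);
 *	... access mem ...
 *	error = shm_unmap(fp, mem, len);
 */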
1846 
1847 /*
1848  * We require the caller to unmap the entire entry.  This allows us to
1849  * safely decrement shm_kmappings when a mapping is removed.
1850  */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

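/*
 * Seals are queried and applied from userland through fcntl(2); a
 * minimal sketch (assuming the F_GET_SEALS/F_ADD_SEALS commands and an
 * fd obtained from shm_open(2) or memfd_create(2)):
 *
 *	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) == -1)
 *		err(1, "F_ADD_SEALS");
 *	seals = fcntl(fd, F_GET_SEALS);
 */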
static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		if (shm_largepage(shmfd)) {
			error = ENOTSUP;
			goto out;
		}

		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}

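/*
 * Deallocate a range in three phases: invalidate any leading partial
 * page, remove the whole pages in the middle, then invalidate any
 * trailing partial page.  For example (4K pages), off = 6144 and
 * len = 10240 give startofs = 2048, pistart = 1, pi = 2, piend = 4 and
 * endofs = 0: the second half of page 1 is invalidated in place and
 * pages 2 and 3 are removed outright.
 */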
1990 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1991 {
1992 	vm_object_t object;
1993 	vm_pindex_t pistart, pi, piend;
1994 	vm_ooffset_t off, len;
1995 	int startofs, endofs, end;
1996 	int error;
1997 
1998 	off = *offset;
1999 	len = *length;
2000 	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2001 	if (off + len > shmfd->shm_size)
2002 		len = shmfd->shm_size - off;
2003 	object = shmfd->shm_object;
2004 	startofs = off & PAGE_MASK;
2005 	endofs = (off + len) & PAGE_MASK;
2006 	pistart = OFF_TO_IDX(off);
2007 	piend = OFF_TO_IDX(off + len);
2008 	pi = OFF_TO_IDX(off + PAGE_MASK);
2009 	error = 0;
2010 
2011 	/* Handle the case when offset is on or beyond shm size. */
2012 	if ((off_t)len <= 0) {
2013 		*length = 0;
2014 		return (0);
2015 	}
2016 
2017 	VM_OBJECT_WLOCK(object);
2018 
2019 	if (startofs != 0) {
2020 		end = pistart != piend ? PAGE_SIZE : endofs;
2021 		error = shm_partial_page_invalidate(object, pistart, startofs,
2022 		    end);
2023 		if (error)
2024 			goto out;
2025 		off += end - startofs;
2026 		len -= end - startofs;
2027 	}
2028 
2029 	if (pi < piend) {
2030 		vm_object_page_remove(object, pi, piend, 0);
2031 		off += IDX_TO_OFF(piend - pi);
2032 		len -= IDX_TO_OFF(piend - pi);
2033 	}
2034 
2035 	if (endofs != 0 && pistart != piend) {
2036 		error = shm_partial_page_invalidate(object, piend, 0, endofs);
2037 		if (error)
2038 			goto out;
2039 		off += endofs;
2040 		len -= endofs;
2041 	}
2042 
2043 out:
2044 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
2045 	*offset = off;
2046 	*length = len;
2047 	return (error);
2048 }
2049 
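/*
 * A sketch of the userland view via fspacectl(2) (assuming the
 * documented struct spacectl_range layout; illustrative values):
 *
 *	struct spacectl_range range = { .r_offset = 0, .r_len = 65536 };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &range, 0, &range) == -1)
 *		err(1, "fspacectl");
 */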
static int
shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
    struct ucred *active_cred, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	off_t off, len;
	int error;

	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
	    ("shm_fspacectl: non-zero flags"));
	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
	    ("shm_fspacectl: offset/length overflow or underflow"));
	error = EINVAL;
	shmfd = fp->f_data;
	off = *offset;
	len = *length;

	rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
	switch (cmd) {
	case SPACECTL_DEALLOC:
		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
			error = EPERM;
			break;
		}
		error = shm_deallocate(shmfd, &off, &len, flags);
		*offset = off;
		*length = len;
		break;
	default:
		__assert_unreachable();
	}
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}

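/*
 * This backs posix_fallocate(2) on shmfds; a brief sketch of the
 * userland view (illustrative size):
 *
 *	error = posix_fallocate(fd, 0, 1024 * 1024);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 *
 * Note that posix_fallocate(2) returns the error number rather than
 * setting errno.
 */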
static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	size_t size;
	int error;

	/* This assumes that the caller already checked for overflow. */
	error = 0;
	shmfd = fp->f_data;
	size = offset + len;

	/*
	 * Just grab the rangelock for the range that we may be attempting to
	 * grow, rather than blocking read/write for regions we won't be
	 * touching while this (potential) resize is in progress.  Other
	 * attempts to resize the shmfd will have to take a write lock from 0 to
	 * OFF_MAX, so this being potentially beyond the current usable range of
	 * the shmfd is not necessarily a concern.  If other mechanisms are
	 * added to grow a shmfd, this may need to be re-evaluated.
	 */
	rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
	if (size > shmfd->shm_size)
		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
	shm_rangelock_unlock(shmfd, rl_cookie);
	/* Translate to posix_fallocate(2) return value as needed. */
	if (error == ENOMEM)
		error = ENOSPC;
	return (error);
}

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM) {
				error = 0;
				continue;
			}
			if (error != 0)
				break;
			pack_kinfo(&kif);
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");
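
/*
 * Userland consumers such as posixshmcontrol(1) can retrieve this list
 * with sysctlbyname(3) and walk the packed records by kf_structsize; a
 * rough sketch (buffer sizing races ignored):
 *
 *	size_t len;
 *
 *	sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0);
 *	for (p = buf; p < buf + len; p += kif->kf_structsize) {
 *		kif = (struct kinfo_file *)(void *)p;
 *		...
 *	}
 */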

int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but
 * could potentially be exported later via some interface for debugging
 * purposes.  From the kernel's perspective, it is optional.  Individual
 * consumers like memfd_create() may require it in order to be compatible
 * with other systems implementing the same function.
 */
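/*
 * For illustration (a sketch of the expected libc usage, not a
 * contract): the traditional shm_open(3) wrapper would call roughly
 *
 *	shm_open2(path, flags | O_CLOEXEC, mode, 0, NULL);
 *
 * while a memfd_create(3) implementation would pass SHM_ANON along
 * with its 'name' argument and any seal-related shmflags.
 */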
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, NULL, uap->name));
}