/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
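
/*
 * Illustrative userland usage of the interface implemented below (a
 * hedged sketch, not part of the original file; the object name
 * "/myshm" is made up and error handling is omitted for brevity):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/myshm");
 */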

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held here when this code
	 * runs on behalf of tmpfs, it is nonetheless safe to sleep
	 * waiting for a free page.  The pageout daemon does not need
	 * to acquire the vnode lock to page out the object's pages
	 * because the object is an OBJT_SWAP type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	if (vm_page_active(m))
		vm_page_reference(m);
	else
		vm_page_activate(m);
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

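/*
 * Transfer data between a uio and the pages of a VM object, one
 * page-sized chunk at a time, stopping at obj_size or when an
 * iteration makes no progress.  Shared with tmpfs, which uses it for
 * read and write on its swap-backed vnode objects.
 */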
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sensible values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
				if (m == NULL)
					goto retry;
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		/* Toss in-memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Release the swap space accounted to this object. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

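/*
 * Worked example for the shrink path above (illustrative): with
 * PAGE_SIZE == 4096, truncating to length == 5000 gives nobjsize ==
 * OFF_TO_IDX(5000 + PAGE_MASK) == 2 and base == (5000 & PAGE_MASK) ==
 * 904, so bytes 904..4095 of the page at index 1 are zeroed and
 * everything from page index 2 onward is freed from memory and swap.
 */
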
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}

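/*
 * Reference counting sketch (illustrative, not part of the original
 * file): shm_alloc() returns an object with shm_refs == 1, each
 * shm_hold() adds a reference, and the shm_drop() that releases the
 * last reference frees the object.
 *
 *	shmfd = shm_alloc(ucred, 0600);		refs == 1
 *	shm_hold(shmfd);			refs == 2
 *	shm_drop(shmfd);			refs == 1
 *	shm_drop(shmfd);			object freed
 */
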
/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

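/*
 * The dictionary helpers below do not acquire shm_dict_lock
 * themselves; their callers (kern_shm_open() and sys_shm_unlink())
 * hold it exclusively across lookup, insert and remove.
 */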
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		pr_path = td->td_ucred->cr_prison->pr_path;

		/* Construct a full pathname for jailed callers. */
		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
		    : strlcpy(path, pr_path, MAXPATHLEN);
		error = copyinstr(userpath, path + pr_pathlen,
		    MAXPATHLEN - pr_pathlen, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[pr_pathlen] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				} else {
					/*
					 * shm_insert() was not called, so
					 * nothing took ownership of the path
					 * copy; free it to avoid a leak.
					 */
					free(path, M_SHMFD);
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
	    : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
	    NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	/* Don't permit shared writable mappings on read-only descriptors. */
	if ((flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize)
		return (EINVAL);

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		return (error);
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(shmfd->shm_object);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

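/*
 * Illustrative in-kernel usage of shm_map()/shm_unmap() (a hedged
 * sketch, not from this file): map the first page of a shared memory
 * descriptor's backing object, touch it, and unmap the same range.
 * Assumes fp is a DTYPE_SHM file whose object is at least a page long.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */
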
static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	const char *path, *pr_path;
	struct shmfd *shmfd;
	size_t pr_pathlen;

	kif->kf_type = KF_TYPE_SHM;
	shmfd = fp->f_data;

	mtx_lock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	mtx_unlock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		sx_slock(&shm_dict_lock);
		if (shmfd->shm_path != NULL) {
			path = shmfd->shm_path;
			pr_path = curthread->td_ucred->cr_prison->pr_path;
			if (strcmp(pr_path, "/") != 0) {
				/* Return the jail-rooted pathname. */
				pr_pathlen = strlen(pr_path);
				if (strncmp(path, pr_path, pr_pathlen) == 0 &&
				    path[pr_pathlen] == '/')
					path += pr_pathlen;
			}
			strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
		}
		sx_sunlock(&shm_dict_lock);
	}
	return (0);
}
1114