/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  Per-uid swap resource limit controls total amount of
 * memory that user can consume for anonymous objects, including
 * shared.
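 *
 * For reference, a minimal userland consumer of this facility looks
 * roughly like the following (standard POSIX calls, error handling
 * omitted):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, len);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Largepage objects are created by passing SHM_LARGEPAGE to shm_open2()
 * and must be configured with the FIOSSHMLPGCNF ioctl before they can
 * be grown.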
48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 #include "opt_capsicum.h" 54 #include "opt_ktrace.h" 55 56 #include <sys/param.h> 57 #include <sys/capsicum.h> 58 #include <sys/conf.h> 59 #include <sys/fcntl.h> 60 #include <sys/file.h> 61 #include <sys/filedesc.h> 62 #include <sys/filio.h> 63 #include <sys/fnv_hash.h> 64 #include <sys/kernel.h> 65 #include <sys/limits.h> 66 #include <sys/uio.h> 67 #include <sys/signal.h> 68 #include <sys/jail.h> 69 #include <sys/ktrace.h> 70 #include <sys/lock.h> 71 #include <sys/malloc.h> 72 #include <sys/mman.h> 73 #include <sys/mutex.h> 74 #include <sys/priv.h> 75 #include <sys/proc.h> 76 #include <sys/refcount.h> 77 #include <sys/resourcevar.h> 78 #include <sys/rwlock.h> 79 #include <sys/sbuf.h> 80 #include <sys/stat.h> 81 #include <sys/syscallsubr.h> 82 #include <sys/sysctl.h> 83 #include <sys/sysproto.h> 84 #include <sys/systm.h> 85 #include <sys/sx.h> 86 #include <sys/time.h> 87 #include <sys/vmmeter.h> 88 #include <sys/vnode.h> 89 #include <sys/unistd.h> 90 #include <sys/user.h> 91 92 #include <security/audit/audit.h> 93 #include <security/mac/mac_framework.h> 94 95 #include <vm/vm.h> 96 #include <vm/vm_param.h> 97 #include <vm/pmap.h> 98 #include <vm/vm_extern.h> 99 #include <vm/vm_map.h> 100 #include <vm/vm_kern.h> 101 #include <vm/vm_object.h> 102 #include <vm/vm_page.h> 103 #include <vm/vm_pageout.h> 104 #include <vm/vm_pager.h> 105 #include <vm/swap_pager.h> 106 107 struct shm_mapping { 108 char *sm_path; 109 Fnv32_t sm_fnv; 110 struct shmfd *sm_shmfd; 111 LIST_ENTRY(shm_mapping) sm_link; 112 }; 113 114 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor"); 115 static LIST_HEAD(, shm_mapping) *shm_dictionary; 116 static struct sx shm_dict_lock; 117 static struct mtx shm_timestamp_lock; 118 static u_long shm_hash; 119 static struct unrhdr64 shm_ino_unr; 120 static dev_t shm_dev_ino; 121 122 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash]) 123 124 static void shm_init(void *arg); 125 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd); 126 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv); 127 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred); 128 static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, 129 void *rl_cookie); 130 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, 131 void *rl_cookie); 132 static int shm_copyin_path(struct thread *td, const char *userpath_in, 133 char **path_out); 134 static int shm_deallocate(struct shmfd *shmfd, off_t *offset, 135 off_t *length, int flags); 136 137 static fo_rdwr_t shm_read; 138 static fo_rdwr_t shm_write; 139 static fo_truncate_t shm_truncate; 140 static fo_ioctl_t shm_ioctl; 141 static fo_stat_t shm_stat; 142 static fo_close_t shm_close; 143 static fo_chmod_t shm_chmod; 144 static fo_chown_t shm_chown; 145 static fo_seek_t shm_seek; 146 static fo_fill_kinfo_t shm_fill_kinfo; 147 static fo_mmap_t shm_mmap; 148 static fo_get_seals_t shm_get_seals; 149 static fo_add_seals_t shm_add_seals; 150 static fo_fallocate_t shm_fallocate; 151 static fo_fspacectl_t shm_fspacectl; 152 153 /* File descriptor operations. 
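 * shmfd-backed descriptors do not implement poll(2) or kevent(2)
 * (invfo_poll/invfo_kqfilter below).  They are marked DFLAG_PASSABLE,
 * so they may be passed to other processes over unix(4) sockets, and
 * DFLAG_SEEKABLE, so a per-file offset is maintained for
 * read(2)/write(2)/lseek(2).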
*/ 154 struct fileops shm_ops = { 155 .fo_read = shm_read, 156 .fo_write = shm_write, 157 .fo_truncate = shm_truncate, 158 .fo_ioctl = shm_ioctl, 159 .fo_poll = invfo_poll, 160 .fo_kqfilter = invfo_kqfilter, 161 .fo_stat = shm_stat, 162 .fo_close = shm_close, 163 .fo_chmod = shm_chmod, 164 .fo_chown = shm_chown, 165 .fo_sendfile = vn_sendfile, 166 .fo_seek = shm_seek, 167 .fo_fill_kinfo = shm_fill_kinfo, 168 .fo_mmap = shm_mmap, 169 .fo_get_seals = shm_get_seals, 170 .fo_add_seals = shm_add_seals, 171 .fo_fallocate = shm_fallocate, 172 .fo_fspacectl = shm_fspacectl, 173 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE, 174 }; 175 176 FEATURE(posix_shm, "POSIX shared memory"); 177 178 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 179 ""); 180 181 static int largepage_reclaim_tries = 1; 182 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries, 183 CTLFLAG_RWTUN, &largepage_reclaim_tries, 0, 184 "Number of contig reclaims before giving up for default alloc policy"); 185 186 static int 187 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio) 188 { 189 vm_page_t m; 190 vm_pindex_t idx; 191 size_t tlen; 192 int error, offset, rv; 193 194 idx = OFF_TO_IDX(uio->uio_offset); 195 offset = uio->uio_offset & PAGE_MASK; 196 tlen = MIN(PAGE_SIZE - offset, len); 197 198 rv = vm_page_grab_valid_unlocked(&m, obj, idx, 199 VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT); 200 if (rv == VM_PAGER_OK) 201 goto found; 202 203 /* 204 * Read I/O without either a corresponding resident page or swap 205 * page: use zero_region. This is intended to avoid instantiating 206 * pages on read from a sparse region. 207 */ 208 VM_OBJECT_WLOCK(obj); 209 m = vm_page_lookup(obj, idx); 210 if (uio->uio_rw == UIO_READ && m == NULL && 211 !vm_pager_has_page(obj, idx, NULL, NULL)) { 212 VM_OBJECT_WUNLOCK(obj); 213 return (uiomove(__DECONST(void *, zero_region), tlen, uio)); 214 } 215 216 /* 217 * Although the tmpfs vnode lock is held here, it is 218 * nonetheless safe to sleep waiting for a free page. The 219 * pageout daemon does not need to acquire the tmpfs vnode 220 * lock to page out tobj's pages because tobj is a OBJT_SWAP 221 * type object. 
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	vm_page_t m;
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pidx >= object->size)
		return (VM_PAGER_FAIL);
	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

	/*
	 * We only busy the first page in the superpage run.  It is
	 * useless to busy the whole run since we only remove full
	 * superpages, and it takes too long to busy e.g. 512 * 512 ==
	 * 262144 pages constituting a 1G amd64 superpage.
	 */
	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
	MPASS(m != NULL);

	*last = *first + atop(pagesizes[psind]) - 1;
	return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pindex >= object->size)
		return (FALSE);
	if (before != NULL) {
		*before = pindex - rounddown2(pindex, pagesizes[psind] /
		    PAGE_SIZE);
	}
	if (after != NULL) {
		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
		    pindex;
	}
	return (TRUE);
}

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind != 0) {
		atomic_subtract_long(&count_largepages[psind],
		    object->size / (pagesizes[psind] / PAGE_SIZE));
		vm_wire_sub(object->size);
	} else {
		KASSERT(object->size == 0,
		    ("largepage phys obj %p not initialized but size %#jx > 0",
		    object, (uintmax_t)object->size));
	}
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
	.phys_pg_populate = shm_largepage_phys_populate,
	.phys_pg_haspage = shm_largepage_phys_haspage,
	.phys_pg_ctor = shm_largepage_phys_ctor,
	.phys_pg_dtor = shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
	return (shmfd->shm_object->type == OBJT_PHYS);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset =
foffset_lock(fp, 0); 356 error = 0; 357 switch (whence) { 358 case L_INCR: 359 if (foffset < 0 || 360 (offset > 0 && foffset > OFF_MAX - offset)) { 361 error = EOVERFLOW; 362 break; 363 } 364 offset += foffset; 365 break; 366 case L_XTND: 367 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) { 368 error = EOVERFLOW; 369 break; 370 } 371 offset += shmfd->shm_size; 372 break; 373 case L_SET: 374 break; 375 default: 376 error = EINVAL; 377 } 378 if (error == 0) { 379 if (offset < 0 || offset > shmfd->shm_size) 380 error = EINVAL; 381 else 382 td->td_uretoff.tdu_off = offset; 383 } 384 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); 385 return (error); 386 } 387 388 static int 389 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 390 int flags, struct thread *td) 391 { 392 struct shmfd *shmfd; 393 void *rl_cookie; 394 int error; 395 396 shmfd = fp->f_data; 397 #ifdef MAC 398 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd); 399 if (error) 400 return (error); 401 #endif 402 foffset_lock_uio(fp, uio, flags); 403 rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset, 404 uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); 405 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); 406 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 407 foffset_unlock_uio(fp, uio, flags); 408 return (error); 409 } 410 411 static int 412 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 413 int flags, struct thread *td) 414 { 415 struct shmfd *shmfd; 416 void *rl_cookie; 417 int error; 418 off_t size; 419 420 shmfd = fp->f_data; 421 #ifdef MAC 422 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd); 423 if (error) 424 return (error); 425 #endif 426 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0) 427 return (EINVAL); 428 foffset_lock_uio(fp, uio, flags); 429 if (uio->uio_resid > OFF_MAX - uio->uio_offset) { 430 /* 431 * Overflow is only an error if we're supposed to expand on 432 * write. Otherwise, we'll just truncate the write to the 433 * size of the file, which can only grow up to OFF_MAX. 
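		 * For example, a 100 byte write at uio_offset == OFF_MAX - 10
		 * overflows; with SHM_GROW_ON_WRITE it fails with EFBIG,
		 * otherwise the transfer is simply clipped at the current
		 * object size.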
434 */ 435 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) { 436 foffset_unlock_uio(fp, uio, flags); 437 return (EFBIG); 438 } 439 440 size = shmfd->shm_size; 441 } else { 442 size = uio->uio_offset + uio->uio_resid; 443 } 444 if ((flags & FOF_OFFSET) == 0) { 445 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 446 &shmfd->shm_mtx); 447 } else { 448 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset, 449 size, &shmfd->shm_mtx); 450 } 451 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) { 452 error = EPERM; 453 } else { 454 error = 0; 455 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 && 456 size > shmfd->shm_size) { 457 error = shm_dotruncate_cookie(shmfd, size, rl_cookie); 458 } 459 if (error == 0) 460 error = uiomove_object(shmfd->shm_object, 461 shmfd->shm_size, uio); 462 } 463 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 464 foffset_unlock_uio(fp, uio, flags); 465 return (error); 466 } 467 468 static int 469 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred, 470 struct thread *td) 471 { 472 struct shmfd *shmfd; 473 #ifdef MAC 474 int error; 475 #endif 476 477 shmfd = fp->f_data; 478 #ifdef MAC 479 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); 480 if (error) 481 return (error); 482 #endif 483 return (shm_dotruncate(shmfd, length)); 484 } 485 486 int 487 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, 488 struct thread *td) 489 { 490 struct shmfd *shmfd; 491 struct shm_largepage_conf *conf; 492 void *rl_cookie; 493 494 shmfd = fp->f_data; 495 switch (com) { 496 case FIONBIO: 497 case FIOASYNC: 498 /* 499 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, 500 * just like it would on an unlinked regular file 501 */ 502 return (0); 503 case FIOSSHMLPGCNF: 504 if (!shm_largepage(shmfd)) 505 return (ENOTTY); 506 conf = data; 507 if (shmfd->shm_lp_psind != 0 && 508 conf->psind != shmfd->shm_lp_psind) 509 return (EINVAL); 510 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES || 511 pagesizes[conf->psind] == 0) 512 return (EINVAL); 513 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT && 514 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT && 515 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD) 516 return (EINVAL); 517 518 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 519 &shmfd->shm_mtx); 520 shmfd->shm_lp_psind = conf->psind; 521 shmfd->shm_lp_alloc_policy = conf->alloc_policy; 522 shmfd->shm_object->un_pager.phys.data_val = conf->psind; 523 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 524 return (0); 525 case FIOGSHMLPGCNF: 526 if (!shm_largepage(shmfd)) 527 return (ENOTTY); 528 conf = data; 529 rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX, 530 &shmfd->shm_mtx); 531 conf->psind = shmfd->shm_lp_psind; 532 conf->alloc_policy = shmfd->shm_lp_alloc_policy; 533 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 534 return (0); 535 default: 536 return (ENOTTY); 537 } 538 } 539 540 static int 541 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 542 { 543 struct shmfd *shmfd; 544 #ifdef MAC 545 int error; 546 #endif 547 548 shmfd = fp->f_data; 549 550 #ifdef MAC 551 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); 552 if (error) 553 return (error); 554 #endif 555 556 /* 557 * Attempt to return sanish values for fstat() on a memory file 558 * descriptor. 
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;
	if (shm_largepage(shmfd)) {
		sb->st_blocks = shmfd->shm_object->size /
		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
	}

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(base >= 0, ("%s: base %d", __func__, base));
	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
	    end));

retry:
	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
	if (m != NULL) {
		MPASS(vm_page_all_valid(m));
	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
		m = vm_page_alloc(object, idx,
		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
		if (m == NULL)
			goto retry;
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(object);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
		if (rv == VM_PAGER_OK) {
			/*
			 * Since the page was not resident, and therefore not
			 * recently accessed, immediately enqueue it for
			 * asynchronous laundering.  The current operation is
			 * not regarded as an access.
664 */ 665 vm_page_launder(m); 666 } else { 667 vm_page_free(m); 668 VM_OBJECT_WUNLOCK(object); 669 return (EIO); 670 } 671 } 672 if (m != NULL) { 673 pmap_zero_page_area(m, base, end - base); 674 KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", 675 __func__, m)); 676 vm_page_set_dirty(m); 677 vm_page_xunbusy(m); 678 } 679 680 return (0); 681 } 682 683 static int 684 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie) 685 { 686 vm_object_t object; 687 vm_pindex_t nobjsize; 688 vm_ooffset_t delta; 689 int base, error; 690 691 KASSERT(length >= 0, ("shm_dotruncate: length < 0")); 692 object = shmfd->shm_object; 693 VM_OBJECT_ASSERT_WLOCKED(object); 694 rangelock_cookie_assert(rl_cookie, RA_WLOCKED); 695 if (length == shmfd->shm_size) 696 return (0); 697 nobjsize = OFF_TO_IDX(length + PAGE_MASK); 698 699 /* Are we shrinking? If so, trim the end. */ 700 if (length < shmfd->shm_size) { 701 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) 702 return (EPERM); 703 704 /* 705 * Disallow any requests to shrink the size if this 706 * object is mapped into the kernel. 707 */ 708 if (shmfd->shm_kmappings > 0) 709 return (EBUSY); 710 711 /* 712 * Zero the truncated part of the last page. 713 */ 714 base = length & PAGE_MASK; 715 if (base != 0) { 716 error = shm_partial_page_invalidate(object, 717 OFF_TO_IDX(length), base, PAGE_SIZE); 718 if (error) 719 return (error); 720 } 721 delta = IDX_TO_OFF(object->size - nobjsize); 722 723 if (nobjsize < object->size) 724 vm_object_page_remove(object, nobjsize, object->size, 725 0); 726 727 /* Free the swap accounted for shm */ 728 swap_release_by_cred(delta, object->cred); 729 object->charge -= delta; 730 } else { 731 if ((shmfd->shm_seals & F_SEAL_GROW) != 0) 732 return (EPERM); 733 734 /* Try to reserve additional swap space. */ 735 delta = IDX_TO_OFF(nobjsize - object->size); 736 if (!swap_reserve_by_cred(delta, object->cred)) 737 return (ENOMEM); 738 object->charge += delta; 739 } 740 shmfd->shm_size = length; 741 mtx_lock(&shm_timestamp_lock); 742 vfs_timestamp(&shmfd->shm_ctime); 743 shmfd->shm_mtime = shmfd->shm_ctime; 744 mtx_unlock(&shm_timestamp_lock); 745 object->size = nobjsize; 746 return (0); 747 } 748 749 static int 750 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie) 751 { 752 vm_object_t object; 753 vm_page_t m; 754 vm_pindex_t newobjsz, oldobjsz; 755 int aflags, error, i, psind, try; 756 757 KASSERT(length >= 0, ("shm_dotruncate: length < 0")); 758 object = shmfd->shm_object; 759 VM_OBJECT_ASSERT_WLOCKED(object); 760 rangelock_cookie_assert(rl_cookie, RA_WLOCKED); 761 762 oldobjsz = object->size; 763 newobjsz = OFF_TO_IDX(length); 764 if (length == shmfd->shm_size) 765 return (0); 766 psind = shmfd->shm_lp_psind; 767 if (psind == 0 && length != 0) 768 return (EINVAL); 769 if ((length & (pagesizes[psind] - 1)) != 0) 770 return (EINVAL); 771 772 if (length < shmfd->shm_size) { 773 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) 774 return (EPERM); 775 if (shmfd->shm_kmappings > 0) 776 return (EBUSY); 777 return (ENOTSUP); /* Pages are unmanaged. 
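		 * (presumably because these wired, unmanaged pages cannot be
		 * removed from user pmaps through the usual pv-entry
		 * machinery, shrinking a largepage object is left
		 * unsupported for now)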
		 */
#if 0
		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
		object->size = newobjsz;
		shmfd->shm_size = length;
		return (0);
#endif
	}

	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
		return (EPERM);

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
		aflags |= VM_ALLOC_WAITFAIL;
	try = 0;

	/*
	 * Extend shmfd and object, keeping all already fully
	 * allocated large pages intact even on error, because the dropped
	 * object lock might have allowed them to be mapped.
	 */
	while (object->size < newobjsz) {
		m = vm_page_alloc_contig(object, object->size, aflags,
		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
		    pagesizes[psind], 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
			    (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
			    try >= largepage_reclaim_tries)) {
				VM_OBJECT_WLOCK(object);
				return (ENOMEM);
			}
			error = vm_page_reclaim_contig(aflags,
			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
			    pagesizes[psind], 0) ? 0 :
			    vm_wait_intr(object);
			if (error != 0) {
				VM_OBJECT_WLOCK(object);
				return (error);
			}
			try++;
			VM_OBJECT_WLOCK(object);
			continue;
		}
		try = 0;
		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
			if ((m[i].flags & PG_ZERO) == 0)
				pmap_zero_page(&m[i]);
			vm_page_valid(&m[i]);
			vm_page_xunbusy(&m[i]);
		}
		object->size += OFF_TO_IDX(pagesizes[psind]);
		shmfd->shm_size += pagesizes[psind];
		atomic_add_long(&count_largepages[psind], 1);
		vm_wire_add(atop(pagesizes[psind]));
	}
	return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	int error;

	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
	    rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
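 *
 * A shmfd is reference-counted via shm_hold()/shm_drop(): each open file
 * and each dictionary entry created by shm_insert() holds a reference,
 * and the rangelock, mutex and backing VM object are only torn down once
 * the last reference is dropped.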
870 */ 871 struct shmfd * 872 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage) 873 { 874 struct shmfd *shmfd; 875 876 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO); 877 shmfd->shm_size = 0; 878 shmfd->shm_uid = ucred->cr_uid; 879 shmfd->shm_gid = ucred->cr_gid; 880 shmfd->shm_mode = mode; 881 if (largepage) { 882 shmfd->shm_object = phys_pager_allocate(NULL, 883 &shm_largepage_phys_ops, NULL, shmfd->shm_size, 884 VM_PROT_DEFAULT, 0, ucred); 885 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT; 886 } else { 887 shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL, 888 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred); 889 } 890 KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate")); 891 vfs_timestamp(&shmfd->shm_birthtime); 892 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = 893 shmfd->shm_birthtime; 894 shmfd->shm_ino = alloc_unr64(&shm_ino_unr); 895 refcount_init(&shmfd->shm_refs, 1); 896 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF); 897 rangelock_init(&shmfd->shm_rl); 898 #ifdef MAC 899 mac_posixshm_init(shmfd); 900 mac_posixshm_create(ucred, shmfd); 901 #endif 902 903 return (shmfd); 904 } 905 906 struct shmfd * 907 shm_hold(struct shmfd *shmfd) 908 { 909 910 refcount_acquire(&shmfd->shm_refs); 911 return (shmfd); 912 } 913 914 void 915 shm_drop(struct shmfd *shmfd) 916 { 917 918 if (refcount_release(&shmfd->shm_refs)) { 919 #ifdef MAC 920 mac_posixshm_destroy(shmfd); 921 #endif 922 rangelock_destroy(&shmfd->shm_rl); 923 mtx_destroy(&shmfd->shm_mtx); 924 vm_object_deallocate(shmfd->shm_object); 925 free(shmfd, M_SHMFD); 926 } 927 } 928 929 /* 930 * Determine if the credentials have sufficient permissions for a 931 * specified combination of FREAD and FWRITE. 932 */ 933 int 934 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags) 935 { 936 accmode_t accmode; 937 int error; 938 939 accmode = 0; 940 if (flags & FREAD) 941 accmode |= VREAD; 942 if (flags & FWRITE) 943 accmode |= VWRITE; 944 mtx_lock(&shm_timestamp_lock); 945 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, 946 accmode, ucred); 947 mtx_unlock(&shm_timestamp_lock); 948 return (error); 949 } 950 951 static void 952 shm_init(void *arg) 953 { 954 char name[32]; 955 int i; 956 957 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF); 958 sx_init(&shm_dict_lock, "shm dictionary"); 959 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash); 960 new_unrhdr64(&shm_ino_unr, 1); 961 shm_dev_ino = devfs_alloc_cdp_inode(); 962 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized")); 963 964 for (i = 1; i < MAXPAGESIZES; i++) { 965 if (pagesizes[i] == 0) 966 break; 967 #define M (1024 * 1024) 968 #define G (1024 * M) 969 if (pagesizes[i] >= G) 970 snprintf(name, sizeof(name), "%luG", pagesizes[i] / G); 971 else if (pagesizes[i] >= M) 972 snprintf(name, sizeof(name), "%luM", pagesizes[i] / M); 973 else 974 snprintf(name, sizeof(name), "%lu", pagesizes[i]); 975 #undef G 976 #undef M 977 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages), 978 OID_AUTO, name, CTLFLAG_RD, &count_largepages[i], 979 "number of non-transient largepages allocated"); 980 } 981 } 982 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL); 983 984 /* 985 * Dictionary management. We maintain an in-kernel dictionary to map 986 * paths to shmfd objects. We use the FNV hash on the path to store 987 * the mappings in a hash table. 
988 */ 989 static struct shmfd * 990 shm_lookup(char *path, Fnv32_t fnv) 991 { 992 struct shm_mapping *map; 993 994 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 995 if (map->sm_fnv != fnv) 996 continue; 997 if (strcmp(map->sm_path, path) == 0) 998 return (map->sm_shmfd); 999 } 1000 1001 return (NULL); 1002 } 1003 1004 static void 1005 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) 1006 { 1007 struct shm_mapping *map; 1008 1009 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); 1010 map->sm_path = path; 1011 map->sm_fnv = fnv; 1012 map->sm_shmfd = shm_hold(shmfd); 1013 shmfd->shm_path = path; 1014 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); 1015 } 1016 1017 static int 1018 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) 1019 { 1020 struct shm_mapping *map; 1021 int error; 1022 1023 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 1024 if (map->sm_fnv != fnv) 1025 continue; 1026 if (strcmp(map->sm_path, path) == 0) { 1027 #ifdef MAC 1028 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); 1029 if (error) 1030 return (error); 1031 #endif 1032 error = shm_access(map->sm_shmfd, ucred, 1033 FREAD | FWRITE); 1034 if (error) 1035 return (error); 1036 map->sm_shmfd->shm_path = NULL; 1037 LIST_REMOVE(map, sm_link); 1038 shm_drop(map->sm_shmfd); 1039 free(map->sm_path, M_SHMFD); 1040 free(map, M_SHMFD); 1041 return (0); 1042 } 1043 } 1044 1045 return (ENOENT); 1046 } 1047 1048 int 1049 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode, 1050 int shmflags, struct filecaps *fcaps, const char *name __unused) 1051 { 1052 struct pwddesc *pdp; 1053 struct shmfd *shmfd; 1054 struct file *fp; 1055 char *path; 1056 void *rl_cookie; 1057 Fnv32_t fnv; 1058 mode_t cmode; 1059 int error, fd, initial_seals; 1060 bool largepage; 1061 1062 if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE | 1063 SHM_LARGEPAGE)) != 0) 1064 return (EINVAL); 1065 1066 initial_seals = F_SEAL_SEAL; 1067 if ((shmflags & SHM_ALLOW_SEALING) != 0) 1068 initial_seals &= ~F_SEAL_SEAL; 1069 1070 #ifdef CAPABILITY_MODE 1071 /* 1072 * shm_open(2) is only allowed for anonymous objects. 1073 */ 1074 if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON)) 1075 return (ECAPMODE); 1076 #endif 1077 1078 AUDIT_ARG_FFLAGS(flags); 1079 AUDIT_ARG_MODE(mode); 1080 1081 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR) 1082 return (EINVAL); 1083 1084 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0) 1085 return (EINVAL); 1086 1087 largepage = (shmflags & SHM_LARGEPAGE) != 0; 1088 if (largepage && !PMAP_HAS_LARGEPAGES) 1089 return (ENOTTY); 1090 1091 /* 1092 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd. 1093 * If the decision is made later to allow additional seals, care must be 1094 * taken below to ensure that the seals are properly set if the shmfd 1095 * already existed -- this currently assumes that only F_SEAL_SEAL can 1096 * be set and doesn't take further precautions to ensure the validity of 1097 * the seals being added with respect to current mappings. 1098 */ 1099 if ((initial_seals & ~F_SEAL_SEAL) != 0) 1100 return (EINVAL); 1101 1102 pdp = td->td_proc->p_pd; 1103 cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS; 1104 1105 /* 1106 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated 1107 * by POSIX. We allow it to be unset here so that an in-kernel 1108 * interface may be written as a thin layer around shm, optionally not 1109 * setting CLOEXEC. 
For shm_open(2), O_CLOEXEC is set unconditionally 1110 * in sys_shm_open() to keep this implementation compliant. 1111 */ 1112 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps); 1113 if (error) 1114 return (error); 1115 1116 /* A SHM_ANON path pointer creates an anonymous object. */ 1117 if (userpath == SHM_ANON) { 1118 /* A read-only anonymous object is pointless. */ 1119 if ((flags & O_ACCMODE) == O_RDONLY) { 1120 fdclose(td, fp, fd); 1121 fdrop(fp, td); 1122 return (EINVAL); 1123 } 1124 shmfd = shm_alloc(td->td_ucred, cmode, largepage); 1125 shmfd->shm_seals = initial_seals; 1126 shmfd->shm_flags = shmflags; 1127 } else { 1128 error = shm_copyin_path(td, userpath, &path); 1129 if (error != 0) { 1130 fdclose(td, fp, fd); 1131 fdrop(fp, td); 1132 return (error); 1133 } 1134 1135 AUDIT_ARG_UPATH1_CANON(path); 1136 fnv = fnv_32_str(path, FNV1_32_INIT); 1137 sx_xlock(&shm_dict_lock); 1138 shmfd = shm_lookup(path, fnv); 1139 if (shmfd == NULL) { 1140 /* Object does not yet exist, create it if requested. */ 1141 if (flags & O_CREAT) { 1142 #ifdef MAC 1143 error = mac_posixshm_check_create(td->td_ucred, 1144 path); 1145 if (error == 0) { 1146 #endif 1147 shmfd = shm_alloc(td->td_ucred, cmode, 1148 largepage); 1149 shmfd->shm_seals = initial_seals; 1150 shmfd->shm_flags = shmflags; 1151 shm_insert(path, fnv, shmfd); 1152 #ifdef MAC 1153 } 1154 #endif 1155 } else { 1156 free(path, M_SHMFD); 1157 error = ENOENT; 1158 } 1159 } else { 1160 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 1161 &shmfd->shm_mtx); 1162 1163 /* 1164 * kern_shm_open() likely shouldn't ever error out on 1165 * trying to set a seal that already exists, unlike 1166 * F_ADD_SEALS. This would break terribly as 1167 * shm_open(2) actually sets F_SEAL_SEAL to maintain 1168 * historical behavior where the underlying file could 1169 * not be sealed. 1170 */ 1171 initial_seals &= ~shmfd->shm_seals; 1172 1173 /* 1174 * Object already exists, obtain a new 1175 * reference if requested and permitted. 1176 */ 1177 free(path, M_SHMFD); 1178 1179 /* 1180 * initial_seals can't set additional seals if we've 1181 * already been set F_SEAL_SEAL. If F_SEAL_SEAL is set, 1182 * then we've already removed that one from 1183 * initial_seals. This is currently redundant as we 1184 * only allow setting F_SEAL_SEAL at creation time, but 1185 * it's cheap to check and decreases the effort required 1186 * to allow additional seals. 1187 */ 1188 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 && 1189 initial_seals != 0) 1190 error = EPERM; 1191 else if ((flags & (O_CREAT | O_EXCL)) == 1192 (O_CREAT | O_EXCL)) 1193 error = EEXIST; 1194 else if (shmflags != 0 && shmflags != shmfd->shm_flags) 1195 error = EINVAL; 1196 else { 1197 #ifdef MAC 1198 error = mac_posixshm_check_open(td->td_ucred, 1199 shmfd, FFLAGS(flags & O_ACCMODE)); 1200 if (error == 0) 1201 #endif 1202 error = shm_access(shmfd, td->td_ucred, 1203 FFLAGS(flags & O_ACCMODE)); 1204 } 1205 1206 /* 1207 * Truncate the file back to zero length if 1208 * O_TRUNC was specified and the object was 1209 * opened with read/write. 
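			 * (shm_open("/existing", O_RDWR | O_TRUNC, 0) thus
			 * behaves like an open followed by ftruncate(fd, 0),
			 * subject to the same MAC and seal checks.)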
1210 */ 1211 if (error == 0 && 1212 (flags & (O_ACCMODE | O_TRUNC)) == 1213 (O_RDWR | O_TRUNC)) { 1214 VM_OBJECT_WLOCK(shmfd->shm_object); 1215 #ifdef MAC 1216 error = mac_posixshm_check_truncate( 1217 td->td_ucred, fp->f_cred, shmfd); 1218 if (error == 0) 1219 #endif 1220 error = shm_dotruncate_locked(shmfd, 0, 1221 rl_cookie); 1222 VM_OBJECT_WUNLOCK(shmfd->shm_object); 1223 } 1224 if (error == 0) { 1225 /* 1226 * Currently we only allow F_SEAL_SEAL to be 1227 * set initially. As noted above, this would 1228 * need to be reworked should that change. 1229 */ 1230 shmfd->shm_seals |= initial_seals; 1231 shm_hold(shmfd); 1232 } 1233 rangelock_unlock(&shmfd->shm_rl, rl_cookie, 1234 &shmfd->shm_mtx); 1235 } 1236 sx_xunlock(&shm_dict_lock); 1237 1238 if (error) { 1239 fdclose(td, fp, fd); 1240 fdrop(fp, td); 1241 return (error); 1242 } 1243 } 1244 1245 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops); 1246 1247 td->td_retval[0] = fd; 1248 fdrop(fp, td); 1249 1250 return (0); 1251 } 1252 1253 /* System calls. */ 1254 #ifdef COMPAT_FREEBSD12 1255 int 1256 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap) 1257 { 1258 1259 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, 1260 uap->mode, NULL)); 1261 } 1262 #endif 1263 1264 int 1265 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap) 1266 { 1267 char *path; 1268 Fnv32_t fnv; 1269 int error; 1270 1271 error = shm_copyin_path(td, uap->path, &path); 1272 if (error != 0) 1273 return (error); 1274 1275 AUDIT_ARG_UPATH1_CANON(path); 1276 fnv = fnv_32_str(path, FNV1_32_INIT); 1277 sx_xlock(&shm_dict_lock); 1278 error = shm_remove(path, fnv, td->td_ucred); 1279 sx_xunlock(&shm_dict_lock); 1280 free(path, M_SHMFD); 1281 1282 return (error); 1283 } 1284 1285 int 1286 sys_shm_rename(struct thread *td, struct shm_rename_args *uap) 1287 { 1288 char *path_from = NULL, *path_to = NULL; 1289 Fnv32_t fnv_from, fnv_to; 1290 struct shmfd *fd_from; 1291 struct shmfd *fd_to; 1292 int error; 1293 int flags; 1294 1295 flags = uap->flags; 1296 AUDIT_ARG_FFLAGS(flags); 1297 1298 /* 1299 * Make sure the user passed only valid flags. 1300 * If you add a new flag, please add a new term here. 1301 */ 1302 if ((flags & ~( 1303 SHM_RENAME_NOREPLACE | 1304 SHM_RENAME_EXCHANGE 1305 )) != 0) { 1306 error = EINVAL; 1307 goto out; 1308 } 1309 1310 /* 1311 * EXCHANGE and NOREPLACE don't quite make sense together. Let's 1312 * force the user to choose one or the other. 
1313 */ 1314 if ((flags & SHM_RENAME_NOREPLACE) != 0 && 1315 (flags & SHM_RENAME_EXCHANGE) != 0) { 1316 error = EINVAL; 1317 goto out; 1318 } 1319 1320 /* Renaming to or from anonymous makes no sense */ 1321 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) { 1322 error = EINVAL; 1323 goto out; 1324 } 1325 1326 error = shm_copyin_path(td, uap->path_from, &path_from); 1327 if (error != 0) 1328 goto out; 1329 1330 error = shm_copyin_path(td, uap->path_to, &path_to); 1331 if (error != 0) 1332 goto out; 1333 1334 AUDIT_ARG_UPATH1_CANON(path_from); 1335 AUDIT_ARG_UPATH2_CANON(path_to); 1336 1337 /* Rename with from/to equal is a no-op */ 1338 if (strcmp(path_from, path_to) == 0) 1339 goto out; 1340 1341 fnv_from = fnv_32_str(path_from, FNV1_32_INIT); 1342 fnv_to = fnv_32_str(path_to, FNV1_32_INIT); 1343 1344 sx_xlock(&shm_dict_lock); 1345 1346 fd_from = shm_lookup(path_from, fnv_from); 1347 if (fd_from == NULL) { 1348 error = ENOENT; 1349 goto out_locked; 1350 } 1351 1352 fd_to = shm_lookup(path_to, fnv_to); 1353 if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) { 1354 error = EEXIST; 1355 goto out_locked; 1356 } 1357 1358 /* 1359 * Unconditionally prevents shm_remove from invalidating the 'from' 1360 * shm's state. 1361 */ 1362 shm_hold(fd_from); 1363 error = shm_remove(path_from, fnv_from, td->td_ucred); 1364 1365 /* 1366 * One of my assumptions failed if ENOENT (e.g. locking didn't 1367 * protect us) 1368 */ 1369 KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s", 1370 path_from)); 1371 if (error != 0) { 1372 shm_drop(fd_from); 1373 goto out_locked; 1374 } 1375 1376 /* 1377 * If we are exchanging, we need to ensure the shm_remove below 1378 * doesn't invalidate the dest shm's state. 1379 */ 1380 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) 1381 shm_hold(fd_to); 1382 1383 /* 1384 * NOTE: if path_to is not already in the hash, c'est la vie; 1385 * it simply means we have nothing already at path_to to unlink. 1386 * That is the ENOENT case. 1387 * 1388 * If we somehow don't have access to unlink this guy, but 1389 * did for the shm at path_from, then relink the shm to path_from 1390 * and abort with EACCES. 1391 * 1392 * All other errors: that is weird; let's relink and abort the 1393 * operation. 
1394 */ 1395 error = shm_remove(path_to, fnv_to, td->td_ucred); 1396 if (error != 0 && error != ENOENT) { 1397 shm_insert(path_from, fnv_from, fd_from); 1398 shm_drop(fd_from); 1399 /* Don't free path_from now, since the hash references it */ 1400 path_from = NULL; 1401 goto out_locked; 1402 } 1403 1404 error = 0; 1405 1406 shm_insert(path_to, fnv_to, fd_from); 1407 1408 /* Don't free path_to now, since the hash references it */ 1409 path_to = NULL; 1410 1411 /* We kept a ref when we removed, and incremented again in insert */ 1412 shm_drop(fd_from); 1413 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n", 1414 fd_from->shm_refs)); 1415 1416 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) { 1417 shm_insert(path_from, fnv_from, fd_to); 1418 path_from = NULL; 1419 shm_drop(fd_to); 1420 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n", 1421 fd_to->shm_refs)); 1422 } 1423 1424 out_locked: 1425 sx_xunlock(&shm_dict_lock); 1426 1427 out: 1428 free(path_from, M_SHMFD); 1429 free(path_to, M_SHMFD); 1430 return (error); 1431 } 1432 1433 static int 1434 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr, 1435 vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags, 1436 vm_ooffset_t foff, struct thread *td) 1437 { 1438 struct vmspace *vms; 1439 vm_map_entry_t next_entry, prev_entry; 1440 vm_offset_t align, mask, maxaddr; 1441 int docow, error, rv, try; 1442 bool curmap; 1443 1444 if (shmfd->shm_lp_psind == 0) 1445 return (EINVAL); 1446 1447 /* MAP_PRIVATE is disabled */ 1448 if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL | 1449 MAP_NOCORE | 1450 #ifdef MAP_32BIT 1451 MAP_32BIT | 1452 #endif 1453 MAP_ALIGNMENT_MASK)) != 0) 1454 return (EINVAL); 1455 1456 vms = td->td_proc->p_vmspace; 1457 curmap = map == &vms->vm_map; 1458 if (curmap) { 1459 error = kern_mmap_racct_check(td, map, size); 1460 if (error != 0) 1461 return (error); 1462 } 1463 1464 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT; 1465 docow |= MAP_INHERIT_SHARE; 1466 if ((flags & MAP_NOCORE) != 0) 1467 docow |= MAP_DISABLE_COREDUMP; 1468 1469 mask = pagesizes[shmfd->shm_lp_psind] - 1; 1470 if ((foff & mask) != 0) 1471 return (EINVAL); 1472 maxaddr = vm_map_max(map); 1473 #ifdef MAP_32BIT 1474 if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR) 1475 maxaddr = MAP_32BIT_MAX_ADDR; 1476 #endif 1477 if (size == 0 || (size & mask) != 0 || 1478 (*addr != 0 && ((*addr & mask) != 0 || 1479 *addr + size < *addr || *addr + size > maxaddr))) 1480 return (EINVAL); 1481 1482 align = flags & MAP_ALIGNMENT_MASK; 1483 if (align == 0) { 1484 align = pagesizes[shmfd->shm_lp_psind]; 1485 } else if (align == MAP_ALIGNED_SUPER) { 1486 if (shmfd->shm_lp_psind != 1) 1487 return (EINVAL); 1488 align = pagesizes[1]; 1489 } else { 1490 align >>= MAP_ALIGNMENT_SHIFT; 1491 align = 1ULL << align; 1492 /* Also handles overflow. 
		 */
		if (align < pagesizes[shmfd->shm_lp_psind])
			return (EINVAL);
	}

	vm_map_lock(map);
	if ((flags & MAP_FIXED) == 0) {
		try = 1;
		if (curmap && (*addr == 0 ||
		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    *addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))) {
			*addr = roundup2((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA),
			    pagesizes[shmfd->shm_lp_psind]);
		}
again:
		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
		if (rv != KERN_SUCCESS) {
			if (try == 1) {
				try = 2;
				*addr = vm_map_min(map);
				if ((*addr & mask) != 0)
					*addr = (*addr + mask) & ~mask;
				goto again;
			}
			goto fail1;
		}
	} else if ((flags & MAP_EXCL) == 0) {
		rv = vm_map_delete(map, *addr, *addr + size);
		if (rv != KERN_SUCCESS)
			goto fail1;
	} else {
		error = ENOSPC;
		if (vm_map_lookup_entry(map, *addr, &prev_entry))
			goto fail;
		next_entry = vm_map_entry_succ(prev_entry);
		if (next_entry->start < *addr + size)
			goto fail;
	}

	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
	    prot, max_prot, docow);
fail1:
	error = vm_mmap_to_errno(rv);
fail:
	vm_map_unlock(map);
	return (error);
}

static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
	    &shmfd->shm_mtx);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE is set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writeable.
	 */
	if ((flags & MAP_SHARED) == 0) {
		cap_maxprot |= VM_PROT_WRITE;
		maxprot |= VM_PROT_WRITE;
		writecnt = false;
	} else {
		if ((fp->f_flag & FWRITE) != 0 &&
		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
			maxprot |= VM_PROT_WRITE;

		/*
		 * Any mappings from a writable descriptor may be upgraded to
		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
		 * applied between the open and subsequent mmap(2).  We want to
		 * reject application of a write seal as long as any such
		 * mapping exists so that the seal cannot be trivially bypassed.
		 */
		writecnt = (maxprot & VM_PROT_WRITE) != 0;
		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
			error = EACCES;
			goto out;
		}
	}
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap().
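	 * (it rejects mappings whose size or end offset would not fit in an
	 * off_t, mirroring the overflow checks done for vnode mappings)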
*/ 1592 if ( 1593 #ifdef _LP64 1594 objsize > OFF_MAX || 1595 #endif 1596 foff > OFF_MAX - objsize) { 1597 error = EINVAL; 1598 goto out; 1599 } 1600 1601 #ifdef MAC 1602 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags); 1603 if (error != 0) 1604 goto out; 1605 #endif 1606 1607 mtx_lock(&shm_timestamp_lock); 1608 vfs_timestamp(&shmfd->shm_atime); 1609 mtx_unlock(&shm_timestamp_lock); 1610 vm_object_reference(shmfd->shm_object); 1611 1612 if (shm_largepage(shmfd)) { 1613 writecnt = false; 1614 error = shm_mmap_large(shmfd, map, addr, objsize, prot, 1615 maxprot, flags, foff, td); 1616 } else { 1617 if (writecnt) { 1618 vm_pager_update_writecount(shmfd->shm_object, 0, 1619 objsize); 1620 } 1621 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags, 1622 shmfd->shm_object, foff, writecnt, td); 1623 } 1624 if (error != 0) { 1625 if (writecnt) 1626 vm_pager_release_writecount(shmfd->shm_object, 0, 1627 objsize); 1628 vm_object_deallocate(shmfd->shm_object); 1629 } 1630 out: 1631 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1632 return (error); 1633 } 1634 1635 static int 1636 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 1637 struct thread *td) 1638 { 1639 struct shmfd *shmfd; 1640 int error; 1641 1642 error = 0; 1643 shmfd = fp->f_data; 1644 mtx_lock(&shm_timestamp_lock); 1645 /* 1646 * SUSv4 says that x bits of permission need not be affected. 1647 * Be consistent with our shm_open there. 1648 */ 1649 #ifdef MAC 1650 error = mac_posixshm_check_setmode(active_cred, shmfd, mode); 1651 if (error != 0) 1652 goto out; 1653 #endif 1654 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, 1655 VADMIN, active_cred); 1656 if (error != 0) 1657 goto out; 1658 shmfd->shm_mode = mode & ACCESSPERMS; 1659 out: 1660 mtx_unlock(&shm_timestamp_lock); 1661 return (error); 1662 } 1663 1664 static int 1665 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 1666 struct thread *td) 1667 { 1668 struct shmfd *shmfd; 1669 int error; 1670 1671 error = 0; 1672 shmfd = fp->f_data; 1673 mtx_lock(&shm_timestamp_lock); 1674 #ifdef MAC 1675 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid); 1676 if (error != 0) 1677 goto out; 1678 #endif 1679 if (uid == (uid_t)-1) 1680 uid = shmfd->shm_uid; 1681 if (gid == (gid_t)-1) 1682 gid = shmfd->shm_gid; 1683 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || 1684 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && 1685 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN))) 1686 goto out; 1687 shmfd->shm_uid = uid; 1688 shmfd->shm_gid = gid; 1689 out: 1690 mtx_unlock(&shm_timestamp_lock); 1691 return (error); 1692 } 1693 1694 /* 1695 * Helper routines to allow the backing object of a shared memory file 1696 * descriptor to be mapped in the kernel. 1697 */ 1698 int 1699 shm_map(struct file *fp, size_t size, off_t offset, void **memp) 1700 { 1701 struct shmfd *shmfd; 1702 vm_offset_t kva, ofs; 1703 vm_object_t obj; 1704 int rv; 1705 1706 if (fp->f_type != DTYPE_SHM) 1707 return (EINVAL); 1708 shmfd = fp->f_data; 1709 obj = shmfd->shm_object; 1710 VM_OBJECT_WLOCK(obj); 1711 /* 1712 * XXXRW: This validation is probably insufficient, and subject to 1713 * sign errors. It should be fixed. 
1714 */ 1715 if (offset >= shmfd->shm_size || 1716 offset + size > round_page(shmfd->shm_size)) { 1717 VM_OBJECT_WUNLOCK(obj); 1718 return (EINVAL); 1719 } 1720 1721 shmfd->shm_kmappings++; 1722 vm_object_reference_locked(obj); 1723 VM_OBJECT_WUNLOCK(obj); 1724 1725 /* Map the object into the kernel_map and wire it. */ 1726 kva = vm_map_min(kernel_map); 1727 ofs = offset & PAGE_MASK; 1728 offset = trunc_page(offset); 1729 size = round_page(size + ofs); 1730 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0, 1731 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, 1732 VM_PROT_READ | VM_PROT_WRITE, 0); 1733 if (rv == KERN_SUCCESS) { 1734 rv = vm_map_wire(kernel_map, kva, kva + size, 1735 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); 1736 if (rv == KERN_SUCCESS) { 1737 *memp = (void *)(kva + ofs); 1738 return (0); 1739 } 1740 vm_map_remove(kernel_map, kva, kva + size); 1741 } else 1742 vm_object_deallocate(obj); 1743 1744 /* On failure, drop our mapping reference. */ 1745 VM_OBJECT_WLOCK(obj); 1746 shmfd->shm_kmappings--; 1747 VM_OBJECT_WUNLOCK(obj); 1748 1749 return (vm_mmap_to_errno(rv)); 1750 } 1751 1752 /* 1753 * We require the caller to unmap the entire entry. This allows us to 1754 * safely decrement shm_kmappings when a mapping is removed. 1755 */ 1756 int 1757 shm_unmap(struct file *fp, void *mem, size_t size) 1758 { 1759 struct shmfd *shmfd; 1760 vm_map_entry_t entry; 1761 vm_offset_t kva, ofs; 1762 vm_object_t obj; 1763 vm_pindex_t pindex; 1764 vm_prot_t prot; 1765 boolean_t wired; 1766 vm_map_t map; 1767 int rv; 1768 1769 if (fp->f_type != DTYPE_SHM) 1770 return (EINVAL); 1771 shmfd = fp->f_data; 1772 kva = (vm_offset_t)mem; 1773 ofs = kva & PAGE_MASK; 1774 kva = trunc_page(kva); 1775 size = round_page(size + ofs); 1776 map = kernel_map; 1777 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, 1778 &obj, &pindex, &prot, &wired); 1779 if (rv != KERN_SUCCESS) 1780 return (EINVAL); 1781 if (entry->start != kva || entry->end != kva + size) { 1782 vm_map_lookup_done(map, entry); 1783 return (EINVAL); 1784 } 1785 vm_map_lookup_done(map, entry); 1786 if (obj != shmfd->shm_object) 1787 return (EINVAL); 1788 vm_map_remove(map, kva, kva + size); 1789 VM_OBJECT_WLOCK(obj); 1790 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); 1791 shmfd->shm_kmappings--; 1792 VM_OBJECT_WUNLOCK(obj); 1793 return (0); 1794 } 1795 1796 static int 1797 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list) 1798 { 1799 const char *path, *pr_path; 1800 size_t pr_pathlen; 1801 bool visible; 1802 1803 sx_assert(&shm_dict_lock, SA_LOCKED); 1804 kif->kf_type = KF_TYPE_SHM; 1805 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode; 1806 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size; 1807 if (shmfd->shm_path != NULL) { 1808 if (shmfd->shm_path != NULL) { 1809 path = shmfd->shm_path; 1810 pr_path = curthread->td_ucred->cr_prison->pr_path; 1811 if (strcmp(pr_path, "/") != 0) { 1812 /* Return the jail-rooted pathname. 
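			 * For example, in a jail with pr_path "/jails/j1",
			 * an object stored as "/jails/j1/foo" is reported as
			 * "/foo"; objects outside the jail root are omitted
			 * from listings rather than exposed.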
*/ 1813 pr_pathlen = strlen(pr_path); 1814 visible = strncmp(path, pr_path, pr_pathlen) 1815 == 0 && path[pr_pathlen] == '/'; 1816 if (list && !visible) 1817 return (EPERM); 1818 if (visible) 1819 path += pr_pathlen; 1820 } 1821 strlcpy(kif->kf_path, path, sizeof(kif->kf_path)); 1822 } 1823 } 1824 return (0); 1825 } 1826 1827 static int 1828 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1829 struct filedesc *fdp __unused) 1830 { 1831 int res; 1832 1833 sx_slock(&shm_dict_lock); 1834 res = shm_fill_kinfo_locked(fp->f_data, kif, false); 1835 sx_sunlock(&shm_dict_lock); 1836 return (res); 1837 } 1838 1839 static int 1840 shm_add_seals(struct file *fp, int seals) 1841 { 1842 struct shmfd *shmfd; 1843 void *rl_cookie; 1844 vm_ooffset_t writemappings; 1845 int error, nseals; 1846 1847 error = 0; 1848 shmfd = fp->f_data; 1849 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 1850 &shmfd->shm_mtx); 1851 1852 /* Even already-set seals should result in EPERM. */ 1853 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) { 1854 error = EPERM; 1855 goto out; 1856 } 1857 nseals = seals & ~shmfd->shm_seals; 1858 if ((nseals & F_SEAL_WRITE) != 0) { 1859 if (shm_largepage(shmfd)) { 1860 error = ENOTSUP; 1861 goto out; 1862 } 1863 1864 /* 1865 * The rangelock above prevents writable mappings from being 1866 * added after we've started applying seals. The RLOCK here 1867 * is to avoid torn reads on ILP32 arches as unmapping/reducing 1868 * writemappings will be done without a rangelock. 1869 */ 1870 VM_OBJECT_RLOCK(shmfd->shm_object); 1871 writemappings = shmfd->shm_object->un_pager.swp.writemappings; 1872 VM_OBJECT_RUNLOCK(shmfd->shm_object); 1873 /* kmappings are also writable */ 1874 if (writemappings > 0) { 1875 error = EBUSY; 1876 goto out; 1877 } 1878 } 1879 shmfd->shm_seals |= nseals; 1880 out: 1881 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1882 return (error); 1883 } 1884 1885 static int 1886 shm_get_seals(struct file *fp, int *seals) 1887 { 1888 struct shmfd *shmfd; 1889 1890 shmfd = fp->f_data; 1891 *seals = shmfd->shm_seals; 1892 return (0); 1893 } 1894 1895 static int 1896 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags) 1897 { 1898 vm_object_t object; 1899 vm_pindex_t pistart, pi, piend; 1900 vm_ooffset_t off, len; 1901 int startofs, endofs, end; 1902 int error; 1903 1904 off = *offset; 1905 len = *length; 1906 KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows")); 1907 if (off + len > shmfd->shm_size) 1908 len = shmfd->shm_size - off; 1909 object = shmfd->shm_object; 1910 startofs = off & PAGE_MASK; 1911 endofs = (off + len) & PAGE_MASK; 1912 pistart = OFF_TO_IDX(off); 1913 piend = OFF_TO_IDX(off + len); 1914 pi = OFF_TO_IDX(off + PAGE_MASK); 1915 error = 0; 1916 1917 /* Handle the case when offset is on or beyond shm size. */ 1918 if ((off_t)len <= 0) { 1919 *length = 0; 1920 return (0); 1921 } 1922 1923 VM_OBJECT_WLOCK(object); 1924 1925 if (startofs != 0) { 1926 end = pistart != piend ? 
PAGE_SIZE : endofs; 1927 error = shm_partial_page_invalidate(object, pistart, startofs, 1928 end); 1929 if (error) 1930 goto out; 1931 off += end - startofs; 1932 len -= end - startofs; 1933 } 1934 1935 if (pi < piend) { 1936 vm_object_page_remove(object, pi, piend, 0); 1937 off += IDX_TO_OFF(piend - pi); 1938 len -= IDX_TO_OFF(piend - pi); 1939 } 1940 1941 if (endofs != 0 && pistart != piend) { 1942 error = shm_partial_page_invalidate(object, piend, 0, endofs); 1943 if (error) 1944 goto out; 1945 off += endofs; 1946 len -= endofs; 1947 } 1948 1949 out: 1950 VM_OBJECT_WUNLOCK(shmfd->shm_object); 1951 *offset = off; 1952 *length = len; 1953 return (error); 1954 } 1955 1956 static int 1957 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags, 1958 struct ucred *active_cred, struct thread *td) 1959 { 1960 void *rl_cookie; 1961 struct shmfd *shmfd; 1962 off_t off, len; 1963 int error; 1964 1965 /* This assumes that the caller already checked for overflow. */ 1966 error = EINVAL; 1967 shmfd = fp->f_data; 1968 off = *offset; 1969 len = *length; 1970 1971 if (cmd != SPACECTL_DEALLOC || off < 0 || len <= 0 || 1972 len > OFF_MAX - off || flags != 0) 1973 return (EINVAL); 1974 1975 rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len, 1976 &shmfd->shm_mtx); 1977 switch (cmd) { 1978 case SPACECTL_DEALLOC: 1979 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) { 1980 error = EPERM; 1981 break; 1982 } 1983 error = shm_deallocate(shmfd, &off, &len, flags); 1984 *offset = off; 1985 *length = len; 1986 break; 1987 default: 1988 __assert_unreachable(); 1989 } 1990 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1991 return (error); 1992 } 1993 1994 1995 static int 1996 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td) 1997 { 1998 void *rl_cookie; 1999 struct shmfd *shmfd; 2000 size_t size; 2001 int error; 2002 2003 /* This assumes that the caller already checked for overflow. */ 2004 error = 0; 2005 shmfd = fp->f_data; 2006 size = offset + len; 2007 2008 /* 2009 * Just grab the rangelock for the range that we may be attempting to 2010 * grow, rather than blocking read/write for regions we won't be 2011 * touching while this (potential) resize is in progress. Other 2012 * attempts to resize the shmfd will have to take a write lock from 0 to 2013 * OFF_MAX, so this being potentially beyond the current usable range of 2014 * the shmfd is not necessarily a concern. If other mechanisms are 2015 * added to grow a shmfd, this may need to be re-evaluated. 2016 */ 2017 rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size, 2018 &shmfd->shm_mtx); 2019 if (size > shmfd->shm_size) 2020 error = shm_dotruncate_cookie(shmfd, size, rl_cookie); 2021 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 2022 /* Translate to posix_fallocate(2) return value as needed. 
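	 * posix_fallocate(2) is specified to fail with ENOSPC when there is
	 * insufficient space, while a failed swap reservation surfaces from
	 * shm_dotruncate_cookie() as ENOMEM, hence the remapping that
	 * follows.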
*/ 2023 if (error == ENOMEM) 2024 error = ENOSPC; 2025 return (error); 2026 } 2027 2028 static int 2029 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS) 2030 { 2031 struct shm_mapping *shmm; 2032 struct sbuf sb; 2033 struct kinfo_file kif; 2034 u_long i; 2035 ssize_t curlen; 2036 int error, error2; 2037 2038 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req); 2039 sbuf_clear_flags(&sb, SBUF_INCLUDENUL); 2040 curlen = 0; 2041 error = 0; 2042 sx_slock(&shm_dict_lock); 2043 for (i = 0; i < shm_hash + 1; i++) { 2044 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) { 2045 error = shm_fill_kinfo_locked(shmm->sm_shmfd, 2046 &kif, true); 2047 if (error == EPERM) { 2048 error = 0; 2049 continue; 2050 } 2051 if (error != 0) 2052 break; 2053 pack_kinfo(&kif); 2054 if (req->oldptr != NULL && 2055 kif.kf_structsize + curlen > req->oldlen) 2056 break; 2057 error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ? 2058 0 : ENOMEM; 2059 if (error != 0) 2060 break; 2061 curlen += kif.kf_structsize; 2062 } 2063 } 2064 sx_sunlock(&shm_dict_lock); 2065 error2 = sbuf_finish(&sb); 2066 sbuf_delete(&sb); 2067 return (error != 0 ? error : error2); 2068 } 2069 2070 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list, 2071 CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE, 2072 NULL, 0, sysctl_posix_shm_list, "", 2073 "POSIX SHM list"); 2074 2075 int 2076 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode, 2077 struct filecaps *caps) 2078 { 2079 2080 return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL)); 2081 } 2082 2083 /* 2084 * This version of the shm_open() interface leaves CLOEXEC behavior up to the 2085 * caller, and libc will enforce it for the traditional shm_open() call. This 2086 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This 2087 * interface also includes a 'name' argument that is currently unused, but could 2088 * potentially be exported later via some interface for debugging purposes. 2089 * From the kernel's perspective, it is optional. Individual consumers like 2090 * memfd_create() may require it in order to be compatible with other systems 2091 * implementing the same function. 2092 */ 2093 int 2094 sys_shm_open2(struct thread *td, struct shm_open2_args *uap) 2095 { 2096 2097 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode, 2098 uap->shmflags, NULL, uap->name)); 2099 } 2100