/*-
 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
 */

/*
 * tmpfs vnode interface.
 */

#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vfsops.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <vfs/fifofs/fifo.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include "tmpfs.h"

static void tmpfs_strategy_done(struct bio *bio);
static void tmpfs_move_pages(vm_object_t src, vm_object_t dst, int movflags);

/*
 * bufcache_mode:
 *	0	Normal page queue operation on flush.  Run through the buffer
 *		cache if free memory is under the minimum.
 *
 *	1	Try to keep in memory, but run through the buffer cache if
 *		the system is under memory pressure (though this might just
 *		require inactive cleaning).
 *
 *	2	Be a bit more aggressive when running writes through the
 *		buffer cache when the system is under memory pressure.
 *
 *	3	Always run tmpfs writes through the buffer cache, thus forcing
 *		them out to swap.
 */
__read_mostly static int tmpfs_cluster_rd_enable = 1;
__read_mostly static int tmpfs_cluster_wr_enable = 1;
__read_mostly int tmpfs_bufcache_mode = 0;
SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "TMPFS filesystem");
SYSCTL_INT(_vfs_tmpfs, OID_AUTO, cluster_rd_enable, CTLFLAG_RW,
	   &tmpfs_cluster_rd_enable, 0, "");
SYSCTL_INT(_vfs_tmpfs, OID_AUTO, cluster_wr_enable, CTLFLAG_RW,
	   &tmpfs_cluster_wr_enable, 0, "");
SYSCTL_INT(_vfs_tmpfs, OID_AUTO, bufcache_mode, CTLFLAG_RW,
	   &tmpfs_bufcache_mode, 0, "");

#define TMPFS_MOVF_FROMBACKING	0x0001
#define TMPFS_MOVF_DEACTIVATE	0x0002


static __inline
void
tmpfs_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}


/* --------------------------------------------------------------------- */

static int
tmpfs_nresolve(struct vop_nresolve_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = NULL;
	struct namecache *ncp = ap->a_nch->ncp;
	struct tmpfs_node *tnode;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;
	int error;

	dnode = VP_TO_TMPFS_DIR(dvp);

	TMPFS_NODE_LOCK_SH(dnode);
loop:
	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	if (de == NULL) {
		error = ENOENT;
	} else {
		/*
		 * Allocate a vnode for the node we found.  Use
		 * tmpfs_alloc_vp()'s deadlock handling mode.
		 */
		tnode = de->td_node;
		error = tmpfs_alloc_vp(dvp->v_mount, dnode, tnode,
				       LK_EXCLUSIVE | LK_RETRY, &vp);
		if (error == EAGAIN)
			goto loop;
		if (error)
			goto out;
		KKASSERT(vp);
	}

out:
	TMPFS_NODE_UNLOCK(dnode);

	if ((dnode->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(dnode);
		dnode->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(dnode);
	}

	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 */
	if (vp) {
		vn_unlock(vp);
		cache_setvp(ap->a_nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
	return (error);
}

static int
tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
	struct ucred *cred = ap->a_cred;
	int error;

	*vpp = NULL;

	/* Check accessibility of requested node as a first step. */
	error = VOP_ACCESS(dvp, VEXEC, cred);
	if (error != 0)
		return error;

	if (dnode->tn_dir.tn_parent != NULL) {
		/* Allocate a new vnode on the matching entry. */
		error = tmpfs_alloc_vp(dvp->v_mount,
				       NULL, dnode->tn_dir.tn_parent,
				       LK_EXCLUSIVE | LK_RETRY, vpp);

		if (*vpp)
			vn_unlock(*vpp);
	}
	return (*vpp == NULL) ?
	       ENOENT : 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_ncreate(struct vop_ncreate_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}
	return (error);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmknod(struct vop_nmknod_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	if (vap->va_type != VBLK && vap->va_type != VCHR &&
	    vap->va_type != VFIFO) {
		return (EINVAL);
	}

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int mode = ap->a_mode;
	struct tmpfs_node *node;
	int error;

	node = VP_TO_TMPFS_NODE(vp);

#if 0
	/* The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die. */
	if (node->tn_links < 1)
		return (ENOENT);
#endif

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		error = EPERM;
	} else {
		if (node->tn_reg.tn_pages_in_aobj) {
			TMPFS_NODE_LOCK(node);
			if (node->tn_reg.tn_pages_in_aobj) {
				tmpfs_move_pages(node->tn_reg.tn_aobj,
						 vp->v_object,
						 TMPFS_MOVF_FROMBACKING);
				node->tn_reg.tn_pages_in_aobj = 0;
			}
			TMPFS_NODE_UNLOCK(node);
		}
		error = vop_stdopen(ap);
	}

	return (error);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	int error;

	node = VP_TO_TMPFS_NODE(vp);

	if (node->tn_links > 0) {
		/*
		 * Update node times.  No need to do it if the node has
		 * been deleted, because it will vanish after we return.
		 */
		tmpfs_update(vp);
	}

	error = vop_stdclose(ap);

	return (error);
}

/* --------------------------------------------------------------------- */

int
tmpfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	switch (vp->v_type) {
	case VDIR:
		/* FALLTHROUGH */
	case VLNK:
		/* FALLTHROUGH */
	case VREG:
		if ((ap->a_mode & VWRITE) &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto out;
		}
		break;

	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		/* FALLTHROUGH */
	case VFIFO:
		break;

	default:
		error = EINVAL;
		goto out;
	}

	if ((ap->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
		error = EPERM;
		goto out;
	}

	error = vop_helper_access(ap, node->tn_uid, node->tn_gid,
				  node->tn_mode, 0);
out:
	return error;
}

/* --------------------------------------------------------------------- */

int
tmpfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime.tv_sec = node->tn_atime;
	vap->va_atime.tv_nsec = node->tn_atimensec;
	vap->va_mtime.tv_sec = node->tn_mtime;
	vap->va_mtime.tv_nsec = node->tn_mtimensec;
	vap->va_ctime.tv_sec = node->tn_ctime;
	vap->va_ctime.tv_nsec = node->tn_ctimensec;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		vap->va_rmajor = umajor(node->tn_rdev);
		vap->va_rminor = uminor(node->tn_rdev);
	}
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = 0;

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_getattr_quick(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update(vp);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		vap->va_rmajor = umajor(node->tn_rdev);
		vap->va_rminor = uminor(node->tn_rdev);
	}
	vap->va_bytes = -1;
	vap->va_filerev = 0;

	return 0;
}


/* --------------------------------------------------------------------- */

int
tmpfs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	int error = 0;
	int kflags = 0;

	TMPFS_NODE_LOCK(node);
	if (error == 0 && (vap->va_flags != VNOVAL)) {
		error = tmpfs_chflags(vp, vap->va_flags, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && (vap->va_size != VNOVAL)) {
		/* restore any saved pages before proceeding */
		if (node->tn_reg.tn_pages_in_aobj) {
			tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object,
					 TMPFS_MOVF_FROMBACKING |
					 TMPFS_MOVF_DEACTIVATE);
			node->tn_reg.tn_pages_in_aobj = 0;
		}
		if (vap->va_size > node->tn_size)
			kflags |= NOTE_WRITE | NOTE_EXTEND;
		else
			kflags |= NOTE_WRITE;
		error = tmpfs_chsize(vp, vap->va_size, cred);
	}

	if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
			   vap->va_gid != (gid_t)VNOVAL)) {
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
		error = tmpfs_chmod(vp, vap->va_mode, cred);
		kflags |= NOTE_ATTRIB;
	}

	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
			    vap->va_atime.tv_nsec != VNOVAL) ||
			   (vap->va_mtime.tv_sec != VNOVAL &&
			    vap->va_mtime.tv_nsec != VNOVAL) )) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
				      vap->va_vaflags, cred);
		kflags |= NOTE_ATTRIB;
	}

	/*
	 * Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update.
	 */
	tmpfs_update(vp);
	TMPFS_NODE_UNLOCK(node);
	tmpfs_knote(vp, kflags);

	return (error);
}

/* --------------------------------------------------------------------- */

/*
 * fsync is usually a NOP, but we must take action when unmounting or
 * when recycling.
 */
static int
tmpfs_fsync(struct vop_fsync_args *ap)
{
	struct tmpfs_node *node;
	struct vnode *vp = ap->a_vp;

	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * tmpfs vnodes typically remain dirty, avoid long syncer scans
	 * by forcing removal from the syncer list.
	 */
	vn_syncer_remove(vp, 1);

	tmpfs_update(vp);
	if (vp->v_type == VREG) {
		if (vp->v_flag & VRECLAIMED) {
			if (node->tn_links == 0)
				tmpfs_truncate(vp, 0);
			else
				vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
		}
	}

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_read(struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct tmpfs_node *node;
	off_t base_offset;
	size_t offset;
	size_t len;
	size_t resid;
	int error;
	int seqcount;

	/*
	 * Check the basics
	 */
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Extract node, try to shortcut the operation through
	 * the VM page cache, allowing us to avoid buffer cache
	 * overheads.
	 */
	node = VP_TO_TMPFS_NODE(vp);
	resid = uio->uio_resid;
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	error = vop_helper_read_shortcut(ap);
	if (error)
		return error;
	if (uio->uio_resid == 0) {
		if (resid)
			goto finished;
		return error;
	}

	/*
	 * restore any saved pages before proceeding
	 */
	if (node->tn_reg.tn_pages_in_aobj) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_reg.tn_pages_in_aobj) {
			tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object,
					 TMPFS_MOVF_FROMBACKING);
			node->tn_reg.tn_pages_in_aobj = 0;
		}
		TMPFS_NODE_UNLOCK(node);
	}

	/*
	 * Fall-through to our normal read code.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset,
				 node->tn_blksize, GETBLK_KVABIO);
		if (bp == NULL) {
			if (tmpfs_cluster_rd_enable) {
				error = cluster_readx(vp, node->tn_size,
						      base_offset,
						      node->tn_blksize,
						      B_NOTMETA | B_KVABIO,
						      uio->uio_resid,
						      seqcount * MAXBSIZE,
						      &bp);
			} else {
				error = bread_kvabio(vp, base_offset,
						     node->tn_blksize, &bp);
			}
			if (error) {
				brelse(bp);
				kprintf("tmpfs_read bread error %d\n", error);
				break;
			}

			/*
			 * tmpfs pretty much fiddles directly with the VM
			 * system, don't let it exhaust it or we won't play
			 * nice with other processes.
			 *
			 * Only do this if the VOP is coming from a normal
			 * read/write.  The VM system handles the case for
			 * UIO_NOCOPY.
			 */
			if (uio->uio_segflg != UIO_NOCOPY)
				vm_wait_nominal();
		}
		bp->b_flags |= B_CLUSTEROK;
		bkvasync(bp);

		/*
		 * Figure out how many bytes we can actually copy this loop.
		 */
		len = node->tn_blksize - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > node->tn_size - uio->uio_offset)
			len = (size_t)(node->tn_size - uio->uio_offset);

		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("tmpfs_read uiomove error %d\n", error);
			break;
		}
	}

finished:
	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return (error);
}

static int
tmpfs_write(struct vop_write_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct tmpfs_node *node;
	boolean_t extended;
	off_t oldsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int trivial = 0;
	int kflags = 0;
	int seqcount;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG)
		return (EINVAL);
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

	TMPFS_NODE_LOCK(node);

	/*
	 * restore any saved pages before proceeding
	 */
	if (node->tn_reg.tn_pages_in_aobj) {
		tmpfs_move_pages(node->tn_reg.tn_aobj, vp->v_object,
				 TMPFS_MOVF_FROMBACKING);
		node->tn_reg.tn_pages_in_aobj = 0;
	}

	oldsize = node->tn_size;
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;

	/*
	 * Check for illegal write offsets.
	 */
	if (uio->uio_offset + uio->uio_resid >
	    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
		error = EFBIG;
		goto done;
	}

	/*
	 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN).
	 */
	if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error)
			goto done;
		if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			error = EFBIG;
			goto done;
		}
	}

	/*
	 * Extend the file's size if necessary
	 */
	extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

	while (uio->uio_resid > 0) {
		/*
		 * Don't completely blow out running buffer I/O
		 * when being hit from the pageout daemon.
		 */
		if (uio->uio_segflg == UIO_NOCOPY &&
		    (ap->a_ioflag & IO_RECURSE) == 0) {
			bwillwrite(node->tn_blksize);
		}

		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 *
		 * Calculate the maximum bytes we can write to the buffer at
		 * this offset (after resizing).
		 */
		offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		len = uio->uio_resid;
		if (len > TMPFS_BLKSIZE - offset)
			len = TMPFS_BLKSIZE - offset;

		if ((uio->uio_offset + len) > node->tn_size) {
			trivial = (uio->uio_offset <= node->tn_size);
			error = tmpfs_reg_resize(vp, uio->uio_offset + len,
						 trivial);
			if (error)
				break;
		}

		/*
		 * Read to fill in any gaps.  Theoretically we could
		 * optimize this if the write covers the entire buffer
		 * and is not a UIO_NOCOPY write, however this can lead
		 * to a security violation exposing random kernel memory
		 * (whatever junk was in the backing VM pages before).
		 *
		 * So just use bread() to do the right thing.
		 */
		error = bread_kvabio(vp, base_offset, node->tn_blksize, &bp);
		bkvasync(bp);
		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		if (error) {
			kprintf("tmpfs_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (uio->uio_offset > node->tn_size) {
			node->tn_size = uio->uio_offset;
			kflags |= NOTE_EXTEND;
		}
		kflags |= NOTE_WRITE;

		/*
		 * UIO_NOCOPY is a sensitive state due to potentially being
		 * issued from the pageout daemon while in a low-memory
		 * situation.  However, in order to cluster the I/O nicely
		 * (e.g. 64KB+ writes instead of 16KB writes), we still try
		 * to follow the same semantics that any other filesystem
		 * might use.
		 *
		 * For the normal case we buwrite(), dirtying the underlying
		 * VM pages instead of dirtying the buffer and releasing the
		 * buffer as a clean buffer.  This allows tmpfs to use
		 * essentially all available memory to cache file data.
		 * If we used bdwrite() the buffer cache would wind up
		 * flushing the data to swap too quickly.
		 *
		 * But because tmpfs can seriously load the VM system we
		 * fall-back to using bdwrite() when free memory starts
		 * to get low.  This shifts the load away from the VM system
		 * and makes tmpfs act more like a normal filesystem with
		 * regards to disk activity.
		 *
		 * tmpfs pretty much fiddles directly with the VM
		 * system, don't let it exhaust it or we won't play
		 * nice with other processes.  Only do this if the
		 * VOP is coming from a normal read/write.  The VM system
		 * handles the case for UIO_NOCOPY.
		 */
		bp->b_flags |= B_CLUSTEROK;
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Flush from the pageout daemon, deal with potentially
			 * very heavy tmpfs write activity causing long stalls
			 * in the pageout daemon before pages get to free/cache.
			 *
			 * We have to be careful not to bypass the page queues
			 * entirely or we can cause write-read thrashing and
			 * delay the paging of data that is more pageable than
			 * our current data.
			 *
			 * (a) Under severe pressure setting B_DIRECT will
			 *     cause a buffer release to try to free the
			 *     underlying pages.
			 *
			 * (b) Under modest memory pressure, setting the B_AGE
			 *     flag retires the buffer and its underlying
			 *     pages more quickly than normal.
			 *
			 *     We could also force this by setting B_NOTMETA
			 *     but that might have other unintended side-
			 *     effects (e.g. setting PG_NOTMETA on the VM page).
			 *
			 * (c) For the pageout->putpages->generic_putpages->
			 *     UIO_NOCOPY-write (here), issuing an immediate
			 *     write prevents any real clustering from
			 *     happening because the buffers probably aren't
			 *     (yet) marked dirty, or lost due to prior use
			 *     of buwrite().  Try to use the normal
			 *     cluster_write() mechanism for performance.
			 *
			 * Hopefully this will unblock the VM system more
			 * quickly under extreme tmpfs write load.
			 */
			if (tmpfs_bufcache_mode >= 2) {
				if (vm_page_count_min(vm_page_free_hysteresis))
					bp->b_flags |= B_DIRECT | B_TTC;
				if (vm_pages_needed || vm_paging_needed(0))
					bp->b_flags |= B_AGE;
			}
			bp->b_flags |= B_RELBUF;
			bp->b_act_count = 0;	/* buffer->deactivate pgs */
			if (tmpfs_cluster_wr_enable &&
			    (ap->a_ioflag & (IO_SYNC | IO_DIRECT)) == 0) {
				cluster_write(bp, node->tn_size,
					      node->tn_blksize, seqcount);
			} else {
				cluster_awrite(bp);
			}
		} else if (vm_page_count_min(0) ||
			   ((vm_pages_needed || vm_paging_needed(0)) &&
			    tmpfs_bufcache_mode >= 1)) {
			/*
			 * If the pageout daemon is running we cycle the
			 * write through the buffer cache normally to
			 * pipeline the flush, thus avoiding adding any
			 * more memory pressure to the pageout daemon.
			 */
			bp->b_act_count = 0;	/* buffer->deactivate pgs */
			if (tmpfs_cluster_wr_enable) {
				cluster_write(bp, node->tn_size,
					      node->tn_blksize, seqcount);
			} else {
				bdwrite(bp);
			}
		} else {
			/*
			 * Otherwise run the buffer directly through to the
			 * backing VM store, leaving the buffer clean so
			 * buffer limits do not force early flushes to swap.
			 */
			buwrite(bp);
			/*vm_wait_nominal();*/
		}

		if (bp->b_error) {
			kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
			break;
		}
	}

	if (error) {
		if (extended) {
			(void)tmpfs_reg_resize(vp, oldsize, trivial);
			kflags &= ~NOTE_EXTEND;
		}
		goto done;
	}

	/*
	 * Currently we don't set the mtime on files modified via mmap()
	 * because we can't tell the difference between those modifications
	 * and an attempt by the pageout daemon to flush tmpfs pages to
	 * swap.
	 *
	 * This is because in order to defer flushes as long as possible
	 * buwrite() works by marking the underlying VM pages dirty in
	 * order to be able to dispose of the buffer cache buffer without
	 * flushing it.
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		if (vp->v_flag & VLASTWRITETS) {
			node->tn_mtime = vp->v_lastwrite_ts.tv_sec;
			node->tn_mtimensec = vp->v_lastwrite_ts.tv_nsec;
		}
	} else {
		node->tn_status |= TMPFS_NODE_MODIFIED;
		vclrflags(vp, VLASTWRITETS);
	}

	if (extended)
		node->tn_status |= TMPFS_NODE_CHANGED;

	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
done:
	TMPFS_NODE_UNLOCK(node);
	if (kflags)
		tmpfs_knote(vp, kflags);

	return(error);
}

static int
tmpfs_advlock(struct vop_advlock_args *ap)
{
	struct tmpfs_node *node;
	struct vnode *vp = ap->a_vp;
	int error;

	node = VP_TO_TMPFS_NODE(vp);
	error = (lf_advlock(ap, &node->tn_advlock, node->tn_size));

	return (error);
}

/*
 * The strategy function is typically only called when memory pressure
 * forces the system to attempt to pageout pages.  It can also be called
 * by [n]vtruncbuf() when a truncation cuts a page in half.  Normal write
 * operations
 *
 * We set VKVABIO for VREG files so bp->b_data may not be synchronized to
 * our cpu.  swap_pager_strategy() is all we really use, and it directly
 * supports this.
 */
static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;
	vm_page_t m;
	int i;

	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return(0);
	}

	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Don't bother flushing to swap if there is no swap, just
	 * ensure that the pages are marked as needing a commit (still).
	 */
	if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
#if 0
		/*
		 * XXX removed, this does not work well because under heavy
		 * filesystem loads it often
		 * forces the data to be read right back in again after
		 * being written due to bypassing normal LRU operation.
		 *
		 * Tell the buffer cache to try to recycle the pages
		 * to PQ_CACHE on release.
		 */
		if (tmpfs_bufcache_mode >= 2 ||
		    (tmpfs_bufcache_mode == 1 && vm_paging_needed(0))) {
			bp->b_flags |= B_TTC;
		}
#endif
		nbio = push_bio(bio);
		nbio->bio_done = tmpfs_strategy_done;
		nbio->bio_offset = bio->bio_offset;
		swap_pager_strategy(uobj, nbio);
	}
	return 0;
}

/*
 * If we were unable to commit the pages to swap make sure they are marked
 * as needing a commit (again).  If we were, clear the flag to allow the
 * pages to be freed.
 *
 * Do not error-out the buffer.  In particular, vinvalbuf() needs to
 * always work.
 */
static void
tmpfs_strategy_done(struct bio *bio)
{
	struct buf *bp;
	vm_page_t m;
	int i;

	bp = bio->bio_buf;

	if (bp->b_flags & B_ERROR) {
		bp->b_flags &= ~B_ERROR;
		bp->b_error = 0;
		bp->b_resid = 0;
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
	} else {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_clear_commit(m);
		}
	}
	bio = pop_bio(bio);
	biodone(bio);
}

/*
 * To make write clustering work well make the backing store look
 * contiguous to the cluster_*() code.  The swap_strategy() function
 * will take it from there.
 *
 * Use MAXBSIZE-sized chunks as a micro-optimization to make random
 * flushes leave full-sized gaps.
 */
static int
tmpfs_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE - (ap->a_loffset & (MAXBSIZE - 1));
	if (ap->a_runb != NULL)
		*ap->a_runb = ap->a_loffset & (MAXBSIZE - 1);

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nremove(struct vop_nremove_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vnode *vp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	/*
	 * We have to acquire the vp from ap->a_nch because we will likely
	 * unresolve the namecache entry, and a vrele/vput is needed to
	 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
	 *
	 * We have to use vget to clear any inactive state on the vnode,
	 * otherwise the vnode may remain inactive and thus tmpfs_inactive
	 * will not get called when we release it.
	 */
	error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
	KKASSERT(vp->v_mount == dvp->v_mount);
	KKASSERT(error == 0);
	vn_unlock(vp);

	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out2;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	TMPFS_NODE_LOCK(dnode);
	TMPFS_NODE_LOCK(node);
	de = tmpfs_dir_lookup(dnode, node, ncp);
	if (de == NULL) {
		error = ENOENT;
		TMPFS_NODE_UNLOCK(node);
		TMPFS_NODE_UNLOCK(dnode);
		goto out;
	}

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		TMPFS_NODE_UNLOCK(node);
		TMPFS_NODE_UNLOCK(dnode);
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach_locked(dnode, de);
	TMPFS_NODE_UNLOCK(dnode);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed.
	 */
	tmpfs_free_dirent(tmp, de);

	if (node->tn_links > 0)
		node->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(node);

	cache_unlink(ap->a_nch);
	tmpfs_knote(vp, NOTE_DELETE);
	error = 0;

out:
	if (error == 0)
		tmpfs_knote(dvp, NOTE_WRITE);
out2:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nlink(struct vop_nlink_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_mount *tmp = VFS_TO_TMPFS(vp->v_mount);
	struct namecache *ncp = ap->a_nch->ncp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;
	struct tmpfs_node *dnode;
	int error;

	KKASSERT(dvp != vp); /* XXX When can this be false? */

	node = VP_TO_TMPFS_NODE(vp);
	dnode = VP_TO_TMPFS_NODE(dvp);
	TMPFS_NODE_LOCK(dnode);

	/* XXX: Why aren't the following two tests done by the caller? */

	/* Hard links of directories are forbidden. */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	/* Cannot create cross-device links. */
	if (dvp->v_mount != vp->v_mount) {
		error = EXDEV;
		goto out;
	}

	/* Cannot hard-link into a deleted directory */
	if (dnode != tmp->tm_root && dnode->tn_dir.tn_parent == NULL) {
		error = ENOENT;
		goto out;
	}

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	KKASSERT(node->tn_links <= LINK_MAX);
	if (node->tn_links >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
				   ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	tmpfs_dir_attach_locked(dnode, de);

	/* vp link count has changed, so update node times. */

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(node);
	tmpfs_update(vp);

	tmpfs_knote(vp, NOTE_LINK);
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, vp);
	error = 0;

out:
	TMPFS_NODE_UNLOCK(dnode);
	if (error == 0)
		tmpfs_knote(dvp, NOTE_WRITE);
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrename(struct vop_nrename_args *ap)
{
	struct vnode *fdvp = ap->a_fdvp;
	struct namecache *fncp = ap->a_fnch->ncp;
	struct vnode *fvp = fncp->nc_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct namecache *tncp = ap->a_tnch->ncp;
	struct vnode *tvp;
	struct tmpfs_dirent *de, *tde;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *tdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	char *newname;
	char *oldname;
	int error;

	KKASSERT(fdvp->v_mount == fvp->v_mount);

	/*
	 * Because tvp can get overwritten we have to vget it instead of
	 * just vref or use it, otherwise its VINACTIVE flag may not get
	 * cleared and the node won't get destroyed.
	 */
	error = cache_vget(ap->a_tnch, ap->a_cred, LK_SHARED, &tvp);
	if (error == 0) {
		tnode = VP_TO_TMPFS_NODE(tvp);
		vn_unlock(tvp);
	} else {
		tnode = NULL;
	}

	/* Disallow cross-device renames.
	 * XXX Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);

	/* If source and target are the same file, there is nothing to do. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);

	tmpfs_lock4(fdnode, tdnode, fnode, tnode);

	/*
	 * Cannot rename into a deleted directory
	 */
	if (tdnode != tmp->tm_root && tdnode->tn_dir.tn_parent == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	/* Avoid manipulating '.' and '..' entries. */
	de = tmpfs_dir_lookup(fdnode, fnode, fncp);
	if (de == NULL) {
		error = ENOENT;
		goto out_locked;
	}
	KKASSERT(de->td_node == fnode);

	/*
	 * If replacing an entry in the target directory and that entry
	 * is a directory, it must be empty.
	 *
	 * Kern_rename guarantees the destination to be a directory
	 * if the source is one (it does?).
	 */
	if (tvp != NULL) {
		KKASSERT(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			KKASSERT(fnode->tn_type != VDIR &&
				 tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
	    (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/*
	 * Ensure that we have enough memory to hold the new name, if it
	 * has to be changed.
	 */
	if (fncp->nc_nlen != tncp->nc_nlen ||
	    bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
		newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
				  M_WAITOK | M_NULLOK);
		if (newname == NULL) {
			error = ENOSPC;
			goto out_locked;
		}
		bcopy(tncp->nc_name, newname, tncp->nc_nlen);
		newname[tncp->nc_nlen] = '\0';
	} else {
		newname = NULL;
	}

	/*
	 * Unlink entry from source directory.  Note that the kernel has
	 * already checked for illegal recursion cases (renaming a directory
	 * into a subdirectory of itself).
	 */
	if (fdnode != tdnode) {
		tmpfs_dir_detach_locked(fdnode, de);
	} else {
		/* XXX depend on namecache lock */
		KKASSERT(de == tmpfs_dir_lookup(fdnode, fnode, fncp));
		RB_REMOVE(tmpfs_dirtree, &fdnode->tn_dir.tn_dirtree, de);
		RB_REMOVE(tmpfs_dirtree_cookie,
			  &fdnode->tn_dir.tn_cookietree, de);
	}

	/*
	 * Handle any name change.  Swap with newname, we will
	 * deallocate it at the end.
	 */
	if (newname != NULL) {
		oldname = de->td_name;
		de->td_name = newname;
		de->td_namelen = (uint16_t)tncp->nc_nlen;
		newname = oldname;
	}

	/*
	 * If we are overwriting an entry, we have to remove the old one
	 * from the target directory.
	 */
	if (tvp != NULL) {
		/* Remove the old entry from the target directory. */
		tde = tmpfs_dir_lookup(tdnode, tnode, tncp);
		tmpfs_dir_detach_locked(tdnode, tde);
		tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);

		/*
		 * Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed.
		 */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
		/*cache_inval_vp(tvp, CINV_DESTROY);*/
	}

	/*
	 * Link entry to target directory.  If the entry
	 * represents a directory move the parent linkage
	 * as well.
	 */
	if (fdnode != tdnode) {
		if (de->td_node->tn_type == VDIR) {
			TMPFS_VALIDATE_DIR(fnode);
		}
		tmpfs_dir_attach_locked(tdnode, de);
	} else {
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
		RB_INSERT(tmpfs_dirtree, &tdnode->tn_dir.tn_dirtree, de);
		RB_INSERT(tmpfs_dirtree_cookie,
			  &tdnode->tn_dir.tn_cookietree, de);
	}
	tmpfs_unlock4(fdnode, tdnode, fnode, tnode);

	/*
	 * Finish up
	 */
	if (newname) {
		kfree(newname, tmp->tm_name_zone);
		newname = NULL;
	}
	cache_rename(ap->a_fnch, ap->a_tnch);
	tmpfs_knote(ap->a_fdvp, NOTE_WRITE);
	tmpfs_knote(ap->a_tdvp, NOTE_WRITE);
	if (fnode->tn_vnode)
		tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
	if (tvp)
		vrele(tvp);
	return 0;

out_locked:
	tmpfs_unlock4(fdnode, tdnode, fnode, tnode);
out:
	if (tvp)
		vrele(tvp);
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nmkdir(struct vop_nmkdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	KKASSERT(vap->va_type == VDIR);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nrmdir(struct vop_nrmdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vnode *vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	int error;

	/*
	 * We have to acquire the vp from ap->a_nch because we will likely
	 * unresolve the namecache entry, and a vrele/vput is needed to
	 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
	 *
	 * We have to use vget to clear any inactive state on the vnode,
	 * otherwise the vnode may remain inactive and thus tmpfs_inactive
	 * will not get called when we release it.
	 */
	error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
	KKASSERT(error == 0);
	vn_unlock(vp);

	/*
	 * Prevalidate so we don't hit an assertion later
	 */
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_DIR(vp);

	/*
	 *
	 */
	TMPFS_NODE_LOCK(dnode);
	TMPFS_NODE_LOCK(node);

	/*
	 * Only empty directories can be removed.
	 */
	if (node->tn_size > 0) {
		error = ENOTEMPTY;
		goto out_locked;
	}

	if ((dnode->tn_flags & APPEND)
	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out_locked;
	}

	/*
	 * This invariant holds only if we are not trying to
	 * remove "..".  We checked for that above so this is safe now.
	 */
	KKASSERT(node->tn_dir.tn_parent == dnode);

	/*
	 * Get the directory entry associated with node (vp)
	 */
	de = tmpfs_dir_lookup(dnode, node, ncp);
	KKASSERT(TMPFS_DIRENT_MATCHES(de, ncp->nc_name, ncp->nc_nlen));

	/* Check flags to see if we are allowed to remove the directory. */
	if ((dnode->tn_flags & APPEND) ||
	    node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out_locked;
	}

	/* Detach the directory entry from the directory (dnode). */
	tmpfs_dir_detach_locked(dnode, de);

	/*
	 * Must set parent linkage to NULL (tested by ncreate to disallow
	 * the creation of new files/dirs in a deleted directory)
	 */
	node->tn_status |= TMPFS_NODE_CHANGED;

	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
			    TMPFS_NODE_MODIFIED;

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* Release the deleted vnode (will destroy the node, notify
	 * interested parties and clean it from the cache).
	 */

	dnode->tn_status |= TMPFS_NODE_CHANGED;

	TMPFS_NODE_UNLOCK(node);
	TMPFS_NODE_UNLOCK(dnode);

	tmpfs_update(dvp);
	cache_unlink(ap->a_nch);
	tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	vrele(vp);
	return 0;

out_locked:
	TMPFS_NODE_UNLOCK(node);
	TMPFS_NODE_UNLOCK(dnode);

out:
	vrele(vp);

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	char *target = ap->a_target;
	int error;

	vap->va_type = VLNK;
	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
	if (error == 0) {
		tmpfs_knote(*vpp, NOTE_WRITE);
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int *eofflag = ap->a_eofflag;
	off_t **cookies = ap->a_cookies;
	int *ncookies = ap->a_ncookies;
	struct tmpfs_mount *tmp;
	int error;
	off_t startoff;
	off_t cnt = 0;
	struct tmpfs_node *node;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		/* may lock parent, cannot hold node lock */
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	TMPFS_NODE_LOCK_SH(node);
	error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
	KKASSERT(error >= -1);

	if (error == -1)
		error = 0;

	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	/* Update NFS-related variables.
	 */
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		off_t i;
		off_t off = startoff;
		struct tmpfs_dirent *de = NULL;

		*ncookies = cnt;
		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

		for (i = 0; i < cnt; i++) {
			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
			if (off == TMPFS_DIRCOOKIE_DOT) {
				off = TMPFS_DIRCOOKIE_DOTDOT;
			} else {
				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
					de = RB_MIN(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree);
				} else if (de != NULL) {
					de = RB_NEXT(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree, de);
				} else {
					de = tmpfs_dir_lookupbycookie(node,
								      off);
					KKASSERT(de != NULL);
					de = RB_NEXT(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree, de);
				}
				if (de == NULL)
					off = TMPFS_DIRCOOKIE_EOF;
				else
					off = tmpfs_dircookie(de);
			}
			(*cookies)[i] = off;
		}
		KKASSERT(uio->uio_offset == off);
	}
	TMPFS_NODE_UNLOCK(node);

	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int error;
	struct tmpfs_node *node;

	KKASSERT(uio->uio_offset == 0);
	KKASSERT(vp->v_type == VLNK);

	node = VP_TO_TMPFS_NODE(vp);
	TMPFS_NODE_LOCK_SH(node);
	error = uiomove(node->tn_link,
			MIN(node->tn_size, uio->uio_resid), uio);
	TMPFS_NODE_UNLOCK(node);
	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Degenerate case
	 */
	if (node == NULL) {
		vrecycle(vp);
		lwkt_reltoken(&mp->mnt_token);
		return(0);
	}

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if (node->tn_links == 0) {
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		/*
		 * We must retain any VM pages belonging to the vnode's
		 * object as the vnode will destroy the object during a
		 * later reclaim.  We call vinvalbuf(V_SAVE) to clean
		 * out the buffer cache.
		 *
		 * On DragonFlyBSD, vnodes are not immediately deactivated
		 * on the 1->0 refs, so this is a relatively optimal
		 * operation.  We have to do this in tmpfs_inactive()
		 * because the pages will have already been thrown away
		 * at the time tmpfs_reclaim() is called.
		 */
		if (node->tn_type == VREG &&
		    node->tn_reg.tn_pages_in_aobj == 0) {
			vinvalbuf(vp, V_SAVE, 0, 0);
			KKASSERT(RB_EMPTY(&vp->v_rbdirty_tree));
			KKASSERT(RB_EMPTY(&vp->v_rbclean_tree));
			tmpfs_move_pages(vp->v_object, node->tn_reg.tn_aobj,
					 TMPFS_MOVF_DEACTIVATE);
			node->tn_reg.tn_pages_in_aobj = 1;
		}

		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	return 0;
}

/* --------------------------------------------------------------------- */

int
tmpfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	KKASSERT(mp == tmp->tm_mount);

	TMPFS_NODE_LOCK(node);
	KKASSERT(node->tn_vnode == vp);
	node->tn_vnode = NULL;
	vp->v_data = NULL;

	/*
	 * If the node referenced by this vnode was deleted by the
	 * user, we must free its associated data structures now that
	 * the vnode is being reclaimed.
	 *
	 * Directories have an extra link ref.
	 */
	if (node->tn_links == 0) {
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		tmpfs_free_node(tmp, node);
		/* eats the lock */
	} else {
		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	KKASSERT(vp->v_data == NULL);
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_mountctl(struct vop_mountctl_args *ap)
{
	struct tmpfs_mount *tmp;
	struct mount *mp;
	int rc;

	mp = ap->a_head.a_ops->head.vv_mount;
	lwkt_gettoken(&mp->mnt_token);

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		tmp = (struct tmpfs_mount *) mp->mnt_data;

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &tmp->tm_export,
					(const struct export_args *) ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}

	lwkt_reltoken(&mp->mnt_token);
	return (rc);
}

/* --------------------------------------------------------------------- */

static int
tmpfs_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
		node, node->tn_flags, node->tn_links);
	kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
		node->tn_mode, node->tn_uid, node->tn_gid,
		(uintmax_t)node->tn_size, node->tn_status);

	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);

	kprintf("\n");

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_pathconf(struct vop_pathconf_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int name = ap->a_name;
	register_t *retval = ap->a_retval;
	struct tmpfs_mount *tmp;
	int error;

	error = 0;

	switch (name) {
	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		break;

	case _PC_FILESIZEBITS:
		tmp = VFS_TO_TMPFS(vp->v_mount);
		*retval = max(32, flsll(tmp->tm_pages_max * PAGE_SIZE) + 1);
		break;

	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		break;

	case _PC_NAME_MAX:
		*retval = NAME_MAX;
		break;

	case _PC_NO_TRUNC:
		*retval = 1;
		break;

	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		break;

	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		break;

	case _PC_SYNC_IO:
		*retval = 1;
		break;

	case _PC_2_SYMLINKS:
		*retval = 1;
		break;

	default:
		error = EINVAL;
	}

	return error;
}

/************************************************************************
 *                            KQFILTER OPS                             *
 ************************************************************************/

static void filt_tmpfsdetach(struct knote *kn);
static int filt_tmpfsread(struct knote *kn, long hint);
static int filt_tmpfswrite(struct knote *kn, long hint);
static int filt_tmpfsvnode(struct knote *kn, long hint);

static struct filterops tmpfsread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_tmpfsdetach, filt_tmpfsread };
static struct filterops tmpfswrite_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_tmpfsdetach, filt_tmpfswrite };
static struct filterops tmpfsvnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_tmpfsdetach, filt_tmpfsvnode };

static int
tmpfs_kqfilter (struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tmpfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tmpfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &tmpfsvnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_tmpfsdetach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_tmpfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}

	/*
	 * Interlock against MP races when performing this function.
	 */
	TMPFS_NODE_LOCK_SH(node);
	off = node->tn_size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI) {
		TMPFS_NODE_UNLOCK(node);
		return(1);
	}
	if (kn->kn_data == 0) {
		kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	}
	TMPFS_NODE_UNLOCK(node);
	return (kn->kn_data != 0);
}

static int
filt_tmpfswrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_tmpfsvnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * Helper to move VM pages between objects
 *
 * NOTE: The vm_page_rename() dirties the page, so we can clear the
 *	 PG_NEED_COMMIT flag.
 *	 If the pages are being moved into tn_aobj,
 *	 the pageout daemon will be able to page them out.
 */
static int
tmpfs_move_pages_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_pindex_t pindex;

	/*
	 * Take control of the page
	 */
	pindex = p->pindex;
	if (vm_page_busy_try(p, TRUE)) {
		vm_page_sleep_busy(p, TRUE, "tpgmov");
		info->error = -1;
		return -1;
	}
	if (p->object != info->object || p->pindex != pindex) {
		vm_page_wakeup(p);
		info->error = -1;
		return -1;
	}

	/*
	 * Make sure the page is not mapped.  These flags might also still be
	 * set heuristically even if we know the page is not mapped and must
	 * be properly cleaned up.
	 */
	if (__predict_false((p->flags & (PG_MAPPED|PG_WRITEABLE)) != 0))
		vm_page_protect(p, VM_PROT_NONE);

	/*
	 * Free or rename the page as appropriate
	 */
	if ((info->pagerflags & TMPFS_MOVF_FROMBACKING) &&
	    (p->flags & PG_SWAPPED) &&
	    (p->flags & PG_NEED_COMMIT) == 0 &&
	    p->dirty == 0) {
		/*
		 * If the page in the backing aobj was paged out to swap
		 * it will be clean and it is better to free it rather
		 * than re-dirty it.  We will assume that the page was
		 * paged out to swap for a reason!
		 *
		 * This helps avoid unnecessary swap thrashing on the page.
		 */
		vm_page_free(p);
	} else if ((info->pagerflags & TMPFS_MOVF_FROMBACKING) == 0 &&
		   (p->flags & PG_NEED_COMMIT) == 0 &&
		   p->dirty == 0) {
		/*
		 * If the page associated with the vnode was cleaned via
		 * a tmpfs_strategy() call, it exists as a swap block in
		 * aobj and it is again better to free it rather than
		 * re-dirty it.  We will assume that the page was
		 * paged out to swap for a reason!
		 *
		 * This helps avoid unnecessary swap thrashing on the page.
		 */
		vm_page_free(p);
	} else {
		/*
		 * Rename the page, which will also ensure that it is flagged
		 * as dirty and check whether a swap block association exists
		 * in the target object or not, setting appropriate flags if
		 * it does.
		 */
		vm_page_rename(p, info->dest_object, pindex);
		vm_page_clear_commit(p);
		if (info->pagerflags & TMPFS_MOVF_DEACTIVATE)
			vm_page_deactivate(p);
		vm_page_wakeup(p);
		/* page automatically made dirty */
	}

	return 0;
}

static
void
tmpfs_move_pages(vm_object_t src, vm_object_t dst, int movflags)
{
	struct rb_vm_page_scan_info info;

	vm_object_hold(src);
	vm_object_hold(dst);
	info.object = src;
	info.dest_object = dst;
	info.pagerflags = movflags;
	do {
		if (src->paging_in_progress)
			vm_object_pip_wait(src, "objtfs");
		info.error = 1;
		vm_page_rb_tree_RB_SCAN(&src->rb_memq, NULL,
					tmpfs_move_pages_callback, &info);
	} while (info.error < 0 || !RB_EMPTY(&src->rb_memq) ||
		 src->paging_in_progress);
	vm_object_drop(dst);
	vm_object_drop(src);
}

/* --------------------------------------------------------------------- */

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
struct vop_ops tmpfs_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_getpages = 	vop_stdgetpages,
	.vop_putpages = 	vop_stdputpages,
	.vop_ncreate =		tmpfs_ncreate,
	.vop_nresolve =		tmpfs_nresolve,
	.vop_nlookupdotdot =	tmpfs_nlookupdotdot,
	.vop_nmknod =		tmpfs_nmknod,
	.vop_open =		tmpfs_open,
	.vop_close =		tmpfs_close,
	.vop_access =		tmpfs_access,
	.vop_getattr =		tmpfs_getattr,
	.vop_getattr_quick =	tmpfs_getattr_quick,
	.vop_setattr =		tmpfs_setattr,
	.vop_read =		tmpfs_read,
	.vop_write =		tmpfs_write,
	.vop_fsync =		tmpfs_fsync,
	.vop_mountctl =		tmpfs_mountctl,
	.vop_nremove =		tmpfs_nremove,
	.vop_nlink =		tmpfs_nlink,
	.vop_nrename =		tmpfs_nrename,
	.vop_nmkdir =		tmpfs_nmkdir,
	.vop_nrmdir =		tmpfs_nrmdir,
	.vop_nsymlink =		tmpfs_nsymlink,
	.vop_readdir =		tmpfs_readdir,
	.vop_readlink =		tmpfs_readlink,
	.vop_inactive =		tmpfs_inactive,
	.vop_reclaim =		tmpfs_reclaim,
	.vop_print =		tmpfs_print,
	.vop_pathconf =		tmpfs_pathconf,
	.vop_bmap =		tmpfs_bmap,
	.vop_strategy =		tmpfs_strategy,
	.vop_advlock =		tmpfs_advlock,
	.vop_kqfilter =		tmpfs_kqfilter
};