1 /*	$NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * tmpfs vnode interface.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/dirent.h>
42 #include <sys/extattr.h>
43 #include <sys/fcntl.h>
44 #include <sys/file.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/lockf.h>
48 #include <sys/lock.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/sched.h>
55 #include <sys/smr.h>
56 #include <sys/stat.h>
57 #include <sys/sysctl.h>
58 #include <sys/unistd.h>
59 #include <sys/vnode.h>
60 #include <security/audit/audit.h>
61 #include <security/mac/mac_framework.h>
62 
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_pager.h>
68 #include <vm/swap_pager.h>
69 
70 #include <fs/tmpfs/tmpfs_vnops.h>
71 #include <fs/tmpfs/tmpfs.h>
72 
73 SYSCTL_DECL(_vfs_tmpfs);
74 VFS_SMR_DECLARE;
75 
76 static volatile int tmpfs_rename_restarts;
77 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
78     __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
79     "Times rename had to restart due to lock contention");
80 
81 MALLOC_DEFINE(M_TMPFSEA, "tmpfs extattr", "tmpfs extattr structure");
82 
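/*
 * Callback for vn_vget_ino_gen(): "arg" is the tmpfs node for which a
 * vnode has to be instantiated.
 */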
83 static int
84 tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
85     struct vnode **rvp)
86 {
87 
88 	return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
89 }
90 
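/*
 * Common lookup code.  The caller is responsible for the VEXEC check on
 * dvp; see the cached and uncached entry points below.
 */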
91 static int
92 tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
93 {
94 	struct tmpfs_dirent *de;
95 	struct tmpfs_node *dnode, *pnode;
96 	struct tmpfs_mount *tm;
97 	int error;
98 
99 	/* Caller assumes responsibility for ensuring access (VEXEC). */
100 	dnode = VP_TO_TMPFS_DIR(dvp);
101 	*vpp = NULLVP;
102 
103 	/* We cannot be requesting the parent directory of the root node. */
104 	MPASS(IMPLIES(dnode->tn_type == VDIR &&
105 	    dnode->tn_dir.tn_parent == dnode,
106 	    !(cnp->cn_flags & ISDOTDOT)));
107 
108 	TMPFS_ASSERT_LOCKED(dnode);
109 	if (dnode->tn_dir.tn_parent == NULL) {
110 		error = ENOENT;
111 		goto out;
112 	}
113 	if (cnp->cn_flags & ISDOTDOT) {
114 		tm = VFS_TO_TMPFS(dvp->v_mount);
115 		pnode = dnode->tn_dir.tn_parent;
116 		tmpfs_ref_node(pnode);
117 		error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
118 		    pnode, cnp->cn_lkflags, vpp);
119 		tmpfs_free_node(tm, pnode);
120 		if (error != 0)
121 			goto out;
122 	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
123 		VREF(dvp);
124 		*vpp = dvp;
125 		error = 0;
126 	} else {
127 		de = tmpfs_dir_lookup(dnode, NULL, cnp);
128 		if (de != NULL && de->td_node == NULL)
129 			cnp->cn_flags |= ISWHITEOUT;
130 		if (de == NULL || de->td_node == NULL) {
131 			/*
132 			 * The entry was not found in the directory.
133 			 * This is OK if we are creating or renaming an
134 			 * entry and are working on the last component of
135 			 * the path name.
136 			 */
137 			if ((cnp->cn_flags & ISLASTCN) &&
138 			    (cnp->cn_nameiop == CREATE ||
139 			    cnp->cn_nameiop == RENAME ||
140 			    (cnp->cn_nameiop == DELETE &&
141 			    cnp->cn_flags & DOWHITEOUT &&
142 			    cnp->cn_flags & ISWHITEOUT))) {
143 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
144 				    curthread);
145 				if (error != 0)
146 					goto out;
147 
148 				error = EJUSTRETURN;
149 			} else
150 				error = ENOENT;
151 		} else {
152 			struct tmpfs_node *tnode;
153 
154 			/*
155 			 * The entry was found, so get its associated
156 			 * tmpfs_node.
157 			 */
158 			tnode = de->td_node;
159 
160 			/*
161 			 * If we are not at the last path component and
162 			 * found a non-directory or non-link entry (which
163 			 * may itself be pointing to a directory), raise
164 			 * an error.
165 			 */
166 			if ((tnode->tn_type != VDIR &&
167 			    tnode->tn_type != VLNK) &&
168 			    !(cnp->cn_flags & ISLASTCN)) {
169 				error = ENOTDIR;
170 				goto out;
171 			}
172 
173 			/*
174 			 * If we are deleting or renaming the entry, keep
175 			 * track of its tmpfs_dirent so that it can be
176 			 * easily deleted later.
177 			 */
178 			if ((cnp->cn_flags & ISLASTCN) &&
179 			    (cnp->cn_nameiop == DELETE ||
180 			    cnp->cn_nameiop == RENAME)) {
181 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
182 				    curthread);
183 				if (error != 0)
184 					goto out;
185 
186 				/* Allocate a new vnode on the matching entry. */
187 				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
188 				    cnp->cn_lkflags, vpp);
189 				if (error != 0)
190 					goto out;
191 
192 				if ((dnode->tn_mode & S_ISTXT) &&
193 				  VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
194 				  curthread) && VOP_ACCESS(*vpp, VADMIN,
195 				  cnp->cn_cred, curthread)) {
196 					error = EPERM;
197 					vput(*vpp);
198 					*vpp = NULL;
199 					goto out;
200 				}
201 			} else {
202 				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
203 				    cnp->cn_lkflags, vpp);
204 				if (error != 0)
205 					goto out;
206 			}
207 		}
208 	}
209 
210 	/*
211 	 * Store the result of this lookup in the cache.  Avoid this if the
212 	 * request was for creation, as it does not improve timings on
213 	 * empirical tests.
214 	 */
215 	if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
216 		cache_enter(dvp, *vpp, cnp);
217 
218 out:
219 #ifdef INVARIANTS
220 	/*
221 	 * If there were no errors, *vpp cannot be null and it must be
222 	 * locked.
223 	 */
224 	if (error == 0) {
225 		MPASS(*vpp != NULLVP);
226 		ASSERT_VOP_LOCKED(*vpp, __func__);
227 	} else {
228 		MPASS(*vpp == NULL);
229 	}
230 #endif
231 
232 	return (error);
233 }
234 
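/*
 * Lookup entry point used when the name cache code has already performed
 * the VEXEC check on the directory; tmpfs_lookup() below does that check
 * itself before calling the common code.
 */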
235 static int
236 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
237 {
238 
239 	return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
240 }
241 
242 static int
243 tmpfs_lookup(struct vop_lookup_args *v)
244 {
245 	struct vnode *dvp = v->a_dvp;
246 	struct vnode **vpp = v->a_vpp;
247 	struct componentname *cnp = v->a_cnp;
248 	int error;
249 
250 	/* Check accessibility of requested node as a first step. */
251 	error = vn_dir_check_exec(dvp, cnp);
252 	if (error != 0)
253 		return (error);
254 
255 	return (tmpfs_lookup1(dvp, vpp, cnp));
256 }
257 
258 static int
259 tmpfs_create(struct vop_create_args *v)
260 {
261 	struct vnode *dvp = v->a_dvp;
262 	struct vnode **vpp = v->a_vpp;
263 	struct componentname *cnp = v->a_cnp;
264 	struct vattr *vap = v->a_vap;
265 	int error;
266 
267 	MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
268 
269 	error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
270 	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
271 		cache_enter(dvp, *vpp, cnp);
272 	return (error);
273 }
274 
275 static int
276 tmpfs_mknod(struct vop_mknod_args *v)
277 {
278 	struct vnode *dvp = v->a_dvp;
279 	struct vnode **vpp = v->a_vpp;
280 	struct componentname *cnp = v->a_cnp;
281 	struct vattr *vap = v->a_vap;
282 
283 	if (vap->va_type != VBLK && vap->va_type != VCHR &&
284 	    vap->va_type != VFIFO)
285 		return (EINVAL);
286 
287 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
288 }
289 
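/*
 * File operations installed by tmpfs_open() on regular files; the node
 * reference taken there is dropped in tmpfs_fo_close().
 */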
290 struct fileops tmpfs_fnops;
291 
292 static int
293 tmpfs_open(struct vop_open_args *v)
294 {
295 	struct vnode *vp;
296 	struct tmpfs_node *node;
297 	struct file *fp;
298 	int error, mode;
299 
300 	vp = v->a_vp;
301 	mode = v->a_mode;
302 	node = VP_TO_TMPFS_NODE(vp);
303 
304 	/*
305 	 * The file is still active but all its names have been removed
306 	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
307 	 * it is about to die.
308 	 */
309 	if (node->tn_links < 1)
310 		return (ENOENT);
311 
312 	/* If the file is marked append-only, deny write requests. */
313 	if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
314 		error = EPERM;
315 	else {
316 		error = 0;
317 		/* For regular files, the call below is a nop. */
318 		KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
319 		    OBJ_DEAD) == 0, ("dead object"));
320 		vnode_create_vobject(vp, node->tn_size, v->a_td);
321 	}
322 
323 	fp = v->a_fp;
324 	MPASS(fp == NULL || fp->f_data == NULL);
325 	if (error == 0 && fp != NULL && vp->v_type == VREG) {
326 		tmpfs_ref_node(node);
327 		finit_vnode(fp, mode, node, &tmpfs_fnops);
328 	}
329 
330 	return (error);
331 }
332 
333 static int
334 tmpfs_close(struct vop_close_args *v)
335 {
336 	struct vnode *vp = v->a_vp;
337 
338 	/* Update node times. */
339 	tmpfs_update(vp);
340 
341 	return (0);
342 }
343 
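/*
 * Drop the node reference taken by tmpfs_open() and hand the rest of the
 * close off to the generic vnode fileops.
 */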
344 int
345 tmpfs_fo_close(struct file *fp, struct thread *td)
346 {
347 	struct tmpfs_node *node;
348 
349 	node = fp->f_data;
350 	if (node != NULL) {
351 		MPASS(node->tn_type == VREG);
352 		tmpfs_free_node(node->tn_reg.tn_tmp, node);
353 	}
354 	return (vnops.fo_close(fp, td));
355 }
356 
357 /*
358  * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
359  * the comment above cache_fplookup for details.
360  */
361 int
362 tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
363 {
364 	struct vnode *vp;
365 	struct tmpfs_node *node;
366 	struct ucred *cred;
367 	mode_t all_x, mode;
368 
369 	vp = v->a_vp;
370 	node = VP_TO_TMPFS_NODE_SMR(vp);
371 	if (__predict_false(node == NULL))
372 		return (EAGAIN);
373 
374 	all_x = S_IXUSR | S_IXGRP | S_IXOTH;
375 	mode = atomic_load_short(&node->tn_mode);
376 	if (__predict_true((mode & all_x) == all_x))
377 		return (0);
378 
379 	cred = v->a_cred;
380 	return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
381 }
382 
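/*
 * Common permission check, called with either the vnode or the node lock
 * held: immutable nodes refuse VWRITE, everything else is decided by
 * vaccess().
 */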
383 static int
384 tmpfs_access_locked(struct vnode *vp, struct tmpfs_node *node,
385     accmode_t accmode, struct ucred *cred)
386 {
387 #ifdef DEBUG_VFS_LOCKS
388 	if (!mtx_owned(TMPFS_NODE_MTX(node))) {
389 		ASSERT_VOP_LOCKED(vp,
390 		    "tmpfs_access_locked needs locked vnode or node");
391 	}
392 #endif
393 
394 	if ((accmode & VWRITE) != 0 && (node->tn_flags & IMMUTABLE) != 0)
395 		return (EPERM);
396 	return (vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
397 	    accmode, cred));
398 }
399 
400 int
401 tmpfs_access(struct vop_access_args *v)
402 {
403 	struct vnode *vp = v->a_vp;
404 	struct ucred *cred = v->a_cred;
405 	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
406 	mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
407 	accmode_t accmode = v->a_accmode;
408 
409 	/*
410 	 * Common case path lookup.
411 	 */
412 	if (__predict_true(accmode == VEXEC &&
413 	    (node->tn_mode & all_x) == all_x))
414 		return (0);
415 
416 	switch (vp->v_type) {
417 	case VDIR:
418 		/* FALLTHROUGH */
419 	case VLNK:
420 		/* FALLTHROUGH */
421 	case VREG:
422 		if ((accmode & VWRITE) != 0 &&
423 		    (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
424 			return (EROFS);
425 		break;
426 
427 	case VBLK:
428 		/* FALLTHROUGH */
429 	case VCHR:
430 		/* FALLTHROUGH */
431 	case VSOCK:
432 		/* FALLTHROUGH */
433 	case VFIFO:
434 		break;
435 
436 	default:
437 		return (EINVAL);
438 	}
439 
440 	return (tmpfs_access_locked(vp, node, accmode, cred));
441 }
442 
443 int
444 tmpfs_stat(struct vop_stat_args *v)
445 {
446 	struct vnode *vp = v->a_vp;
447 	struct stat *sb = v->a_sb;
448 	struct tmpfs_node *node;
449 	int error;
450 
451 	node = VP_TO_TMPFS_NODE(vp);
452 
453 	tmpfs_update_getattr(vp);
454 
455 	error = vop_stat_helper_pre(v);
456 	if (__predict_false(error))
457 		return (error);
458 
459 	sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
460 	sb->st_ino = node->tn_id;
461 	sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
462 	sb->st_nlink = node->tn_links;
463 	sb->st_uid = node->tn_uid;
464 	sb->st_gid = node->tn_gid;
465 	sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
466 		node->tn_rdev : NODEV;
467 	sb->st_size = node->tn_size;
468 	sb->st_atim.tv_sec = node->tn_atime.tv_sec;
469 	sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
470 	sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
471 	sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
472 	sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
473 	sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
474 	sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
475 	sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
476 	sb->st_blksize = PAGE_SIZE;
477 	sb->st_flags = node->tn_flags;
478 	sb->st_gen = node->tn_gen;
479 	if (vp->v_type == VREG) {
480 #ifdef __ILP32__
481 		vm_object_t obj = node->tn_reg.tn_aobj;
482 
483 		/* Handle torn read */
484 		VM_OBJECT_RLOCK(obj);
485 #endif
486 		sb->st_blocks = ptoa(node->tn_reg.tn_pages);
487 #ifdef __ILP32__
488 		VM_OBJECT_RUNLOCK(obj);
489 #endif
490 	} else {
491 		sb->st_blocks = node->tn_size;
492 	}
493 	sb->st_blocks /= S_BLKSIZE;
494 	return (vop_stat_helper_post(v, error));
495 }
496 
497 int
498 tmpfs_getattr(struct vop_getattr_args *v)
499 {
500 	struct vnode *vp = v->a_vp;
501 	struct vattr *vap = v->a_vap;
502 	struct tmpfs_node *node;
503 
504 	node = VP_TO_TMPFS_NODE(vp);
505 
506 	tmpfs_update_getattr(vp);
507 
508 	vap->va_type = vp->v_type;
509 	vap->va_mode = node->tn_mode;
510 	vap->va_nlink = node->tn_links;
511 	vap->va_uid = node->tn_uid;
512 	vap->va_gid = node->tn_gid;
513 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
514 	vap->va_fileid = node->tn_id;
515 	vap->va_size = node->tn_size;
516 	vap->va_blocksize = PAGE_SIZE;
517 	vap->va_atime = node->tn_atime;
518 	vap->va_mtime = node->tn_mtime;
519 	vap->va_ctime = node->tn_ctime;
520 	vap->va_birthtime = node->tn_birthtime;
521 	vap->va_gen = node->tn_gen;
522 	vap->va_flags = node->tn_flags;
523 	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
524 	    node->tn_rdev : NODEV;
525 	if (vp->v_type == VREG) {
526 #ifdef __ILP32__
527 		vm_object_t obj = node->tn_reg.tn_aobj;
528 
529 		VM_OBJECT_RLOCK(obj);
530 #endif
531 		vap->va_bytes = ptoa(node->tn_reg.tn_pages);
532 #ifdef __ILP32__
533 		VM_OBJECT_RUNLOCK(obj);
534 #endif
535 	} else {
536 		vap->va_bytes = node->tn_size;
537 	}
538 	vap->va_filerev = 0;
539 
540 	return (0);
541 }
542 
543 int
544 tmpfs_setattr(struct vop_setattr_args *v)
545 {
546 	struct vnode *vp = v->a_vp;
547 	struct vattr *vap = v->a_vap;
548 	struct ucred *cred = v->a_cred;
549 	struct thread *td = curthread;
550 
551 	int error;
552 
553 	ASSERT_VOP_IN_SEQC(vp);
554 
555 	error = 0;
556 
557 	/* Abort if any unsettable attribute is given. */
558 	if (vap->va_type != VNON ||
559 	    vap->va_nlink != VNOVAL ||
560 	    vap->va_fsid != VNOVAL ||
561 	    vap->va_fileid != VNOVAL ||
562 	    vap->va_blocksize != VNOVAL ||
563 	    vap->va_gen != VNOVAL ||
564 	    vap->va_rdev != VNOVAL ||
565 	    vap->va_bytes != VNOVAL)
566 		error = EINVAL;
567 
568 	if (error == 0 && (vap->va_flags != VNOVAL))
569 		error = tmpfs_chflags(vp, vap->va_flags, cred, td);
570 
571 	if (error == 0 && (vap->va_size != VNOVAL))
572 		error = tmpfs_chsize(vp, vap->va_size, cred, td);
573 
574 	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
575 		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);
576 
577 	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
578 		error = tmpfs_chmod(vp, vap->va_mode, cred, td);
579 
580 	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
581 	    vap->va_atime.tv_nsec != VNOVAL) ||
582 	    (vap->va_mtime.tv_sec != VNOVAL &&
583 	    vap->va_mtime.tv_nsec != VNOVAL) ||
584 	    (vap->va_birthtime.tv_sec != VNOVAL &&
585 	    vap->va_birthtime.tv_nsec != VNOVAL)))
586 		error = tmpfs_chtimes(vp, vap, cred, td);
587 
588 	/*
589 	 * Update the node times.  We give preference to the error codes
590 	 * generated by this function rather than the ones that may arise
591 	 * from tmpfs_update.
592 	 */
593 	tmpfs_update(vp);
594 
595 	return (error);
596 }
597 
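/*
 * Regular-file read: data is copied straight out of the backing VM object
 * and only the access timestamp is updated.
 */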
598 static int
599 tmpfs_read(struct vop_read_args *v)
600 {
601 	struct vnode *vp;
602 	struct uio *uio;
603 	struct tmpfs_node *node;
604 
605 	vp = v->a_vp;
606 	if (vp->v_type != VREG)
607 		return (EISDIR);
608 	uio = v->a_uio;
609 	if (uio->uio_offset < 0)
610 		return (EINVAL);
611 	node = VP_TO_TMPFS_NODE(vp);
612 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
613 	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
614 }
615 
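/*
 * Lockless read path, executed entirely under the vfs SMR section.  If the
 * node or its backing object is going away, return EJUSTRETURN so that the
 * caller falls back to the locked tmpfs_read() path.
 */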
616 static int
617 tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
618 {
619 	struct vnode *vp;
620 	struct tmpfs_node *node;
621 	vm_object_t object;
622 	off_t size;
623 	int error;
624 
625 	vp = v->a_vp;
626 	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
627 
628 	if (v->a_uio->uio_offset < 0)
629 		return (EINVAL);
630 
631 	error = EJUSTRETURN;
632 	vfs_smr_enter();
633 
634 	node = VP_TO_TMPFS_NODE_SMR(vp);
635 	if (node == NULL)
636 		goto out_smr;
637 	MPASS(node->tn_type == VREG);
638 	MPASS(node->tn_refcount >= 1);
639 	object = node->tn_reg.tn_aobj;
640 	if (object == NULL)
641 		goto out_smr;
642 
643 	MPASS(object->type == tmpfs_pager_type);
644 	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
645 	    OBJ_SWAP);
646 	if (!VN_IS_DOOMED(vp)) {
647 		/* size cannot become shorter due to rangelock. */
648 		size = node->tn_size;
649 		tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
650 		vfs_smr_exit();
651 		error = uiomove_object(object, size, v->a_uio);
652 		return (error);
653 	}
654 out_smr:
655 	vfs_smr_exit();
656 	return (error);
657 }
658 
659 static int
660 tmpfs_write(struct vop_write_args *v)
661 {
662 	struct vnode *vp;
663 	struct uio *uio;
664 	struct tmpfs_node *node;
665 	off_t oldsize;
666 	ssize_t r;
667 	int error, ioflag;
668 	mode_t newmode;
669 
670 	vp = v->a_vp;
671 	uio = v->a_uio;
672 	ioflag = v->a_ioflag;
673 	error = 0;
674 	node = VP_TO_TMPFS_NODE(vp);
675 	oldsize = node->tn_size;
676 
677 	if (uio->uio_offset < 0 || vp->v_type != VREG)
678 		return (EINVAL);
679 	if (uio->uio_resid == 0)
680 		return (0);
681 	if (ioflag & IO_APPEND)
682 		uio->uio_offset = node->tn_size;
683 	error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
684 	    tm_maxfilesize, &r, uio->uio_td);
685 	if (error != 0) {
686 		vn_rlimit_fsizex_res(uio, r);
687 		return (error);
688 	}
689 
690 	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
691 		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
692 		    FALSE);
693 		if (error != 0)
694 			goto out;
695 	}
696 
697 	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
698 	node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
699 	node->tn_accessed = true;
700 	if (node->tn_mode & (S_ISUID | S_ISGID)) {
701 		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
702 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
703 			vn_seqc_write_begin(vp);
704 			atomic_store_short(&node->tn_mode, newmode);
705 			vn_seqc_write_end(vp);
706 		}
707 	}
708 	if (error != 0)
709 		(void)tmpfs_reg_resize(vp, oldsize, TRUE);
710 
711 out:
712 	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
713 	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
714 
715 	vn_rlimit_fsizex_res(uio, r);
716 	return (error);
717 }
718 
719 static int
720 tmpfs_deallocate(struct vop_deallocate_args *v)
721 {
722 	return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
723 }
724 
725 static int
726 tmpfs_fsync(struct vop_fsync_args *v)
727 {
728 	struct vnode *vp = v->a_vp;
729 
730 	tmpfs_check_mtime(vp);
731 	tmpfs_update(vp);
732 
733 	return (0);
734 }
735 
736 static int
737 tmpfs_remove(struct vop_remove_args *v)
738 {
739 	struct vnode *dvp = v->a_dvp;
740 	struct vnode *vp = v->a_vp;
741 
742 	int error;
743 	struct tmpfs_dirent *de;
744 	struct tmpfs_mount *tmp;
745 	struct tmpfs_node *dnode;
746 	struct tmpfs_node *node;
747 
748 	if (vp->v_type == VDIR) {
749 		error = EISDIR;
750 		goto out;
751 	}
752 
753 	dnode = VP_TO_TMPFS_DIR(dvp);
754 	node = VP_TO_TMPFS_NODE(vp);
755 	tmp = VFS_TO_TMPFS(vp->v_mount);
756 	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
757 	MPASS(de != NULL);
758 
759 	/* Files marked as immutable or append-only cannot be deleted. */
760 	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
761 	    (dnode->tn_flags & APPEND)) {
762 		error = EPERM;
763 		goto out;
764 	}
765 
766 	/* Remove the entry from the directory; as it is a file, we do not
767 	 * have to change the number of hard links of the directory. */
768 	tmpfs_dir_detach(dvp, de);
769 	if (v->a_cnp->cn_flags & DOWHITEOUT)
770 		tmpfs_dir_whiteout_add(dvp, v->a_cnp);
771 
772 	/* Free the directory entry we just deleted.  Note that the node
773 	 * referred by it will not be removed until the vnode is really
774 	 * reclaimed. */
775 	tmpfs_free_dirent(tmp, de);
776 
777 	node->tn_status |= TMPFS_NODE_CHANGED;
778 	node->tn_accessed = true;
779 	error = 0;
780 
781 out:
782 	return (error);
783 }
784 
785 static int
786 tmpfs_link(struct vop_link_args *v)
787 {
788 	struct vnode *dvp = v->a_tdvp;
789 	struct vnode *vp = v->a_vp;
790 	struct componentname *cnp = v->a_cnp;
791 
792 	int error;
793 	struct tmpfs_dirent *de;
794 	struct tmpfs_node *node;
795 
796 	MPASS(dvp != vp); /* XXX When can this be false? */
797 	node = VP_TO_TMPFS_NODE(vp);
798 
799 	/* Ensure that we do not overflow the maximum number of links imposed
800 	 * by the system. */
801 	MPASS(node->tn_links <= TMPFS_LINK_MAX);
802 	if (node->tn_links == TMPFS_LINK_MAX) {
803 		error = EMLINK;
804 		goto out;
805 	}
806 
807 	/* We cannot create links of files marked immutable or append-only. */
808 	if (node->tn_flags & (IMMUTABLE | APPEND)) {
809 		error = EPERM;
810 		goto out;
811 	}
812 
813 	/* Allocate a new directory entry to represent the node. */
814 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
815 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
816 	if (error != 0)
817 		goto out;
818 
819 	/* Insert the new directory entry into the appropriate directory. */
820 	if (cnp->cn_flags & ISWHITEOUT)
821 		tmpfs_dir_whiteout_remove(dvp, cnp);
822 	tmpfs_dir_attach(dvp, de);
823 
824 	/* vp link count has changed, so update node times. */
825 	node->tn_status |= TMPFS_NODE_CHANGED;
826 	tmpfs_update(vp);
827 
828 	error = 0;
829 
830 out:
831 	return (error);
832 }
833 
834 /*
835  * We acquire all but fdvp locks using non-blocking acquisitions.  If we
836  * fail to acquire any lock in the path we will drop all held locks,
837  * acquire the new lock in a blocking fashion, and then release it and
838  * restart the rename.  This acquire/release step ensures that we do not
839  * spin on a lock waiting for release.  On error release all vnode locks
840  * and decrement references the way tmpfs_rename() would do.
841  */
842 static int
843 tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
844     struct vnode *tdvp, struct vnode **tvpp,
845     struct componentname *fcnp, struct componentname *tcnp)
846 {
847 	struct vnode *nvp;
848 	struct mount *mp;
849 	struct tmpfs_dirent *de;
850 	int error, restarts = 0;
851 
852 	VOP_UNLOCK(tdvp);
853 	if (*tvpp != NULL && *tvpp != tdvp)
854 		VOP_UNLOCK(*tvpp);
855 	mp = fdvp->v_mount;
856 
857 relock:
858 	restarts += 1;
859 	error = vn_lock(fdvp, LK_EXCLUSIVE);
860 	if (error)
861 		goto releout;
862 	if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
863 		VOP_UNLOCK(fdvp);
864 		error = vn_lock(tdvp, LK_EXCLUSIVE);
865 		if (error)
866 			goto releout;
867 		VOP_UNLOCK(tdvp);
868 		goto relock;
869 	}
870 	/*
871 	 * Re-resolve fvp to be certain it still exists and fetch the
872 	 * correct vnode.
873 	 */
874 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
875 	if (de == NULL) {
876 		VOP_UNLOCK(fdvp);
877 		VOP_UNLOCK(tdvp);
878 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
879 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
880 			error = EINVAL;
881 		else
882 			error = ENOENT;
883 		goto releout;
884 	}
885 	error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
886 	if (error != 0) {
887 		VOP_UNLOCK(fdvp);
888 		VOP_UNLOCK(tdvp);
889 		if (error != EBUSY)
890 			goto releout;
891 		error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
892 		if (error != 0)
893 			goto releout;
894 		VOP_UNLOCK(nvp);
895 		/*
896 		 * Concurrent rename race.
897 		 */
898 		if (nvp == tdvp) {
899 			vrele(nvp);
900 			error = EINVAL;
901 			goto releout;
902 		}
903 		vrele(*fvpp);
904 		*fvpp = nvp;
905 		goto relock;
906 	}
907 	vrele(*fvpp);
908 	*fvpp = nvp;
909 	VOP_UNLOCK(*fvpp);
910 	/*
911 	 * Re-resolve tvp and acquire the vnode lock if present.
912 	 */
913 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
914 	/*
915 	 * If tvp disappeared we just carry on.
916 	 */
917 	if (de == NULL && *tvpp != NULL) {
918 		vrele(*tvpp);
919 		*tvpp = NULL;
920 	}
921 	/*
922 	 * Get the tvp ino if the lookup succeeded.  We may have to restart
923 	 * if the non-blocking acquire fails.
924 	 */
925 	if (de != NULL) {
926 		nvp = NULL;
927 		error = tmpfs_alloc_vp(mp, de->td_node,
928 		    LK_EXCLUSIVE | LK_NOWAIT, &nvp);
929 		if (*tvpp != NULL)
930 			vrele(*tvpp);
931 		*tvpp = nvp;
932 		if (error != 0) {
933 			VOP_UNLOCK(fdvp);
934 			VOP_UNLOCK(tdvp);
935 			if (error != EBUSY)
936 				goto releout;
937 			error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
938 			    &nvp);
939 			if (error != 0)
940 				goto releout;
941 			VOP_UNLOCK(nvp);
942 			/*
943 			 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
944 			 */
945 			if (nvp == fdvp) {
946 				error = ENOTEMPTY;
947 				goto releout;
948 			}
949 			goto relock;
950 		}
951 	}
952 	tmpfs_rename_restarts += restarts;
953 
954 	return (0);
955 
956 releout:
957 	vrele(fdvp);
958 	vrele(*fvpp);
959 	vrele(tdvp);
960 	if (*tvpp != NULL)
961 		vrele(*tvpp);
962 	tmpfs_rename_restarts += restarts;
963 
964 	return (error);
965 }
966 
967 static int
968 tmpfs_rename(struct vop_rename_args *v)
969 {
970 	struct vnode *fdvp = v->a_fdvp;
971 	struct vnode *fvp = v->a_fvp;
972 	struct componentname *fcnp = v->a_fcnp;
973 	struct vnode *tdvp = v->a_tdvp;
974 	struct vnode *tvp = v->a_tvp;
975 	struct componentname *tcnp = v->a_tcnp;
976 	char *newname;
977 	struct tmpfs_dirent *de;
978 	struct tmpfs_mount *tmp;
979 	struct tmpfs_node *fdnode;
980 	struct tmpfs_node *fnode;
981 	struct tmpfs_node *tnode;
982 	struct tmpfs_node *tdnode;
983 	int error;
984 	bool want_seqc_end;
985 
986 	want_seqc_end = false;
987 
988 	/*
989 	 * Disallow cross-device renames.
990 	 * XXX Why isn't this done by the caller?
991 	 */
992 	if (fvp->v_mount != tdvp->v_mount ||
993 	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
994 		error = EXDEV;
995 		goto out;
996 	}
997 
998 	/* If source and target are the same file, there is nothing to do. */
999 	if (fvp == tvp) {
1000 		error = 0;
1001 		goto out;
1002 	}
1003 
1004 	/*
1005 	 * If we need to move the directory between entries, lock the
1006 	 * source so that we can safely operate on it.
1007 	 */
1008 	if (fdvp != tdvp && fdvp != tvp) {
1009 		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1010 			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1011 			    fcnp, tcnp);
1012 			if (error != 0)
1013 				return (error);
1014 			ASSERT_VOP_ELOCKED(fdvp,
1015 			    "tmpfs_rename: fdvp not locked");
1016 			ASSERT_VOP_ELOCKED(tdvp,
1017 			    "tmpfs_rename: tdvp not locked");
1018 			if (tvp != NULL)
1019 				ASSERT_VOP_ELOCKED(tvp,
1020 				    "tmpfs_rename: tvp not locked");
1021 			if (fvp == tvp) {
1022 				error = 0;
1023 				goto out_locked;
1024 			}
1025 		}
1026 	}
1027 
1028 	/*
1029 	 * Avoid manipulating '.' and '..' entries.
1030 	 */
1031 	if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1032 	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.')) {
1033 		error = EINVAL;
1034 		goto out_locked;
1035 	}
1036 
1037 	if (tvp != NULL)
1038 		vn_seqc_write_begin(tvp);
1039 	vn_seqc_write_begin(tdvp);
1040 	vn_seqc_write_begin(fvp);
1041 	vn_seqc_write_begin(fdvp);
1042 	want_seqc_end = true;
1043 
1044 	tmp = VFS_TO_TMPFS(tdvp->v_mount);
1045 	tdnode = VP_TO_TMPFS_DIR(tdvp);
1046 	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1047 	fdnode = VP_TO_TMPFS_DIR(fdvp);
1048 	fnode = VP_TO_TMPFS_NODE(fvp);
1049 	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1050 
1051 	/*
1052 	 * Entry can disappear before we lock fdvp.
1053 	 */
1054 	if (de == NULL) {
1055 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1056 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1057 			error = EINVAL;
1058 		else
1059 			error = ENOENT;
1060 		goto out_locked;
1061 	}
1062 	MPASS(de->td_node == fnode);
1063 
1064 	/*
1065 	 * If re-naming a directory to another preexisting directory
1066 	 * ensure that the target directory is empty so that its
1067 	 * removal causes no side effects.
1068 	 * Kern_rename guarantees the destination to be a directory
1069 	 * if the source is one.
1070 	 */
1071 	if (tvp != NULL) {
1072 		MPASS(tnode != NULL);
1073 
1074 		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1075 		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1076 			error = EPERM;
1077 			goto out_locked;
1078 		}
1079 
1080 		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1081 			if (tnode->tn_size > 0) {
1082 				error = ENOTEMPTY;
1083 				goto out_locked;
1084 			}
1085 		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1086 			error = ENOTDIR;
1087 			goto out_locked;
1088 		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1089 			error = EISDIR;
1090 			goto out_locked;
1091 		} else {
1092 			MPASS(fnode->tn_type != VDIR &&
1093 				tnode->tn_type != VDIR);
1094 		}
1095 	}
1096 
1097 	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1098 	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1099 		error = EPERM;
1100 		goto out_locked;
1101 	}
1102 
1103 	/*
1104 	 * Ensure that we have enough memory to hold the new name, if it
1105 	 * has to be changed.
1106 	 */
1107 	if (fcnp->cn_namelen != tcnp->cn_namelen ||
1108 	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1109 		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1110 	} else
1111 		newname = NULL;
1112 
1113 	/*
1114 	 * If the node is being moved to another directory, we have to do
1115 	 * the move.
1116 	 */
1117 	if (fdnode != tdnode) {
1118 		/*
1119 		 * In case we are moving a directory, we have to adjust its
1120 		 * parent to point to the new parent.
1121 		 */
1122 		if (de->td_node->tn_type == VDIR) {
1123 			struct tmpfs_node *n;
1124 
1125 			TMPFS_NODE_LOCK(fnode);
1126 			error = tmpfs_access_locked(fvp, fnode, VWRITE,
1127 			    tcnp->cn_cred);
1128 			TMPFS_NODE_UNLOCK(fnode);
1129 			if (error) {
1130 				if (newname != NULL)
1131 					free(newname, M_TMPFSNAME);
1132 				goto out_locked;
1133 			}
1134 
1135 			/*
1136 			 * Ensure the target directory is not a child of the
1137 			 * directory being moved.  Otherwise, we'd end up
1138 			 * with stale nodes.
1139 			 */
1140 			n = tdnode;
1141 			/*
1142 			 * TMPFS_LOCK guarantees that no nodes are freed while
1143 			 * traversing the list. Nodes can only be marked as
1144 			 * removed: tn_parent == NULL.
1145 			 */
1146 			TMPFS_LOCK(tmp);
1147 			TMPFS_NODE_LOCK(n);
1148 			while (n != n->tn_dir.tn_parent) {
1149 				struct tmpfs_node *parent;
1150 
1151 				if (n == fnode) {
1152 					TMPFS_NODE_UNLOCK(n);
1153 					TMPFS_UNLOCK(tmp);
1154 					error = EINVAL;
1155 					if (newname != NULL)
1156 						free(newname, M_TMPFSNAME);
1157 					goto out_locked;
1158 				}
1159 				parent = n->tn_dir.tn_parent;
1160 				TMPFS_NODE_UNLOCK(n);
1161 				if (parent == NULL) {
1162 					n = NULL;
1163 					break;
1164 				}
1165 				TMPFS_NODE_LOCK(parent);
1166 				if (parent->tn_dir.tn_parent == NULL) {
1167 					TMPFS_NODE_UNLOCK(parent);
1168 					n = NULL;
1169 					break;
1170 				}
1171 				n = parent;
1172 			}
1173 			TMPFS_UNLOCK(tmp);
1174 			if (n == NULL) {
1175 				error = EINVAL;
1176 				if (newname != NULL)
1177 					    free(newname, M_TMPFSNAME);
1178 				goto out_locked;
1179 			}
1180 			TMPFS_NODE_UNLOCK(n);
1181 
1182 			/* Adjust the parent pointer. */
1183 			TMPFS_VALIDATE_DIR(fnode);
1184 			TMPFS_NODE_LOCK(de->td_node);
1185 			de->td_node->tn_dir.tn_parent = tdnode;
1186 			TMPFS_NODE_UNLOCK(de->td_node);
1187 
1188 			/*
1189 			 * As a result of changing the target of the '..'
1190 			 * entry, the link count of the source and target
1191 			 * directories has to be adjusted.
1192 			 */
1193 			TMPFS_NODE_LOCK(tdnode);
1194 			TMPFS_ASSERT_LOCKED(tdnode);
1195 			tdnode->tn_links++;
1196 			TMPFS_NODE_UNLOCK(tdnode);
1197 
1198 			TMPFS_NODE_LOCK(fdnode);
1199 			TMPFS_ASSERT_LOCKED(fdnode);
1200 			fdnode->tn_links--;
1201 			TMPFS_NODE_UNLOCK(fdnode);
1202 		}
1203 	}
1204 
1205 	/*
1206 	 * Do the move: just remove the entry from the source directory
1207 	 * and insert it into the target one.
1208 	 */
1209 	tmpfs_dir_detach(fdvp, de);
1210 
1211 	if (fcnp->cn_flags & DOWHITEOUT)
1212 		tmpfs_dir_whiteout_add(fdvp, fcnp);
1213 	if (tcnp->cn_flags & ISWHITEOUT)
1214 		tmpfs_dir_whiteout_remove(tdvp, tcnp);
1215 
1216 	/*
1217 	 * If the name has changed, we need to make it effective by changing
1218 	 * it in the directory entry.
1219 	 */
1220 	if (newname != NULL) {
1221 		MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1222 
1223 		free(de->ud.td_name, M_TMPFSNAME);
1224 		de->ud.td_name = newname;
1225 		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1226 
1227 		fnode->tn_status |= TMPFS_NODE_CHANGED;
1228 		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1229 	}
1230 
1231 	/*
1232 	 * If we are overwriting an entry, we have to remove the old one
1233 	 * from the target directory.
1234 	 */
1235 	if (tvp != NULL) {
1236 		struct tmpfs_dirent *tde;
1237 
1238 		/* Remove the old entry from the target directory. */
1239 		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1240 		tmpfs_dir_detach(tdvp, tde);
1241 
1242 		/* Update node's ctime because of possible hardlinks. */
1243 		tnode->tn_status |= TMPFS_NODE_CHANGED;
1244 		tmpfs_update(tvp);
1245 
1246 		/*
1247 		 * Free the directory entry we just deleted.  Note that the
1248 		 * node referred by it will not be removed until the vnode is
1249 		 * really reclaimed.
1250 		 */
1251 		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1252 	}
1253 
1254 	tmpfs_dir_attach(tdvp, de);
1255 
1256 	if (tmpfs_use_nc(fvp)) {
1257 		cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1258 	}
1259 
1260 	error = 0;
1261 
1262 out_locked:
1263 	if (fdvp != tdvp && fdvp != tvp)
1264 		VOP_UNLOCK(fdvp);
1265 
1266 out:
1267 	if (want_seqc_end) {
1268 		if (tvp != NULL)
1269 			vn_seqc_write_end(tvp);
1270 		vn_seqc_write_end(tdvp);
1271 		vn_seqc_write_end(fvp);
1272 		vn_seqc_write_end(fdvp);
1273 	}
1274 
1275 	/*
1276 	 * Release target nodes.
1277 	 * XXX: I don't understand when tdvp can be the same as tvp, but
1278 	 * other code takes care of this...
1279 	 */
1280 	if (tdvp == tvp)
1281 		vrele(tdvp);
1282 	else
1283 		vput(tdvp);
1284 	if (tvp != NULL)
1285 		vput(tvp);
1286 
1287 	/* Release source nodes. */
1288 	vrele(fdvp);
1289 	vrele(fvp);
1290 
1291 	return (error);
1292 }
1293 
1294 static int
1295 tmpfs_mkdir(struct vop_mkdir_args *v)
1296 {
1297 	struct vnode *dvp = v->a_dvp;
1298 	struct vnode **vpp = v->a_vpp;
1299 	struct componentname *cnp = v->a_cnp;
1300 	struct vattr *vap = v->a_vap;
1301 
1302 	MPASS(vap->va_type == VDIR);
1303 
1304 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
1305 }
1306 
1307 static int
1308 tmpfs_rmdir(struct vop_rmdir_args *v)
1309 {
1310 	struct vnode *dvp = v->a_dvp;
1311 	struct vnode *vp = v->a_vp;
1312 
1313 	int error;
1314 	struct tmpfs_dirent *de;
1315 	struct tmpfs_mount *tmp;
1316 	struct tmpfs_node *dnode;
1317 	struct tmpfs_node *node;
1318 
1319 	tmp = VFS_TO_TMPFS(dvp->v_mount);
1320 	dnode = VP_TO_TMPFS_DIR(dvp);
1321 	node = VP_TO_TMPFS_DIR(vp);
1322 
1323 	/* Directories with more than two entries ('.' and '..') cannot be
1324 	 * removed. */
1325 	if (node->tn_size > 0) {
1326 		error = ENOTEMPTY;
1327 		goto out;
1328 	}
1329 
1330 	if ((dnode->tn_flags & APPEND)
1331 	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1332 		error = EPERM;
1333 		goto out;
1334 	}
1335 
1336 	/* This invariant holds only if we are not trying to remove "..".
1337 	 * We checked for that above so this is safe now. */
1338 	MPASS(node->tn_dir.tn_parent == dnode);
1339 
1340 	/* Get the directory entry associated with node (vp).  This was
1341 	 * filled by tmpfs_lookup while looking up the entry. */
1342 	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
1343 	MPASS(TMPFS_DIRENT_MATCHES(de,
1344 	    v->a_cnp->cn_nameptr,
1345 	    v->a_cnp->cn_namelen));
1346 
1347 	/* Check flags to see if we are allowed to remove the directory. */
1348 	if ((dnode->tn_flags & APPEND) != 0 ||
1349 	    (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
1350 		error = EPERM;
1351 		goto out;
1352 	}
1353 
1354 	/* Detach the directory entry from the directory (dnode). */
1355 	tmpfs_dir_detach(dvp, de);
1356 	if (v->a_cnp->cn_flags & DOWHITEOUT)
1357 		tmpfs_dir_whiteout_add(dvp, v->a_cnp);
1358 
1359 	/* No vnode should be allocated for this entry from this point */
1360 	TMPFS_NODE_LOCK(node);
1361 	node->tn_links--;
1362 	node->tn_dir.tn_parent = NULL;
1363 	node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1364 	node->tn_accessed = true;
1365 
1366 	TMPFS_NODE_UNLOCK(node);
1367 
1368 	TMPFS_NODE_LOCK(dnode);
1369 	dnode->tn_links--;
1370 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1371 	dnode->tn_accessed = true;
1372 	TMPFS_NODE_UNLOCK(dnode);
1373 
1374 	if (tmpfs_use_nc(dvp)) {
1375 		cache_vop_rmdir(dvp, vp);
1376 	}
1377 
1378 	/* Free the directory entry we just deleted.  Note that the node
1379 	 * referred by it will not be removed until the vnode is really
1380 	 * reclaimed. */
1381 	tmpfs_free_dirent(tmp, de);
1382 
1383 	/* Release the deleted vnode (will destroy the node, notify
1384 	 * interested parties and clean it from the cache). */
1385 
1386 	dnode->tn_status |= TMPFS_NODE_CHANGED;
1387 	tmpfs_update(dvp);
1388 
1389 	error = 0;
1390 
1391 out:
1392 	return (error);
1393 }
1394 
1395 static int
1396 tmpfs_symlink(struct vop_symlink_args *v)
1397 {
1398 	struct vnode *dvp = v->a_dvp;
1399 	struct vnode **vpp = v->a_vpp;
1400 	struct componentname *cnp = v->a_cnp;
1401 	struct vattr *vap = v->a_vap;
1402 	const char *target = v->a_target;
1403 
1404 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1405 	MPASS(vap->va_type == VLNK);
1406 #else
1407 	vap->va_type = VLNK;
1408 #endif
1409 
1410 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
1411 }
1412 
1413 static int
1414 tmpfs_readdir(struct vop_readdir_args *va)
1415 {
1416 	struct vnode *vp;
1417 	struct uio *uio;
1418 	struct tmpfs_mount *tm;
1419 	struct tmpfs_node *node;
1420 	uint64_t **cookies;
1421 	int *eofflag, *ncookies;
1422 	ssize_t startresid;
1423 	int error, maxcookies;
1424 
1425 	vp = va->a_vp;
1426 	uio = va->a_uio;
1427 	eofflag = va->a_eofflag;
1428 	cookies = va->a_cookies;
1429 	ncookies = va->a_ncookies;
1430 
1431 	/* This operation only makes sense on directory nodes. */
1432 	if (vp->v_type != VDIR)
1433 		return (ENOTDIR);
1434 
1435 	maxcookies = 0;
1436 	node = VP_TO_TMPFS_DIR(vp);
1437 	tm = VFS_TO_TMPFS(vp->v_mount);
1438 
1439 	startresid = uio->uio_resid;
1440 
1441 	/* Allocate cookies for NFS and compat modules. */
1442 	if (cookies != NULL && ncookies != NULL) {
1443 		maxcookies = howmany(node->tn_size,
1444 		    sizeof(struct tmpfs_dirent)) + 2;
1445 		*cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
1446 		    M_WAITOK);
1447 		*ncookies = 0;
1448 	}
1449 
1450 	if (cookies == NULL)
1451 		error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
1452 	else
1453 		error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
1454 		    ncookies);
1455 
1456 	/* Buffer was filled without hitting EOF. */
1457 	if (error == EJUSTRETURN)
1458 		error = (uio->uio_resid != startresid) ? 0 : EINVAL;
1459 
1460 	if (error != 0 && cookies != NULL && ncookies != NULL) {
1461 		free(*cookies, M_TEMP);
1462 		*cookies = NULL;
1463 		*ncookies = 0;
1464 	}
1465 
1466 	if (eofflag != NULL)
1467 		*eofflag =
1468 		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1469 
1470 	return (error);
1471 }
1472 
1473 static int
1474 tmpfs_readlink(struct vop_readlink_args *v)
1475 {
1476 	struct vnode *vp = v->a_vp;
1477 	struct uio *uio = v->a_uio;
1478 
1479 	int error;
1480 	struct tmpfs_node *node;
1481 
1482 	MPASS(uio->uio_offset == 0);
1483 	MPASS(vp->v_type == VLNK);
1484 
1485 	node = VP_TO_TMPFS_NODE(vp);
1486 
1487 	error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1488 	    uio);
1489 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1490 
1491 	return (error);
1492 }
1493 
1494 /*
1495  * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
1496  * the comment above cache_fplookup for details.
1497  *
1498  * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
1499  */
1500 static int
1501 tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
1502 {
1503 	struct vnode *vp;
1504 	struct tmpfs_node *node;
1505 	char *symlink;
1506 
1507 	vp = v->a_vp;
1508 	node = VP_TO_TMPFS_NODE_SMR(vp);
1509 	if (__predict_false(node == NULL))
1510 		return (EAGAIN);
1511 	if (!atomic_load_char(&node->tn_link_smr))
1512 		return (EAGAIN);
1513 	symlink = atomic_load_ptr(&node->tn_link_target);
1514 	if (symlink == NULL)
1515 		return (EAGAIN);
1516 
1517 	return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
1518 }
1519 
1520 static int
1521 tmpfs_inactive(struct vop_inactive_args *v)
1522 {
1523 	struct vnode *vp;
1524 	struct tmpfs_node *node;
1525 
1526 	vp = v->a_vp;
1527 	node = VP_TO_TMPFS_NODE(vp);
1528 	if (node->tn_links == 0)
1529 		vrecycle(vp);
1530 	else
1531 		tmpfs_check_mtime(vp);
1532 	return (0);
1533 }
1534 
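/*
 * Skip the inactive pass unless the node was unlinked or, for regular
 * files, the backing object was dirtied since it was last cleaned.
 */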
1535 static int
1536 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1537 {
1538 	struct vnode *vp;
1539 	struct tmpfs_node *node;
1540 	struct vm_object *obj;
1541 
1542 	vp = ap->a_vp;
1543 	node = VP_TO_TMPFS_NODE(vp);
1544 	if (node->tn_links == 0)
1545 		goto need;
1546 	if (vp->v_type == VREG) {
1547 		obj = vp->v_object;
1548 		if (obj->generation != obj->cleangeneration)
1549 			goto need;
1550 	}
1551 	return (0);
1552 need:
1553 	return (1);
1554 }
1555 
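/*
 * Detach the vnode from its tmpfs node; if the node has no links left and
 * no other vnode is being allocated for it, free the node as well.
 */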
1556 int
1557 tmpfs_reclaim(struct vop_reclaim_args *v)
1558 {
1559 	struct vnode *vp;
1560 	struct tmpfs_mount *tmp;
1561 	struct tmpfs_node *node;
1562 	bool unlock;
1563 
1564 	vp = v->a_vp;
1565 	node = VP_TO_TMPFS_NODE(vp);
1566 	tmp = VFS_TO_TMPFS(vp->v_mount);
1567 
1568 	if (vp->v_type == VREG)
1569 		tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
1570 	vp->v_object = NULL;
1571 
1572 	TMPFS_LOCK(tmp);
1573 	TMPFS_NODE_LOCK(node);
1574 	tmpfs_free_vp(vp);
1575 
1576 	/*
1577 	 * If the node referenced by this vnode was deleted by the user,
1578 	 * we must free its associated data structures (now that the vnode
1579 	 * is being reclaimed).
1580 	 */
1581 	unlock = true;
1582 	if (node->tn_links == 0 &&
1583 	    (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1584 		node->tn_vpstate = TMPFS_VNODE_DOOMED;
1585 		unlock = !tmpfs_free_node_locked(tmp, node, true);
1586 	}
1587 
1588 	if (unlock) {
1589 		TMPFS_NODE_UNLOCK(node);
1590 		TMPFS_UNLOCK(tmp);
1591 	}
1592 
1593 	MPASS(vp->v_data == NULL);
1594 	return (0);
1595 }
1596 
1597 int
1598 tmpfs_print(struct vop_print_args *v)
1599 {
1600 	struct vnode *vp = v->a_vp;
1601 
1602 	struct tmpfs_node *node;
1603 
1604 	node = VP_TO_TMPFS_NODE(vp);
1605 
1606 	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1607 	    node, node->tn_flags, (uintmax_t)node->tn_links);
1608 	printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1609 	    node->tn_mode, node->tn_uid, node->tn_gid,
1610 	    (intmax_t)node->tn_size, node->tn_status);
1611 
1612 	if (vp->v_type == VFIFO)
1613 		fifo_printinfo(vp);
1614 
1615 	printf("\n");
1616 
1617 	return (0);
1618 }
1619 
1620 int
1621 tmpfs_pathconf(struct vop_pathconf_args *v)
1622 {
1623 	struct vnode *vp = v->a_vp;
1624 	int name = v->a_name;
1625 	long *retval = v->a_retval;
1626 
1627 	int error;
1628 
1629 	error = 0;
1630 
1631 	switch (name) {
1632 	case _PC_LINK_MAX:
1633 		*retval = TMPFS_LINK_MAX;
1634 		break;
1635 
1636 	case _PC_SYMLINK_MAX:
1637 		*retval = MAXPATHLEN;
1638 		break;
1639 
1640 	case _PC_NAME_MAX:
1641 		*retval = NAME_MAX;
1642 		break;
1643 
1644 	case _PC_PIPE_BUF:
1645 		if (vp->v_type == VDIR || vp->v_type == VFIFO)
1646 			*retval = PIPE_BUF;
1647 		else
1648 			error = EINVAL;
1649 		break;
1650 
1651 	case _PC_CHOWN_RESTRICTED:
1652 		*retval = 1;
1653 		break;
1654 
1655 	case _PC_NO_TRUNC:
1656 		*retval = 1;
1657 		break;
1658 
1659 	case _PC_SYNC_IO:
1660 		*retval = 1;
1661 		break;
1662 
1663 	case _PC_FILESIZEBITS:
1664 		*retval = 64;
1665 		break;
1666 
1667 	case _PC_MIN_HOLE_SIZE:
1668 		*retval = PAGE_SIZE;
1669 		break;
1670 
1671 	default:
1672 		error = vop_stdpathconf(v);
1673 	}
1674 
1675 	return (error);
1676 }
1677 
1678 static int
1679 tmpfs_vptofh(struct vop_vptofh_args *ap)
1680 /*
1681 vop_vptofh {
1682 	IN struct vnode *a_vp;
1683 	IN struct fid *a_fhp;
1684 };
1685 */
1686 {
1687 	struct tmpfs_fid_data tfd;
1688 	struct tmpfs_node *node;
1689 	struct fid *fhp;
1690 
1691 	node = VP_TO_TMPFS_NODE(ap->a_vp);
1692 	fhp = ap->a_fhp;
1693 	fhp->fid_len = sizeof(tfd);
1694 
1695 	/*
1696 	 * Copy into fid_data from the stack to avoid unaligned pointer use.
1697 	 * See the comment in sys/mount.h on struct fid for details.
1698 	 */
1699 	tfd.tfd_id = node->tn_id;
1700 	tfd.tfd_gen = node->tn_gen;
1701 	memcpy(fhp->fid_data, &tfd, fhp->fid_len);
1702 
1703 	return (0);
1704 }
1705 
1706 static int
1707 tmpfs_whiteout(struct vop_whiteout_args *ap)
1708 {
1709 	struct vnode *dvp = ap->a_dvp;
1710 	struct componentname *cnp = ap->a_cnp;
1711 	struct tmpfs_dirent *de;
1712 
1713 	switch (ap->a_flags) {
1714 	case LOOKUP:
1715 		return (0);
1716 	case CREATE:
1717 		de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1718 		if (de != NULL)
1719 			return (de->td_node == NULL ? 0 : EEXIST);
1720 		return (tmpfs_dir_whiteout_add(dvp, cnp));
1721 	case DELETE:
1722 		tmpfs_dir_whiteout_remove(dvp, cnp);
1723 		return (0);
1724 	default:
1725 		panic("tmpfs_whiteout: unknown op");
1726 	}
1727 }
1728 
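/*
 * Search directory tnp for the entry referencing node tn; helper for the
 * reverse lookup (vptocnp) code below.
 */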
1729 static int
1730 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1731     struct tmpfs_dirent **pde)
1732 {
1733 	struct tmpfs_dir_cursor dc;
1734 	struct tmpfs_dirent *de;
1735 
1736 	for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1737 	     de = tmpfs_dir_next(tnp, &dc)) {
1738 		if (de->td_node == tn) {
1739 			*pde = de;
1740 			return (0);
1741 		}
1742 	}
1743 	return (ENOENT);
1744 }
1745 
1746 static int
1747 tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
1748     struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
1749 {
1750 	struct tmpfs_dirent *de;
1751 	int error, i;
1752 
1753 	error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
1754 	    dvp);
1755 	if (error != 0)
1756 		return (error);
1757 	error = tmpfs_vptocnp_dir(tn, tnp, &de);
1758 	if (error == 0) {
1759 		i = *buflen;
1760 		i -= de->td_namelen;
1761 		if (i < 0) {
1762 			error = ENOMEM;
1763 		} else {
1764 			bcopy(de->ud.td_name, buf + i, de->td_namelen);
1765 			*buflen = i;
1766 		}
1767 	}
1768 	if (error == 0) {
1769 		if (vp != *dvp)
1770 			VOP_UNLOCK(*dvp);
1771 	} else {
1772 		if (vp != *dvp)
1773 			vput(*dvp);
1774 		else
1775 			vrele(vp);
1776 	}
1777 	return (error);
1778 }
1779 
1780 static int
1781 tmpfs_vptocnp(struct vop_vptocnp_args *ap)
1782 {
1783 	struct vnode *vp, **dvp;
1784 	struct tmpfs_node *tn, *tnp, *tnp1;
1785 	struct tmpfs_dirent *de;
1786 	struct tmpfs_mount *tm;
1787 	char *buf;
1788 	size_t *buflen;
1789 	int error;
1790 
1791 	vp = ap->a_vp;
1792 	dvp = ap->a_vpp;
1793 	buf = ap->a_buf;
1794 	buflen = ap->a_buflen;
1795 
1796 	tm = VFS_TO_TMPFS(vp->v_mount);
1797 	tn = VP_TO_TMPFS_NODE(vp);
1798 	if (tn->tn_type == VDIR) {
1799 		tnp = tn->tn_dir.tn_parent;
1800 		if (tnp == NULL)
1801 			return (ENOENT);
1802 		tmpfs_ref_node(tnp);
1803 		error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
1804 		    buflen, dvp);
1805 		tmpfs_free_node(tm, tnp);
1806 		return (error);
1807 	}
1808 restart:
1809 	TMPFS_LOCK(tm);
1810 restart_locked:
1811 	LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
1812 		if (tnp->tn_type != VDIR)
1813 			continue;
1814 		TMPFS_NODE_LOCK(tnp);
1815 		tmpfs_ref_node(tnp);
1816 
1817 		/*
1818 		 * tn_vnode cannot be instantiated while we hold the
1819 		 * node lock, so the directory cannot be changed while
1820 		 * we iterate over it.  Do this to avoid instantiating
1821 		 * vnode for directories which cannot point to our
1822 		 * node.
1823 		 */
1824 		error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
1825 		    &de) : 0;
1826 
1827 		if (error == 0) {
1828 			TMPFS_NODE_UNLOCK(tnp);
1829 			TMPFS_UNLOCK(tm);
1830 			error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
1831 			    dvp);
1832 			if (error == 0) {
1833 				tmpfs_free_node(tm, tnp);
1834 				return (0);
1835 			}
1836 			if (VN_IS_DOOMED(vp)) {
1837 				tmpfs_free_node(tm, tnp);
1838 				return (ENOENT);
1839 			}
1840 			TMPFS_LOCK(tm);
1841 			TMPFS_NODE_LOCK(tnp);
1842 		}
1843 		if (tmpfs_free_node_locked(tm, tnp, false)) {
1844 			goto restart;
1845 		} else {
1846 			KASSERT(tnp->tn_refcount > 0,
1847 			    ("node %p refcount zero", tnp));
1848 			if (tnp->tn_attached) {
1849 				tnp1 = LIST_NEXT(tnp, tn_entries);
1850 				TMPFS_NODE_UNLOCK(tnp);
1851 			} else {
1852 				TMPFS_NODE_UNLOCK(tnp);
1853 				goto restart_locked;
1854 			}
1855 		}
1856 	}
1857 	TMPFS_UNLOCK(tm);
1858 	return (ENOENT);
1859 }
1860 
1861 void
1862 tmpfs_extattr_free(struct tmpfs_extattr *ea)
1863 {
1864 	free(ea->ea_name, M_TMPFSEA);
1865 	free(ea->ea_value, M_TMPFSEA);
1866 	free(ea, M_TMPFSEA);
1867 }
1868 
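/*
 * Charge (or, for a negative size, credit) extended attribute memory
 * against the mount-wide limits; returns false if the request would exceed
 * them.
 */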
1869 static bool
1870 tmpfs_extattr_update_mem(struct tmpfs_mount *tmp, ssize_t size)
1871 {
1872 	TMPFS_LOCK(tmp);
1873 	if (size > 0 &&
1874 	    !tmpfs_pages_check_avail(tmp, howmany(size, PAGE_SIZE))) {
1875 		TMPFS_UNLOCK(tmp);
1876 		return (false);
1877 	}
1878 	if (tmp->tm_ea_memory_inuse + size > tmp->tm_ea_memory_max) {
1879 		TMPFS_UNLOCK(tmp);
1880 		return (false);
1881 	}
1882 	tmp->tm_ea_memory_inuse += size;
1883 	TMPFS_UNLOCK(tmp);
1884 	return (true);
1885 }
1886 
1887 static int
1888 tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
1889 {
1890 	struct vnode *vp = ap->a_vp;
1891 	struct tmpfs_mount *tmp;
1892 	struct tmpfs_node *node;
1893 	struct tmpfs_extattr *ea;
1894 	size_t namelen;
1895 	ssize_t diff;
1896 	int error;
1897 
1898 	node = VP_TO_TMPFS_NODE(vp);
1899 	tmp = VFS_TO_TMPFS(vp->v_mount);
1900 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1901 		return (EOPNOTSUPP);
1902 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1903 	    ap->a_cred, ap->a_td, VWRITE);
1904 	if (error != 0)
1905 		return (error);
1906 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
1907 		return (EINVAL);
1908 	namelen = strlen(ap->a_name);
1909 	if (namelen > EXTATTR_MAXNAMELEN)
1910 		return (EINVAL);
1911 
1912 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1913 		if (ea->ea_namespace == ap->a_attrnamespace &&
1914 		    namelen == ea->ea_namelen &&
1915 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1916 			break;
1917 	}
1918 
1919 	if (ea == NULL)
1920 		return (ENOATTR);
1921 	LIST_REMOVE(ea, ea_extattrs);
1922 	diff = -(sizeof(struct tmpfs_extattr) + namelen + ea->ea_size);
1923 	tmpfs_extattr_update_mem(tmp, diff);
1924 	tmpfs_extattr_free(ea);
1925 	return (0);
1926 }
1927 
1928 static int
1929 tmpfs_getextattr(struct vop_getextattr_args *ap)
1930 {
1931 	struct vnode *vp = ap->a_vp;
1932 	struct tmpfs_node *node;
1933 	struct tmpfs_extattr *ea;
1934 	size_t namelen;
1935 	int error;
1936 
1937 	node = VP_TO_TMPFS_NODE(vp);
1938 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1939 		return (EOPNOTSUPP);
1940 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1941 	    ap->a_cred, ap->a_td, VREAD);
1942 	if (error != 0)
1943 		return (error);
1944 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
1945 		return (EINVAL);
1946 	namelen = strlen(ap->a_name);
1947 	if (namelen > EXTATTR_MAXNAMELEN)
1948 		return (EINVAL);
1949 
1950 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1951 		if (ea->ea_namespace == ap->a_attrnamespace &&
1952 		    namelen == ea->ea_namelen &&
1953 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1954 			break;
1955 	}
1956 
1957 	if (ea == NULL)
1958 		return (ENOATTR);
1959 	if (ap->a_size != NULL)
1960 		*ap->a_size = ea->ea_size;
1961 	if (ap->a_uio != NULL && ea->ea_size != 0)
1962 		error = uiomove(ea->ea_value, ea->ea_size, ap->a_uio);
1963 	return (error);
1964 }
1965 
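/*
 * List the extended attribute names in the requested namespace.
 * Each name is emitted as a one-byte length followed by the name
 * itself, without a terminating NUL.
 */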
1966 static int
1967 tmpfs_listextattr(struct vop_listextattr_args *ap)
1968 {
1969 	struct vnode *vp = ap->a_vp;
1970 	struct tmpfs_node *node;
1971 	struct tmpfs_extattr *ea;
1972 	int error;
1973 
1974 	node = VP_TO_TMPFS_NODE(vp);
1975 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1976 		return (EOPNOTSUPP);
1977 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1978 	    ap->a_cred, ap->a_td, VREAD);
1979 	if (error != 0)
1980 		return (error);
1981 	if (ap->a_size != NULL)
1982 		*ap->a_size = 0;
1983 
1984 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1985 		if (ea->ea_namespace != ap->a_attrnamespace)
1986 			continue;
1987 		if (ap->a_size != NULL)
1988 			*ap->a_size += ea->ea_namelen + 1;
1989 		if (ap->a_uio != NULL) {
1990 			error = uiomove(&ea->ea_namelen, 1, ap->a_uio);
1991 			if (error != 0)
1992 				break;
1993 			error = uiomove(ea->ea_name, ea->ea_namelen, ap->a_uio);
1994 			if (error != 0)
1995 				break;
1996 		}
1997 	}
1998 
1999 	return (error);
2000 }
2001 
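/*
 * Create or replace the named extended attribute, charging the size
 * change against the mount's extattr memory limit.  Fails with
 * ENOSPC if the limit would be exceeded.
 */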
2002 static int
2003 tmpfs_setextattr(struct vop_setextattr_args *ap)
2004 {
2005 	struct vnode *vp = ap->a_vp;
2006 	struct tmpfs_mount *tmp;
2007 	struct tmpfs_node *node;
2008 	struct tmpfs_extattr *ea;
2009 	struct tmpfs_extattr *new_ea;
2010 	size_t attr_size;
2011 	size_t namelen;
2012 	ssize_t diff;
2013 	int error;
2014 
2015 	node = VP_TO_TMPFS_NODE(vp);
2016 	tmp = VFS_TO_TMPFS(vp->v_mount);
2017 	attr_size = ap->a_uio->uio_resid;
2018 	diff = 0;
2019 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
2020 		return (EOPNOTSUPP);
2021 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2022 	    ap->a_cred, ap->a_td, VWRITE);
2023 	if (error != 0)
2024 		return (error);
2025 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
2026 		return (EINVAL);
2027 	namelen = strlen(ap->a_name);
2028 	if (namelen > EXTATTR_MAXNAMELEN)
2029 		return (EINVAL);
2030 
2031 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2032 		if (ea->ea_namespace == ap->a_attrnamespace &&
2033 		    namelen == ea->ea_namelen &&
2034 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0) {
2035 			diff -= sizeof(struct tmpfs_extattr) + ea->ea_namelen +
2036 			    ea->ea_size;
2037 			break;
2038 		}
2039 	}
2040 
2041 	diff += sizeof(struct tmpfs_extattr) + namelen + attr_size;
2042 	if (!tmpfs_extattr_update_mem(tmp, diff))
2043 		return (ENOSPC);
2044 	new_ea = malloc(sizeof(struct tmpfs_extattr), M_TMPFSEA, M_WAITOK);
2045 	new_ea->ea_namespace = ap->a_attrnamespace;
2046 	new_ea->ea_name = malloc(namelen, M_TMPFSEA, M_WAITOK);
2047 	new_ea->ea_namelen = namelen;
2048 	memcpy(new_ea->ea_name, ap->a_name, namelen);
2049 	if (attr_size != 0) {
2050 		new_ea->ea_value = malloc(attr_size, M_TMPFSEA, M_WAITOK);
2051 		new_ea->ea_size = attr_size;
2052 		error = uiomove(new_ea->ea_value, attr_size, ap->a_uio);
2053 	} else {
2054 		new_ea->ea_value = NULL;
2055 		new_ea->ea_size = 0;
2056 	}
2057 	if (error != 0) {
2058 		tmpfs_extattr_update_mem(tmp, -diff);
2059 		tmpfs_extattr_free(new_ea);
2060 		return (error);
2061 	}
2062 	if (ea != NULL) {
2063 		LIST_REMOVE(ea, ea_extattrs);
2064 		tmpfs_extattr_free(ea);
2065 	}
2066 	LIST_INSERT_HEAD(&node->tn_extattrs, new_ea, ea_extattrs);
2067 	return (0);
2068 }
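/*
 * Illustrative sketch (not part of this file): the extattr VOPs above
 * back the extattr(2) family of system calls on tmpfs; e.g. from
 * userland ("/tmp/f" and "tag" are arbitrary example names):
 *
 *	extattr_set_file("/tmp/f", EXTATTR_NAMESPACE_USER, "tag",
 *	    "value", strlen("value"));
 *	extattr_get_file("/tmp/f", EXTATTR_NAMESPACE_USER, "tag",
 *	    buf, sizeof(buf));
 *	extattr_delete_file("/tmp/f", EXTATTR_NAMESPACE_USER, "tag");
 */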
2069 
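/*
 * Return the first offset at or after "noff" that is backed by a
 * resident page or a swap block (SEEK_DATA).  The object lock must
 * be held.
 */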
2070 static off_t
2071 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
2072 {
2073 	vm_page_t m;
2074 	vm_pindex_t p, p_m, p_swp;
2075 
2076 	p = OFF_TO_IDX(noff);
2077 	m = vm_page_find_least(obj, p);
2078 
2079 	/*
2080 	 * Microoptimize the most common case for SEEK_DATA, where
2081 	 * there is no hole and the page is resident.
2082 	 */
2083 	if (m != NULL && vm_page_any_valid(m) && m->pindex == p)
2084 		return (noff);
2085 
2086 	p_swp = swap_pager_find_least(obj, p);
2087 	if (p_swp == p)
2088 		return (noff);
2089 
2090 	p_m = m == NULL ? obj->size : m->pindex;
2091 	return (IDX_TO_OFF(MIN(p_m, p_swp)));
2092 }
2093 
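/*
 * Advance "noff" to the start of the next page.
 */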
2094 static off_t
2095 tmpfs_seek_next(off_t noff)
2096 {
2097 	return (noff + PAGE_SIZE - (noff & PAGE_MASK));
2098 }
2099 
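/*
 * Bound "*noff" by the file size: an offset at or beyond EOF fails
 * with ENXIO when seeking for data and is clamped to EOF when
 * seeking for a hole.
 */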
2100 static int
2101 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
2102 {
2103 	if (*noff < tn->tn_size)
2104 		return (0);
2105 	if (seekdata)
2106 		return (ENXIO);
2107 	*noff = tn->tn_size;
2108 	return (0);
2109 }
2110 
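/*
 * Return the first offset at or after "noff" that is backed by
 * neither a valid resident page nor a swap block (SEEK_HOLE).  The
 * object lock must be held.
 */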
2111 static off_t
2112 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
2113 {
2114 	vm_page_t m;
2115 	vm_pindex_t p, p_swp;
2116 
2117 	for (;; noff = tmpfs_seek_next(noff)) {
2118 		/*
2119 		 * Walk over the largest sequential run of the valid pages.
2120 		 * Walk over the longest run of consecutive resident valid pages.
2121 		for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
2122 		    m != NULL && vm_page_any_valid(m);
2123 		    m = vm_page_next(m), noff = tmpfs_seek_next(noff))
2124 			;
2125 
2126 		/*
2127 		 * Found a hole in the object's page queue.  Check if
2128 		 * there is a hole in the swap at the same place.
2129 		 */
2130 		p = OFF_TO_IDX(noff);
2131 		p_swp = swap_pager_find_least(obj, p);
2132 		if (p_swp != p) {
2133 			noff = IDX_TO_OFF(p);
2134 			break;
2135 		}
2136 	}
2137 	return (noff);
2138 }
2139 
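/*
 * Common handler for the FIOSEEKDATA and FIOSEEKHOLE ioctls on
 * regular files: validate and clamp the offset, then scan the
 * node's anonymous object for the next data or hole boundary.
 */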
2140 static int
2141 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
2142 {
2143 	struct tmpfs_node *tn;
2144 	vm_object_t obj;
2145 	off_t noff;
2146 	int error;
2147 
2148 	if (vp->v_type != VREG)
2149 		return (ENOTTY);
2150 	tn = VP_TO_TMPFS_NODE(vp);
2151 	noff = *off;
2152 	if (noff < 0)
2153 		return (ENXIO);
2154 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
2155 	if (error != 0)
2156 		return (error);
2157 	obj = tn->tn_reg.tn_aobj;
2158 
2159 	VM_OBJECT_RLOCK(obj);
2160 	noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
2161 	    tmpfs_seek_hole_locked(obj, noff);
2162 	VM_OBJECT_RUNLOCK(obj);
2163 
2164 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
2165 	if (error == 0)
2166 		*off = noff;
2167 	return (error);
2168 }
2169 
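/*
 * Only the FIOSEEKDATA and FIOSEEKHOLE commands are handled here;
 * anything else returns ENOTTY.
 */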
2170 static int
2171 tmpfs_ioctl(struct vop_ioctl_args *ap)
2172 {
2173 	struct vnode *vp = ap->a_vp;
2174 	int error = 0;
2175 
2176 	switch (ap->a_command) {
2177 	case FIOSEEKDATA:
2178 	case FIOSEEKHOLE:
2179 		error = vn_lock(vp, LK_SHARED);
2180 		if (error != 0) {
2181 			error = EBADF;
2182 			break;
2183 		}
2184 		error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
2185 		    ap->a_command == FIOSEEKDATA);
2186 		VOP_UNLOCK(vp);
2187 		break;
2188 	default:
2189 		error = ENOTTY;
2190 		break;
2191 	}
2192 	return (error);
2193 }
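/*
 * Illustrative sketch (not part of this file): the FIOSEEKDATA and
 * FIOSEEKHOLE handling above is what lseek(2) with SEEK_DATA/SEEK_HOLE
 * typically exercises on a tmpfs file, e.g. from userland ("fd" is
 * assumed to be an open tmpfs file descriptor):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  // start of first data run
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that run
 *
 * An offset at or past EOF yields ENXIO for SEEK_DATA, matching
 * tmpfs_seek_clamp() above.
 */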
2194 
2195 /*
2196  * Vnode operations vector used for files stored in a tmpfs file system.
2197  */
2198 struct vop_vector tmpfs_vnodeop_entries = {
2199 	.vop_default =			&default_vnodeops,
2200 	.vop_lookup =			vfs_cache_lookup,
2201 	.vop_cachedlookup =		tmpfs_cached_lookup,
2202 	.vop_create =			tmpfs_create,
2203 	.vop_mknod =			tmpfs_mknod,
2204 	.vop_open =			tmpfs_open,
2205 	.vop_close =			tmpfs_close,
2206 	.vop_fplookup_vexec =		tmpfs_fplookup_vexec,
2207 	.vop_fplookup_symlink =		tmpfs_fplookup_symlink,
2208 	.vop_access =			tmpfs_access,
2209 	.vop_stat =			tmpfs_stat,
2210 	.vop_getattr =			tmpfs_getattr,
2211 	.vop_setattr =			tmpfs_setattr,
2212 	.vop_read =			tmpfs_read,
2213 	.vop_read_pgcache =		tmpfs_read_pgcache,
2214 	.vop_write =			tmpfs_write,
2215 	.vop_deallocate =		tmpfs_deallocate,
2216 	.vop_fsync =			tmpfs_fsync,
2217 	.vop_remove =			tmpfs_remove,
2218 	.vop_link =			tmpfs_link,
2219 	.vop_rename =			tmpfs_rename,
2220 	.vop_mkdir =			tmpfs_mkdir,
2221 	.vop_rmdir =			tmpfs_rmdir,
2222 	.vop_symlink =			tmpfs_symlink,
2223 	.vop_readdir =			tmpfs_readdir,
2224 	.vop_readlink =			tmpfs_readlink,
2225 	.vop_inactive =			tmpfs_inactive,
2226 	.vop_need_inactive =		tmpfs_need_inactive,
2227 	.vop_reclaim =			tmpfs_reclaim,
2228 	.vop_print =			tmpfs_print,
2229 	.vop_pathconf =			tmpfs_pathconf,
2230 	.vop_vptofh =			tmpfs_vptofh,
2231 	.vop_whiteout =			tmpfs_whiteout,
2232 	.vop_bmap =			VOP_EOPNOTSUPP,
2233 	.vop_vptocnp =			tmpfs_vptocnp,
2234 	.vop_lock1 =			vop_lock,
2235 	.vop_unlock =			vop_unlock,
2236 	.vop_islocked =			vop_islocked,
2237 	.vop_deleteextattr =		tmpfs_deleteextattr,
2238 	.vop_getextattr =		tmpfs_getextattr,
2239 	.vop_listextattr =		tmpfs_listextattr,
2240 	.vop_setextattr =		tmpfs_setextattr,
2241 	.vop_add_writecount =		vop_stdadd_writecount_nomsync,
2242 	.vop_ioctl =			tmpfs_ioctl,
2243 };
2244 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2245 
2246 /*
2247  * Variant of the above vector for mounts which do not use the namecache.
2248  */
2249 struct vop_vector tmpfs_vnodeop_nonc_entries = {
2250 	.vop_default =			&tmpfs_vnodeop_entries,
2251 	.vop_lookup =			tmpfs_lookup,
2252 };
2253 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);
2254