xref: /dragonfly/sys/vfs/tmpfs/tmpfs_subr.c (revision 52f9f0d9)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9  * 2005 program.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Efficient memory file system supporting functions.
35  */
36 
37 #include <sys/kernel.h>
38 #include <sys/param.h>
39 #include <sys/namei.h>
40 #include <sys/priv.h>
41 #include <sys/proc.h>
42 #include <sys/spinlock2.h>
43 #include <sys/stat.h>
44 #include <sys/systm.h>
45 #include <sys/vnode.h>
46 #include <sys/vmmeter.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_object.h>
50 #include <vm/vm_page.h>
51 #include <vm/vm_pager.h>
52 #include <vm/vm_extern.h>
53 
54 #include <vfs/tmpfs/tmpfs.h>
55 #include <vfs/tmpfs/tmpfs_vnops.h>
56 
57 static ino_t tmpfs_fetch_ino(struct tmpfs_mount *);
58 
59 /* --------------------------------------------------------------------- */
60 
61 /*
62  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
63  * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
64  * using the credentials of the process 'p'.
65  *
66  * If the node type is set to 'VDIR', then the parent parameter must point
67  * to the parent directory of the node being created.  It may only be NULL
68  * while allocating the root node.
69  *
70  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
71  * specifies the device the node represents.
72  *
73  * If the node type is set to 'VLNK', then the parameter target specifies
74  * the file name of the target file for the symbolic link that is being
75  * created.
76  *
77  * Note that new nodes are retrieved from the available list if it has
78  * items or, if it is empty, from the node pool as long as there is enough
79  * space to create them.
80  *
81  * Returns zero on success or an appropriate error code on failure.
82  */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
    uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
    char *target, int rmajor, int rminor, struct tmpfs_node **node)
{
	struct tmpfs_node *nnode;
	struct timespec ts;
	udev_t rdev;

	/* If the root directory of the 'tmp' file system is not yet
	 * allocated, this must be the request to do it. */
	KKASSERT(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));

	/* Symlinks require a target; device nodes require a major number. */
	KKASSERT(IFF(type == VLNK, target != NULL));
	KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

	/* Enforce the mount-wide limit on the number of nodes. */
	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
		return (ENOSPC);

	/*
	 * M_NULLOK lets the objcache fail instead of blocking indefinitely
	 * when the pool is exhausted; treat that as out of space.
	 */
	nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
	if (nnode == NULL)
		return (ENOSPC);

	/* Generic initialization. */
	nnode->tn_type = type;
	vfs_timestamp(&ts);
	/* A new node starts with identical access/modify/change times. */
	nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
		= ts.tv_sec;
	nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
		= ts.tv_nsec;
	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;
	nnode->tn_id = tmpfs_fetch_ino(tmp);
	nnode->tn_advlock.init_done = 0;

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		/* Validate and record the device number for device nodes. */
		rdev = makeudev(rmajor, rminor);
		if (rdev == NOUDEV) {
			objcache_put(tmp->tm_node_pool, nnode);
			return(EINVAL);
		}
		nnode->tn_rdev = rdev;
		break;

	case VDIR:
		TAILQ_INIT(&nnode->tn_dir.tn_dirhead);
		KKASSERT(parent != nnode);
		KKASSERT(IMPLIES(parent == NULL, tmp->tm_root == NULL));
		nnode->tn_dir.tn_parent = parent;
		nnode->tn_dir.tn_readdir_lastn = 0;
		nnode->tn_dir.tn_readdir_lastp = NULL;
		/* Extra link accounts for the directory's own "." entry. */
		nnode->tn_links++;
		nnode->tn_size = 0;
		if (parent) {
			/* The new directory's ".." adds a link to the parent. */
			TMPFS_NODE_LOCK(parent);
			parent->tn_links++;
			TMPFS_NODE_UNLOCK(parent);
		}
		break;

	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		/* Copy the symlink target; the name zone may be exhausted. */
		nnode->tn_size = strlen(target);
		nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
					 M_WAITOK | M_NULLOK);
		if (nnode->tn_link == NULL) {
			objcache_put(tmp->tm_node_pool, nnode);
			return (ENOSPC);
		}
		bcopy(target, nnode->tn_link, nnode->tn_size);
		nnode->tn_link[nnode->tn_size] = '\0';
		break;

	case VREG:
		/* Regular file data lives in an anonymous swap-backed object. */
		nnode->tn_reg.tn_aobj =
		    swap_pager_alloc(NULL, 0, VM_PROT_DEFAULT, 0);
		nnode->tn_reg.tn_aobj_pages = 0;
		nnode->tn_size = 0;
		break;

	default:
		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
	}

	/* Publish the fully-initialized node on the mount's in-use list. */
	TMPFS_NODE_LOCK(nnode);
	TMPFS_LOCK(tmp);
	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
	tmp->tm_nodes_inuse++;
	TMPFS_UNLOCK(tmp);
	TMPFS_NODE_UNLOCK(nnode);

	*node = nnode;
	return 0;
}
185 
186 /* --------------------------------------------------------------------- */
187 
188 /*
189  * Destroys the node pointed to by node from the file system 'tmp'.
190  * If the node does not belong to the given mount point, the results are
191  * unpredicted.
192  *
193  * If the node references a directory; no entries are allowed because
194  * their removal could need a recursive algorithm, something forbidden in
195  * kernel space.  Furthermore, there is not need to provide such
196  * functionality (recursive removal) because the only primitives offered
197  * to the user are the removal of empty directories and the deletion of
198  * individual files.
199  *
200  * Note that nodes are not really deleted; in fact, when a node has been
201  * allocated, it cannot be deleted during the whole life of the file
202  * system.  Instead, they are moved to the available list and remain there
203  * until reused.
204  */
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
	vm_pindex_t pages = 0;

#ifdef INVARIANTS
	/* Caller must hold the node locked; no vnode may still reference it. */
	TMPFS_ASSERT_ELOCKED(node);
	KKASSERT(node->tn_vnode == NULL);
	KKASSERT((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
#endif

	/* Unhook the node from the mount before tearing it down. */
	TMPFS_LOCK(tmp);
	LIST_REMOVE(node, tn_entries);
	tmp->tm_nodes_inuse--;
	TMPFS_UNLOCK(tmp);
	/*
	 * The node is off the in-use list now, so the node lock can be
	 * released for the remaining type-specific teardown.
	 */
	TMPFS_NODE_UNLOCK(node);

	switch (node->tn_type) {
	case VNON:
		/* Do not do anything.  VNON is provided to let the
		 * allocation routine clean itself easily by avoiding
		 * duplicating code in it. */
		/* FALLTHROUGH */
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
		break;
	case VDIR:
		/*
		 * The parent link can be NULL if this is the root
		 * node.
		 */
		node->tn_links--;
		node->tn_size = 0;
		KKASSERT(node->tn_dir.tn_parent || node == tmp->tm_root);
		if (node->tn_dir.tn_parent) {
			TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
			node->tn_dir.tn_parent->tn_links--;

			/*
			 * If the parent directory has no more links and
			 * no vnode ref nothing is going to come along
			 * and clean it up unless we do it here.
			 */
			if (node->tn_dir.tn_parent->tn_links == 0 &&
			    node->tn_dir.tn_parent->tn_vnode == NULL) {
				tmpfs_free_node(tmp, node->tn_dir.tn_parent);
				/* eats parent lock */
			} else {
				TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
			}
			node->tn_dir.tn_parent = NULL;
		}

		/*
		 * If the root node is being destroyed don't leave a
		 * dangling pointer in tmpfs_mount.
		 */
		if (node == tmp->tm_root)
			tmp->tm_root = NULL;
		break;
	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		/* Release the symlink target copied in tmpfs_alloc_node(). */
		kfree(node->tn_link, tmp->tm_name_zone);
		node->tn_link = NULL;
		node->tn_size = 0;
		break;

	case VREG:
		/* Drop the swap-backed data object and remember its page
		 * count so the mount-wide accounting is adjusted below. */
		if (node->tn_reg.tn_aobj != NULL)
			vm_object_deallocate(node->tn_reg.tn_aobj);
		node->tn_reg.tn_aobj = NULL;
		pages = node->tn_reg.tn_aobj_pages;
		break;

	default:
		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
	}

	/*
	 * Clean up fields for the next allocation.  The objcache only ctors
	 * new allocations.
	 */
	tmpfs_node_ctor(node, NULL, 0);
	objcache_put(tmp->tm_node_pool, node);
	/* node is now invalid */

	TMPFS_LOCK(tmp);
	tmp->tm_pages_used -= pages;
	TMPFS_UNLOCK(tmp);
}
301 
302 /* --------------------------------------------------------------------- */
303 
304 /*
305  * Allocates a new directory entry for the node node with a name of name.
306  * The new directory entry is returned in *de.
307  *
308  * The link count of node is increased by one to reflect the new object
309  * referencing it.
310  *
311  * Returns zero on success or an appropriate error code on failure.
312  */
313 int
314 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
315     const char *name, uint16_t len, struct tmpfs_dirent **de)
316 {
317 	struct tmpfs_dirent *nde;
318 
319 	nde = objcache_get(tmp->tm_dirent_pool, M_WAITOK);
320 	nde->td_name = kmalloc(len + 1, tmp->tm_name_zone, M_WAITOK | M_NULLOK);
321 	if (nde->td_name == NULL) {
322 		objcache_put(tmp->tm_dirent_pool, nde);
323 		*de = NULL;
324 		return (ENOSPC);
325 	}
326 	nde->td_namelen = len;
327 	bcopy(name, nde->td_name, len);
328 	nde->td_name[len] = '\0';
329 
330 	nde->td_node = node;
331 
332 	TMPFS_NODE_LOCK(node);
333 	node->tn_links++;
334 	TMPFS_NODE_UNLOCK(node);
335 
336 	*de = nde;
337 
338 	return 0;
339 }
340 
341 /* --------------------------------------------------------------------- */
342 
343 /*
344  * Frees a directory entry.  It is the caller's responsibility to destroy
345  * the node referenced by it if needed.
346  *
347  * The link count of node is decreased by one to reflect the removal of an
348  * object that referenced it.  This only happens if 'node_exists' is true;
349  * otherwise the function will not access the node referred to by the
350  * directory entry, as it may already have been released from the outside.
351  */
352 void
353 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
354 {
355 	struct tmpfs_node *node;
356 
357 	node = de->td_node;
358 
359 	TMPFS_NODE_LOCK(node);
360 	TMPFS_ASSERT_ELOCKED(node);
361 	KKASSERT(node->tn_links > 0);
362 	node->tn_links--;
363 	TMPFS_NODE_UNLOCK(node);
364 
365 	kfree(de->td_name, tmp->tm_name_zone);
366 	de->td_namelen = 0;
367 	de->td_name = NULL;
368 	de->td_node = NULL;
369 	objcache_put(tmp->tm_dirent_pool, de);
370 }
371 
372 /* --------------------------------------------------------------------- */
373 
374 /*
375  * Allocates a new vnode for the node node or returns a new reference to
376  * an existing one if the node had already a vnode referencing it.  The
377  * resulting locked vnode is returned in *vpp.
378  *
379  * Returns zero on success or an appropriate error code on failure.
380  */
int
tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
	       struct vnode **vpp)
{
	int error = 0;
	struct vnode *vp;

loop:
	/*
	 * Interlocked extraction from node.  This can race many things.
	 * We have to get a soft reference on the vnode while we hold
	 * the node locked, then acquire it properly and check for races.
	 */
	TMPFS_NODE_LOCK(node);
	if ((vp = node->tn_vnode) != NULL) {
		KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
		/* Soft hold keeps the vnode from being freed across vget(). */
		vhold_interlocked(vp);
		TMPFS_NODE_UNLOCK(node);

		if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
			/* vnode was reclaimed out from under us; retry. */
			vdrop(vp);
			goto loop;
		}
		if (node->tn_vnode != vp) {
			/* Association changed while we slept in vget; retry. */
			vput(vp);
			vdrop(vp);
			goto loop;
		}
		vdrop(vp);
		goto out;
	}
	/* vp is NULL */

	/*
	 * This should never happen.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
		TMPFS_NODE_UNLOCK(node);
		error = ENOENT;
		goto out;
	}

	/*
	 * Interlock against other calls to tmpfs_alloc_vp() trying to
	 * allocate and assign a vp to node.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
		/* Another thread is already allocating; wait and retry. */
		node->tn_vpstate |= TMPFS_VNODE_WANT;
		error = tsleep(&node->tn_vpstate, PINTERLOCKED | PCATCH,
			       "tmpfs_alloc_vp", 0);
		TMPFS_NODE_UNLOCK(node);
		if (error)
			return error;
		goto loop;
	}
	node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
	TMPFS_NODE_UNLOCK(node);

	/*
	 * Allocate a new vnode (may block).  The ALLOCATING flag should
	 * prevent a race against someone else assigning node->tn_vnode.
	 */
	error = getnewvnode(VT_TMPFS, mp, &vp, VLKTIMEOUT, LK_CANRECURSE);
	if (error != 0)
		goto unlock;

	KKASSERT(node->tn_vnode == NULL);
	KKASSERT(vp != NULL);
	vp->v_data = node;
	vp->v_type = node->tn_type;

	/* Type-specific initialization. */
	switch (node->tn_type) {
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		break;
	case VREG:
		/* Regular files participate in buffer-cache / VM I/O. */
		vinitvmio(vp, node->tn_size, BMASK, -1);
		break;
	case VLNK:
		break;
	case VFIFO:
		/* FIFOs route vnode operations through the fifo ops vector. */
		vp->v_ops = &mp->mnt_vn_fifo_ops;
		break;
	case VDIR:
		break;

	default:
		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
	}

	insmntque(vp, mp);

unlock:
	TMPFS_NODE_LOCK(node);

	/* Clear the interlock and publish the result in tn_vnode. */
	KKASSERT(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
	node->tn_vnode = vp;

	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
		/* Wake up threads waiting in the interlock above. */
		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
		TMPFS_NODE_UNLOCK(node);
		wakeup(&node->tn_vpstate);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

out:
	*vpp = vp;

	/* On success the returned vnode is referenced and locked. */
	KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));
#ifdef INVARIANTS
	TMPFS_NODE_LOCK(node);
	KKASSERT(*vpp == node->tn_vnode);
	TMPFS_NODE_UNLOCK(node);
#endif

	return error;
}
504 
505 /* --------------------------------------------------------------------- */
506 
507 /*
508  * Destroys the association between the vnode vp and the node it
509  * references.
510  */
511 void
512 tmpfs_free_vp(struct vnode *vp)
513 {
514 	struct tmpfs_node *node;
515 
516 	node = VP_TO_TMPFS_NODE(vp);
517 
518 	TMPFS_NODE_LOCK(node);
519 	KKASSERT(lockcount(TMPFS_NODE_MTX(node)) > 0);
520 	node->tn_vnode = NULL;
521 	TMPFS_NODE_UNLOCK(node);
522 	vp->v_data = NULL;
523 }
524 
525 /* --------------------------------------------------------------------- */
526 
527 /*
528  * Allocates a new file of type 'type' and adds it to the parent directory
529  * 'dvp'; this addition is done using the component name given in 'cnp'.
530  * The ownership of the new file is automatically assigned based on the
531  * credentials of the caller (through 'cnp'), the group is set based on
532  * the parent directory and the mode is determined from the 'vap' argument.
533  * If successful, *vpp holds a vnode to the newly created file and zero
534  * is returned.  Otherwise *vpp is NULL and the function returns an
535  * appropriate error code.
536  */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
		 struct namecache *ncp, struct ucred *cred, char *target)
{
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	struct tmpfs_node *parent;

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* If the entry we are creating is a directory, we cannot overflow
	 * the number of links of its parent, because it will get a new
	 * link. */
	if (vap->va_type == VDIR) {
		/* Ensure that we do not overflow the maximum number of links
		 * imposed by the system. */
		KKASSERT(dnode->tn_links <= LINK_MAX);
		if (dnode->tn_links == LINK_MAX) {
			return EMLINK;
		}

		parent = dnode;
		KKASSERT(parent != NULL);
	} else
		parent = NULL;

	/* Allocate a node that represents the new file. */
	error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
	    dnode->tn_gid, vap->va_mode, parent, target, vap->va_rmajor, vap->va_rminor, &node);
	if (error != 0)
		return error;
	/* Hold the new node locked across dirent and vnode setup. */
	TMPFS_NODE_LOCK(node);

	/* Allocate a directory entry that points to the new file. */
	error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0) {
		tmpfs_free_node(tmp, node);
		/* eats node lock */
		return error;
	}

	/* Allocate a vnode for the new file. */
	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
	if (error != 0) {
		/* Unwind: release the dirent (and its link) then the node. */
		tmpfs_free_dirent(tmp, de);
		tmpfs_free_node(tmp, node);
		/* eats node lock */
		return error;
	}

	/* Now that all required items are allocated, we can proceed to
	 * insert the new node into the directory, an operation that
	 * cannot fail. */
	tmpfs_dir_attach(dnode, de);
	TMPFS_NODE_UNLOCK(node);

	return error;
}
600 
601 /* --------------------------------------------------------------------- */
602 
603 /*
604  * Attaches the directory entry de to the directory represented by vp.
605  * Note that this does not change the link count of the node pointed by
606  * the directory entry, as this is done by tmpfs_alloc_dirent.
607  */
608 void
609 tmpfs_dir_attach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
610 {
611 	TMPFS_NODE_LOCK(dnode);
612 	TAILQ_INSERT_TAIL(&dnode->tn_dir.tn_dirhead, de, td_entries);
613 
614 	TMPFS_ASSERT_ELOCKED(dnode);
615 	dnode->tn_size += sizeof(struct tmpfs_dirent);
616 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
617 			    TMPFS_NODE_MODIFIED;
618 	TMPFS_NODE_UNLOCK(dnode);
619 }
620 
621 /* --------------------------------------------------------------------- */
622 
623 /*
624  * Detaches the directory entry de from the directory represented by vp.
625  * Note that this does not change the link count of the node pointed by
626  * the directory entry, as this is done by tmpfs_free_dirent.
627  */
628 void
629 tmpfs_dir_detach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
630 {
631 	TMPFS_NODE_LOCK(dnode);
632 	if (dnode->tn_dir.tn_readdir_lastp == de) {
633 		dnode->tn_dir.tn_readdir_lastn = 0;
634 		dnode->tn_dir.tn_readdir_lastp = NULL;
635 	}
636 	TAILQ_REMOVE(&dnode->tn_dir.tn_dirhead, de, td_entries);
637 
638 	TMPFS_ASSERT_ELOCKED(dnode);
639 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
640 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
641 			    TMPFS_NODE_MODIFIED;
642 	TMPFS_NODE_UNLOCK(dnode);
643 }
644 
645 /* --------------------------------------------------------------------- */
646 
647 /*
648  * Looks for a directory entry in the directory represented by node.
649  * 'ncp' describes the name of the entry to look for.  Note that the .
650  * and .. components are not allowed as they do not physically exist
651  * within directories.
652  *
653  * Returns a pointer to the entry when found, otherwise NULL.
654  */
655 struct tmpfs_dirent *
656 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
657     struct namecache *ncp)
658 {
659 	struct tmpfs_dirent *de;
660 	int len = ncp->nc_nlen;
661 
662 	TMPFS_VALIDATE_DIR(node);
663 
664 	TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
665 		if (f != NULL && de->td_node != f)
666 		    continue;
667 		if (len == de->td_namelen) {
668 			if (!memcmp(ncp->nc_name, de->td_name, len))
669 				break;
670 		}
671 	}
672 
673 	TMPFS_NODE_LOCK(node);
674 	node->tn_status |= TMPFS_NODE_ACCESSED;
675 	TMPFS_NODE_UNLOCK(node);
676 
677 	return de;
678 }
679 
680 /* --------------------------------------------------------------------- */
681 
682 /*
683  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
684  * directory and returns it in the uio space.  The function returns 0
685  * on success, -1 if there was not enough space in the uio structure to
686  * hold the directory entry or an appropriate error code if another
687  * error happens.
688  */
689 int
690 tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
691 {
692 	int error;
693 	struct dirent dent;
694 	int dirsize;
695 
696 	TMPFS_VALIDATE_DIR(node);
697 	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
698 
699 	dent.d_ino = node->tn_id;
700 	dent.d_type = DT_DIR;
701 	dent.d_namlen = 1;
702 	dent.d_name[0] = '.';
703 	dent.d_name[1] = '\0';
704 	dirsize = _DIRENT_DIRSIZ(&dent);
705 
706 	if (dirsize > uio->uio_resid)
707 		error = -1;
708 	else {
709 		error = uiomove((caddr_t)&dent, dirsize, uio);
710 		if (error == 0)
711 			uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
712 	}
713 
714 	TMPFS_NODE_LOCK(node);
715 	node->tn_status |= TMPFS_NODE_ACCESSED;
716 	TMPFS_NODE_UNLOCK(node);
717 
718 	return error;
719 }
720 
721 /* --------------------------------------------------------------------- */
722 
723 /*
724  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
725  * directory and returns it in the uio space.  The function returns 0
726  * on success, -1 if there was not enough space in the uio structure to
727  * hold the directory entry or an appropriate error code if another
728  * error happens.
729  */
730 int
731 tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
732 			struct uio *uio)
733 {
734 	int error;
735 	struct dirent dent;
736 	int dirsize;
737 
738 	TMPFS_VALIDATE_DIR(node);
739 	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
740 
741 	if (node->tn_dir.tn_parent) {
742 		TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
743 		dent.d_ino = node->tn_dir.tn_parent->tn_id;
744 		TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
745 	} else {
746 		dent.d_ino = tmp->tm_root->tn_id;
747 	}
748 
749 	dent.d_type = DT_DIR;
750 	dent.d_namlen = 2;
751 	dent.d_name[0] = '.';
752 	dent.d_name[1] = '.';
753 	dent.d_name[2] = '\0';
754 	dirsize = _DIRENT_DIRSIZ(&dent);
755 
756 	if (dirsize > uio->uio_resid)
757 		error = -1;
758 	else {
759 		error = uiomove((caddr_t)&dent, dirsize, uio);
760 		if (error == 0) {
761 			struct tmpfs_dirent *de;
762 
763 			de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
764 			if (de == NULL)
765 				uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
766 			else
767 				uio->uio_offset = tmpfs_dircookie(de);
768 		}
769 	}
770 
771 	TMPFS_NODE_LOCK(node);
772 	node->tn_status |= TMPFS_NODE_ACCESSED;
773 	TMPFS_NODE_UNLOCK(node);
774 
775 	return error;
776 }
777 
778 /* --------------------------------------------------------------------- */
779 
780 /*
781  * Lookup a directory entry by its associated cookie.
782  */
783 struct tmpfs_dirent *
784 tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
785 {
786 	struct tmpfs_dirent *de;
787 
788 	if (cookie == node->tn_dir.tn_readdir_lastn &&
789 	    node->tn_dir.tn_readdir_lastp != NULL) {
790 		return node->tn_dir.tn_readdir_lastp;
791 	}
792 
793 	TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
794 		if (tmpfs_dircookie(de) == cookie) {
795 			break;
796 		}
797 	}
798 
799 	return de;
800 }
801 
802 /* --------------------------------------------------------------------- */
803 
804 /*
805  * Helper function for tmpfs_readdir.  Returns as much directory entries
806  * as can fit in the uio space.  The read starts at uio->uio_offset.
807  * The function returns 0 on success, -1 if there was not enough space
808  * in the uio structure to hold the directory entry or an appropriate
809  * error code if another error happens.
810  */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
	int error;
	off_t startcookie;
	struct tmpfs_dirent *de;

	TMPFS_VALIDATE_DIR(node);

	/* Locate the first directory entry we have to return.  We have cached
	 * the last readdir in the node, so use those values if appropriate.
	 * Otherwise do a linear scan to find the requested entry. */
	startcookie = uio->uio_offset;
	/* "." and ".." are emitted by the getdot*dent() helpers, not here. */
	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
	if (startcookie == TMPFS_DIRCOOKIE_EOF) {
		return 0;
	} else {
		de = tmpfs_dir_lookupbycookie(node, startcookie);
	}
	if (de == NULL) {
		/* Stale cookie: the entry it referenced no longer exists. */
		return EINVAL;
	}

	/* Read as much entries as possible; i.e., until we reach the end of
	 * the directory or we exhaust uio space. */
	do {
		struct dirent d;
		int reclen;

		/* Create a dirent structure representing the current
		 * tmpfs_node and fill it. */
		d.d_ino = de->td_node->tn_id;
		switch (de->td_node->tn_type) {
		case VBLK:
			d.d_type = DT_BLK;
			break;

		case VCHR:
			d.d_type = DT_CHR;
			break;

		case VDIR:
			d.d_type = DT_DIR;
			break;

		case VFIFO:
			d.d_type = DT_FIFO;
			break;

		case VLNK:
			d.d_type = DT_LNK;
			break;

		case VREG:
			d.d_type = DT_REG;
			break;

		case VSOCK:
			d.d_type = DT_SOCK;
			break;

		default:
			panic("tmpfs_dir_getdents: type %p %d",
			    de->td_node, (int)de->td_node->tn_type);
		}
		d.d_namlen = de->td_namelen;
		KKASSERT(de->td_namelen < sizeof(d.d_name));
		bcopy(de->td_name, d.d_name, d.d_namlen);
		d.d_name[d.d_namlen] = '\0';
		reclen = _DIRENT_RECLEN(d.d_namlen);

		/* Stop reading if the directory entry we are treating is
		 * bigger than the amount of data that can be returned. */
		if (reclen > uio->uio_resid) {
			error = -1;
			break;
		}

		/* Copy the new dirent structure into the output buffer and
		 * advance pointers. */
		error = uiomove((caddr_t)&d, reclen, uio);

		(*cntp)++;
		de = TAILQ_NEXT(de, td_entries);
	} while (error == 0 && uio->uio_resid > 0 && de != NULL);

	/* Update the offset and cache. */
	if (de == NULL) {
		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
		node->tn_dir.tn_readdir_lastn = 0;
		node->tn_dir.tn_readdir_lastp = NULL;
	} else {
		node->tn_dir.tn_readdir_lastn = uio->uio_offset = tmpfs_dircookie(de);
		node->tn_dir.tn_readdir_lastp = de;
	}
	/*
	 * NOTE(review): tn_status and the readdir cache are updated here
	 * without TMPFS_NODE_LOCK, unlike the sibling getdot*dent()/lookup
	 * helpers -- confirm the caller serializes access to this node.
	 */
	node->tn_status |= TMPFS_NODE_ACCESSED;

	return error;
}
911 
912 /* --------------------------------------------------------------------- */
913 
914 /*
915  * Resizes the aobj associated to the regular file pointed to by vp to
916  * the size newsize.  'vp' must point to a vnode that represents a regular
917  * file.  'newsize' must be positive.
918  *
919  * pass trivial as 1 when buf content will be overwritten, otherwise set 0
920  * to be zero filled.
921  *
922  * Returns zero on success or an appropriate error code on failure.
923  */
924 int
925 tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
926 {
927 	int error;
928 	vm_pindex_t newpages, oldpages;
929 	struct tmpfs_mount *tmp;
930 	struct tmpfs_node *node;
931 	off_t oldsize;
932 
933 #ifdef INVARIANTS
934 	KKASSERT(vp->v_type == VREG);
935 	KKASSERT(newsize >= 0);
936 #endif
937 
938 	node = VP_TO_TMPFS_NODE(vp);
939 	tmp = VFS_TO_TMPFS(vp->v_mount);
940 
941 	/* Convert the old and new sizes to the number of pages needed to
942 	 * store them.  It may happen that we do not need to do anything
943 	 * because the last allocated page can accommodate the change on
944 	 * its own. */
945 	oldsize = node->tn_size;
946 	oldpages = round_page64(oldsize) / PAGE_SIZE;
947 	KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
948 	newpages = round_page64(newsize) / PAGE_SIZE;
949 
950 	if (newpages > oldpages &&
951 	   tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
952 		error = ENOSPC;
953 		goto out;
954 	}
955 
956 	TMPFS_LOCK(tmp);
957 	tmp->tm_pages_used += (newpages - oldpages);
958 	TMPFS_UNLOCK(tmp);
959 
960 	TMPFS_NODE_LOCK(node);
961 	node->tn_reg.tn_aobj_pages = newpages;
962 	node->tn_size = newsize;
963 	TMPFS_NODE_UNLOCK(node);
964 
965 	/*
966 	 * When adjusting the vnode filesize and its VM object we must
967 	 * also adjust our backing VM object (aobj).  The blocksize
968 	 * used must match the block sized we use for the buffer cache.
969 	 *
970 	 * The backing VM object contains no VM pages, only swap
971 	 * assignments.
972 	 */
973 	if (newsize < oldsize) {
974 		vm_pindex_t osize;
975 		vm_pindex_t nsize;
976 		vm_object_t aobj;
977 
978 		error = nvtruncbuf(vp, newsize, BSIZE, -1, 0);
979 		aobj = node->tn_reg.tn_aobj;
980 		if (aobj) {
981 			osize = aobj->size;
982 			nsize = vp->v_object->size;
983 			if (nsize < osize) {
984 				aobj->size = osize;
985 				swap_pager_freespace(aobj, nsize,
986 						     osize - nsize);
987 			}
988 		}
989 	} else {
990 		vm_object_t aobj;
991 
992 		error = nvextendbuf(vp, oldsize, newsize, BSIZE, BSIZE,
993 				    -1, -1, trivial);
994 		aobj = node->tn_reg.tn_aobj;
995 		if (aobj)
996 			aobj->size = vp->v_object->size;
997 	}
998 
999 out:
1000 	return error;
1001 }
1002 
1003 /* --------------------------------------------------------------------- */
1004 
1005 /*
1006  * Change flags of the given vnode.
1007  * Caller should execute tmpfs_update on vp after a successful execution.
1008  * The vnode must be locked on entry and remain locked on exit.
1009  */
int
tmpfs_chflags(struct vnode *vp, int vaflags, struct ucred *cred)
{
	int error;
	struct tmpfs_node *node;
	int flags;

	KKASSERT(vn_islocked(vp));

	node = VP_TO_TMPFS_NODE(vp);
	flags = node->tn_flags;

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;
	/* Validate the requested flag change against ownership/credentials. */
	error = vop_helper_setattr_flags(&flags, vaflags, node->tn_uid, cred);

	/*
	 * Unprivileged processes are not permitted to unset system
	 * flags, or modify flags if any system flags are set.
	 *
	 * Silently enforce SF_NOCACHE on the root tmpfs vnode so
	 * tmpfs data is not double-cached by swapcache.
	 */
	if (error == 0) {
		TMPFS_NODE_LOCK(node);
		if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) {
			/* Privileged: may set any flag; root vp gets SF_NOCACHE. */
			if (vp->v_flag & VROOT)
				flags |= SF_NOCACHE;
			node->tn_flags = flags;
		} else {
			/* Unprivileged: only UF_* flags, and none while SF_* set. */
			if (node->tn_flags & (SF_NOUNLINK | SF_IMMUTABLE |
					      SF_APPEND) ||
			    (flags & UF_SETTABLE) != flags) {
				error = EPERM;
			} else {
				node->tn_flags &= SF_SETTABLE;
				node->tn_flags |= (flags & UF_SETTABLE);
			}
		}
		node->tn_status |= TMPFS_NODE_CHANGED;
		TMPFS_NODE_UNLOCK(node);
	}

	KKASSERT(vn_islocked(vp));

	return error;
}
1058 
1059 /* --------------------------------------------------------------------- */
1060 
1061 /*
1062  * Change access mode on the given vnode.
1063  * Caller should execute tmpfs_update on vp after a successful execution.
1064  * The vnode must be locked on entry and remain locked on exit.
1065  */
1066 int
1067 tmpfs_chmod(struct vnode *vp, mode_t vamode, struct ucred *cred)
1068 {
1069 	struct tmpfs_node *node;
1070 	mode_t cur_mode;
1071 	int error;
1072 
1073 	KKASSERT(vn_islocked(vp));
1074 
1075 	node = VP_TO_TMPFS_NODE(vp);
1076 
1077 	/* Disallow this operation if the file system is mounted read-only. */
1078 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1079 		return EROFS;
1080 
1081 	/* Immutable or append-only files cannot be modified, either. */
1082 	if (node->tn_flags & (IMMUTABLE | APPEND))
1083 		return EPERM;
1084 
1085 	cur_mode = node->tn_mode;
1086 	error = vop_helper_chmod(vp, vamode, cred, node->tn_uid, node->tn_gid,
1087 				 &cur_mode);
1088 
1089 	if (error == 0 &&
1090 	    (node->tn_mode & ALLPERMS) != (cur_mode & ALLPERMS)) {
1091 		TMPFS_NODE_LOCK(node);
1092 		node->tn_mode &= ~ALLPERMS;
1093 		node->tn_mode |= cur_mode & ALLPERMS;
1094 
1095 		node->tn_status |= TMPFS_NODE_CHANGED;
1096 		TMPFS_NODE_UNLOCK(node);
1097 	}
1098 
1099 	KKASSERT(vn_islocked(vp));
1100 
1101 	return 0;
1102 }
1103 
1104 /* --------------------------------------------------------------------- */
1105 
1106 /*
1107  * Change ownership of the given vnode.  At least one of uid or gid must
1108  * be different than VNOVAL.  If one is set to that value, the attribute
1109  * is unchanged.
1110  * Caller should execute tmpfs_update on vp after a successful execution.
1111  * The vnode must be locked on entry and remain locked on exit.
1112  */
1113 int
1114 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
1115 {
1116 	mode_t cur_mode;
1117 	uid_t cur_uid;
1118 	gid_t cur_gid;
1119 	struct tmpfs_node *node;
1120 	int error;
1121 
1122 	KKASSERT(vn_islocked(vp));
1123 	node = VP_TO_TMPFS_NODE(vp);
1124 
1125 	/* Disallow this operation if the file system is mounted read-only. */
1126 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1127 		return EROFS;
1128 
1129 	/* Immutable or append-only files cannot be modified, either. */
1130 	if (node->tn_flags & (IMMUTABLE | APPEND))
1131 		return EPERM;
1132 
1133 	cur_uid = node->tn_uid;
1134 	cur_gid = node->tn_gid;
1135 	cur_mode = node->tn_mode;
1136 	error = vop_helper_chown(vp, uid, gid, cred,
1137 				 &cur_uid, &cur_gid, &cur_mode);
1138 
1139 	if (error == 0) {
1140 		TMPFS_NODE_LOCK(node);
1141 		if (cur_uid != node->tn_uid ||
1142 		    cur_gid != node->tn_gid ||
1143 		    cur_mode != node->tn_mode) {
1144 			node->tn_uid = cur_uid;
1145 			node->tn_gid = cur_gid;
1146 			node->tn_mode = cur_mode;
1147 			node->tn_status |= TMPFS_NODE_CHANGED;
1148 		}
1149 		TMPFS_NODE_UNLOCK(node);
1150 	}
1151 
1152 	return error;
1153 }
1154 
1155 /* --------------------------------------------------------------------- */
1156 
1157 /*
1158  * Change size of the given vnode.
1159  * Caller should execute tmpfs_update on vp after a successful execution.
1160  * The vnode must be locked on entry and remain locked on exit.
1161  */
1162 int
1163 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
1164 {
1165 	int error;
1166 	struct tmpfs_node *node;
1167 
1168 	KKASSERT(vn_islocked(vp));
1169 
1170 	node = VP_TO_TMPFS_NODE(vp);
1171 
1172 	/* Decide whether this is a valid operation based on the file type. */
1173 	error = 0;
1174 	switch (vp->v_type) {
1175 	case VDIR:
1176 		return EISDIR;
1177 
1178 	case VREG:
1179 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
1180 			return EROFS;
1181 		break;
1182 
1183 	case VBLK:
1184 		/* FALLTHROUGH */
1185 	case VCHR:
1186 		/* FALLTHROUGH */
1187 	case VFIFO:
1188 		/* Allow modifications of special files even if in the file
1189 		 * system is mounted read-only (we are not modifying the
1190 		 * files themselves, but the objects they represent). */
1191 		return 0;
1192 
1193 	default:
1194 		/* Anything else is unsupported. */
1195 		return EOPNOTSUPP;
1196 	}
1197 
1198 	/* Immutable or append-only files cannot be modified, either. */
1199 	if (node->tn_flags & (IMMUTABLE | APPEND))
1200 		return EPERM;
1201 
1202 	error = tmpfs_truncate(vp, size);
1203 	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1204 	 * for us, as will update tn_status; no need to do that here. */
1205 
1206 	KKASSERT(vn_islocked(vp));
1207 
1208 	return error;
1209 }
1210 
1211 /* --------------------------------------------------------------------- */
1212 
1213 /*
1214  * Change access and modification times of the given vnode.
1215  * Caller should execute tmpfs_update on vp after a successful execution.
1216  * The vnode must be locked on entry and remain locked on exit.
1217  */
1218 int
1219 tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
1220 	int vaflags, struct ucred *cred)
1221 {
1222 	struct tmpfs_node *node;
1223 
1224 	KKASSERT(vn_islocked(vp));
1225 
1226 	node = VP_TO_TMPFS_NODE(vp);
1227 
1228 	/* Disallow this operation if the file system is mounted read-only. */
1229 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1230 		return EROFS;
1231 
1232 	/* Immutable or append-only files cannot be modified, either. */
1233 	if (node->tn_flags & (IMMUTABLE | APPEND))
1234 		return EPERM;
1235 
1236 	TMPFS_NODE_LOCK(node);
1237 	if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
1238 		node->tn_status |= TMPFS_NODE_ACCESSED;
1239 
1240 	if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
1241 		node->tn_status |= TMPFS_NODE_MODIFIED;
1242 
1243 	TMPFS_NODE_UNLOCK(node);
1244 
1245 	tmpfs_itimes(vp, atime, mtime);
1246 
1247 	KKASSERT(vn_islocked(vp));
1248 
1249 	return 0;
1250 }
1251 
1252 /* --------------------------------------------------------------------- */
1253 /* Sync timestamps */
1254 void
1255 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
1256     const struct timespec *mod)
1257 {
1258 	struct tmpfs_node *node;
1259 	struct timespec now;
1260 
1261 	node = VP_TO_TMPFS_NODE(vp);
1262 
1263 	if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
1264 	    TMPFS_NODE_CHANGED)) == 0)
1265 		return;
1266 
1267 	vfs_timestamp(&now);
1268 
1269 	TMPFS_NODE_LOCK(node);
1270 	if (node->tn_status & TMPFS_NODE_ACCESSED) {
1271 		if (acc == NULL)
1272 			 acc = &now;
1273 		node->tn_atime = acc->tv_sec;
1274 		node->tn_atimensec = acc->tv_nsec;
1275 	}
1276 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
1277 		if (mod == NULL)
1278 			mod = &now;
1279 		node->tn_mtime = mod->tv_sec;
1280 		node->tn_mtimensec = mod->tv_nsec;
1281 	}
1282 	if (node->tn_status & TMPFS_NODE_CHANGED) {
1283 		node->tn_ctime = now.tv_sec;
1284 		node->tn_ctimensec = now.tv_nsec;
1285 	}
1286 	node->tn_status &=
1287 	    ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
1288 	TMPFS_NODE_UNLOCK(node);
1289 }
1290 
1291 /* --------------------------------------------------------------------- */
1292 
/*
 * Flush any pending timestamp updates on the vnode using the current
 * time; thin wrapper around tmpfs_itimes() with no explicit times.
 */
void
tmpfs_update(struct vnode *vp)
{

	tmpfs_itimes(vp, NULL, NULL);
}
1299 
1300 /* --------------------------------------------------------------------- */
1301 
1302 int
1303 tmpfs_truncate(struct vnode *vp, off_t length)
1304 {
1305 	int error;
1306 	struct tmpfs_node *node;
1307 
1308 	node = VP_TO_TMPFS_NODE(vp);
1309 
1310 	if (length < 0) {
1311 		error = EINVAL;
1312 		goto out;
1313 	}
1314 
1315 	if (node->tn_size == length) {
1316 		error = 0;
1317 		goto out;
1318 	}
1319 
1320 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
1321 		return (EFBIG);
1322 
1323 
1324 	error = tmpfs_reg_resize(vp, length, 1);
1325 
1326 	if (error == 0) {
1327 		TMPFS_NODE_LOCK(node);
1328 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1329 		TMPFS_NODE_UNLOCK(node);
1330 	}
1331 
1332 out:
1333 	tmpfs_update(vp);
1334 
1335 	return error;
1336 }
1337 
1338 /* --------------------------------------------------------------------- */
1339 
1340 static ino_t
1341 tmpfs_fetch_ino(struct tmpfs_mount *tmp)
1342 {
1343 	ino_t ret;
1344 
1345 	ret = tmp->tm_ino++;
1346 
1347 	return (ret);
1348 }
1349