/*	$OpenBSD: tmpfs_subr.c,v 1.26 2022/11/15 17:16:44 mvs Exp $	*/
/*	$NetBSD: tmpfs_subr.c,v 1.79 2012/03/13 18:40:50 elad Exp $	*/

/*
 * Copyright (c) 2005-2011 The NetBSD Foundation, Inc.
 * Copyright (c) 2013 Pedro Martelletto
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program, and by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system: interfaces for inode and directory entry
 * construction, destruction and manipulation.
 *
 * Reference counting
 *
 *	The link count of an inode (tmpfs_node_t::tn_links) is used as a
 *	reference counter; however, it has slightly different semantics.
 *
 *	For directories, the link count represents the directory entries
 *	which refer to the directory; in other words, it represents the
 *	count of sub-directories.  It also takes into account the virtual
 *	'.' entry (which has no real entry in the list).  For files, the
 *	link count represents the hard links.  Since only empty directories
 *	can be removed, the link count is sufficient for the reference
 *	counting requirements.  Note: to check whether a directory is
 *	empty, the inode size (tmpfs_node_t::tn_size) can be used.
 *
 *	The inode itself, as an object, gathers its first reference when
 *	a directory entry is attached via tmpfs_dir_attach(9).  For
 *	instance, after a regular tmpfs_create(), a file would have a link
 *	count of 1, while a directory after tmpfs_mkdir() would have 2
 *	(due to '.').
 *
 * Reclamation
 *
 *	tmpfs inodes rely on a combination of vnode reference counting and
 *	link counting.  That is, an inode can only be destroyed if its
 *	associated vnode is inactive.  The destruction is done on vnode
 *	reclamation, i.e. in tmpfs_reclaim().  Note that
 *	tmpfs_node_t::tn_links being 0 is a destruction criterion.
 *
 *	If an inode has references within the file system (tn_links > 0)
 *	and its inactive vnode gets reclaimed/recycled, the association is
 *	broken in tmpfs_reclaim().  In that case, the inode will always
 *	pass tmpfs_lookup() and thus tmpfs_vnode_get() to associate a new
 *	vnode.
 *
 * Lock order
 *
 *	tmpfs_node_t::tn_nlock ->
 *		struct vnode::v_vlock ->
 *			struct vnode::v_interlock
 */

#include <sys/param.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <uvm/uvm_aobj.h>

#include <tmpfs/tmpfs.h>
#include <tmpfs/tmpfs_vnops.h>


/* Local functions. */
void	tmpfs_dir_putseq(tmpfs_node_t *, tmpfs_dirent_t *);
int	tmpfs_dir_getdotents(tmpfs_node_t *, struct dirent *, struct uio *);

/*
 * tmpfs_alloc_node: allocate a new inode of a specified type and
 * insert it into the list of the specified mount point.
 */
int
tmpfs_alloc_node(tmpfs_mount_t *tmp, enum vtype type, uid_t uid, gid_t gid,
    mode_t mode, char *target, dev_t rdev, tmpfs_node_t **node)
{
	tmpfs_node_t *nnode;
	struct uvm_object *uobj;

	nnode = tmpfs_node_get(tmp);
	if (nnode == NULL) {
		return ENOSPC;
	}

	/* Initially, no references and no associations. */
	nnode->tn_links = 0;
	nnode->tn_vnode = NULL;
	nnode->tn_dirent_hint = NULL;

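	/*
	 * Allocate the next inode number under tm_acc_lock.  A wrap back
	 * to zero means the inode number space is exhausted; this is
	 * reported as ENOSPC.
	 */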
	rw_enter_write(&tmp->tm_acc_lock);
	nnode->tn_id = ++tmp->tm_highest_inode;
	if (nnode->tn_id == 0) {
		--tmp->tm_highest_inode;
		rw_exit_write(&tmp->tm_acc_lock);
		tmpfs_node_put(tmp, nnode);
		return ENOSPC;
	}
	rw_exit_write(&tmp->tm_acc_lock);

	/* Generic initialization. */
	nnode->tn_type = type;
	nnode->tn_size = 0;
	nnode->tn_flags = 0;
	nnode->tn_lockf = NULL;
	nnode->tn_gen = TMPFS_NODE_GEN_MASK & arc4random();

	nanotime(&nnode->tn_atime);
	nnode->tn_birthtime = nnode->tn_atime;
	nnode->tn_ctime = nnode->tn_atime;
	nnode->tn_mtime = nnode->tn_atime;

	KASSERT(uid != VNOVAL && gid != VNOVAL && mode != VNOVAL);

	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		/* Character/block special device. */
		KASSERT(rdev != VNOVAL);
		nnode->tn_spec.tn_dev.tn_rdev = rdev;
		break;
	case VDIR:
		/* Directory. */
		TAILQ_INIT(&nnode->tn_spec.tn_dir.tn_dir);
		nnode->tn_spec.tn_dir.tn_parent = NULL;
		nnode->tn_spec.tn_dir.tn_next_seq = TMPFS_DIRSEQ_START;
		nnode->tn_spec.tn_dir.tn_readdir_lastp = NULL;

		/* Extra link count for the virtual '.' entry. */
		nnode->tn_links++;
		break;
	case VFIFO:
	case VSOCK:
		break;
	case VLNK:
		/* Symbolic link.  Target specifies the file name. */
		KASSERT(target && strlen(target) < MAXPATHLEN);

		nnode->tn_size = strlen(target);
		if (nnode->tn_size == 0) {
			nnode->tn_spec.tn_lnk.tn_link = NULL;
			break;
		}
		nnode->tn_spec.tn_lnk.tn_link =
		    tmpfs_strname_alloc(tmp, nnode->tn_size);
		if (nnode->tn_spec.tn_lnk.tn_link == NULL) {
			tmpfs_node_put(tmp, nnode);
			return ENOSPC;
		}
		memcpy(nnode->tn_spec.tn_lnk.tn_link, target, nnode->tn_size);
		break;
	case VREG:
		/* Regular file.  Create an underlying UVM object. */
		uobj = uao_create(0, UAO_FLAG_CANFAIL);
		if (uobj == NULL) {
			tmpfs_node_put(tmp, nnode);
			return ENOSPC;
		}
		nnode->tn_spec.tn_reg.tn_aobj = uobj;
		nnode->tn_spec.tn_reg.tn_aobj_pages = 0;
		nnode->tn_spec.tn_reg.tn_aobj_pgptr = (vaddr_t)NULL;
		nnode->tn_spec.tn_reg.tn_aobj_pgnum = (voff_t)-1;
		break;
	default:
		KASSERT(0);
	}

	rw_init(&nnode->tn_nlock, "tvlk");

	rw_enter_write(&tmp->tm_lock);
	LIST_INSERT_HEAD(&tmp->tm_nodes, nnode, tn_entries);
	rw_exit_write(&tmp->tm_lock);

	*node = nnode;
	return 0;
}

/*
 * tmpfs_free_node: remove the inode from the list in the mount point and
 * destroy the inode structures.
 */
void
tmpfs_free_node(tmpfs_mount_t *tmp, tmpfs_node_t *node)
{
	size_t objsz;

	rw_enter_write(&tmp->tm_lock);
	LIST_REMOVE(node, tn_entries);
	rw_exit_write(&tmp->tm_lock);

	switch (node->tn_type) {
	case VLNK:
		if (node->tn_size > 0) {
			KASSERT(node->tn_size <= SIZE_MAX);
			tmpfs_strname_free(tmp, node->tn_spec.tn_lnk.tn_link,
			    node->tn_size);
		}
		break;
	case VREG:
		/*
		 * Calculate the size of inode data, decrease the used-memory
		 * counter, and destroy the underlying UVM object (if any).
		 */
		objsz = PAGE_SIZE * node->tn_spec.tn_reg.tn_aobj_pages;
		if (objsz != 0) {
			tmpfs_mem_decr(tmp, objsz);
		}
		if (node->tn_spec.tn_reg.tn_aobj != NULL) {
			uao_detach(node->tn_spec.tn_reg.tn_aobj);
			node->tn_spec.tn_reg.tn_aobj = NULL;
		}
		break;
	case VDIR:
		KASSERT(TAILQ_EMPTY(&node->tn_spec.tn_dir.tn_dir));
		KASSERT(node->tn_spec.tn_dir.tn_parent == NULL ||
		    node == tmp->tm_root);
		break;
	default:
		break;
	}

	rw_enter_write(&tmp->tm_acc_lock);
	if (node->tn_id == tmp->tm_highest_inode)
		--tmp->tm_highest_inode;
	rw_exit_write(&tmp->tm_acc_lock);

	/* mutex_destroy(&node->tn_nlock); */
	tmpfs_node_put(tmp, node);
}

/*
 * tmpfs_vnode_get: allocate or reclaim a vnode for a specified inode.
 *
 * => Must be called with tmpfs_node_t::tn_nlock held.
 * => Returns vnode (*vpp) locked.
 */
int
tmpfs_vnode_get(struct mount *mp, tmpfs_node_t *node, struct vnode **vpp)
{
	struct vnode *vp, *nvp;
	/* kmutex_t *slock; */
	int error;
again:
	/* If there is already a vnode, try to reclaim it. */
	if ((vp = node->tn_vnode) != NULL) {
		/* atomic_or_ulong(&node->tn_gen, TMPFS_RECLAIMING_BIT); */
		node->tn_gen |= TMPFS_RECLAIMING_BIT;
		rw_exit_write(&node->tn_nlock);
		error = vget(vp, LK_EXCLUSIVE);
		if (error == ENOENT) {
			rw_enter_write(&node->tn_nlock);
			goto again;
		}
		/* atomic_and_ulong(&node->tn_gen, ~TMPFS_RECLAIMING_BIT); */
		node->tn_gen &= ~TMPFS_RECLAIMING_BIT;
		*vpp = vp;
		return error;
	}
	if (TMPFS_NODE_RECLAIMING(node)) {
		/* atomic_and_ulong(&node->tn_gen, ~TMPFS_RECLAIMING_BIT); */
		node->tn_gen &= ~TMPFS_RECLAIMING_BIT;
	}

	/*
	 * Get a new vnode and associate it with our inode.  Share the
	 * lock with underlying UVM object, if there is one (VREG case).
	 */
#if 0
	if (node->tn_type == VREG) {
		struct uvm_object *uobj = node->tn_spec.tn_reg.tn_aobj;
		slock = uobj->vmobjlock;
	} else {
		slock = NULL;
	}
#endif
	error = getnewvnode(VT_TMPFS, mp, &tmpfs_vops, &vp);
	if (error) {
		rw_exit_write(&node->tn_nlock);
		return error;
	}

	rrw_init_flags(&node->tn_vlock, "tnode", RWL_DUPOK | RWL_IS_VNODE);
	vp->v_type = node->tn_type;

	/* Type-specific initialization. */
	switch (node->tn_type) {
	case VBLK:
	case VCHR:
		vp->v_op = &tmpfs_specvops;
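		/*
		 * Check whether an alias of this device already exists;
		 * if so, adopt it and dispose of the vnode just allocated.
		 */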
		if ((nvp = checkalias(vp, node->tn_spec.tn_dev.tn_rdev, mp))) {
			nvp->v_data = vp->v_data;
			vp->v_data = NULL;
			vp->v_op = &spec_vops;
			vrele(vp);
			vgone(vp);
			vp = nvp;
			node->tn_vnode = vp;
		}
		break;
	case VDIR:
		vp->v_flag |= node->tn_spec.tn_dir.tn_parent == node ?
		    VROOT : 0;
		break;
#ifdef FIFO
	case VFIFO:
		vp->v_op = &tmpfs_fifovops;
		break;
#endif
	case VLNK:
	case VREG:
	case VSOCK:
		break;
	default:
		KASSERT(0);
	}

	uvm_vnp_setsize(vp, node->tn_size);
	vp->v_data = node;
	node->tn_vnode = vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	rw_exit_write(&node->tn_nlock);

	KASSERT(VOP_ISLOCKED(vp));
	*vpp = vp;
	return 0;
}

/*
 * tmpfs_alloc_file: allocate a new file of the specified type and add it
 * into the parent directory.
 *
 * => Credentials of the caller are used.
 */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
    struct componentname *cnp, char *target)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
	tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp), *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	*vpp = NULL;

	/* Check for the maximum number of links limit. */
	if (vap->va_type == VDIR) {
		if (dnode->tn_links == LINK_MAX) {
			error = EMLINK;
			goto out;
		}
		KASSERT(dnode->tn_links < LINK_MAX);
	}

	if (TMPFS_DIRSEQ_FULL(dnode)) {
		error = ENOSPC;
		goto out;
	}

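	/* Refuse to create an entry in a directory that has been removed. */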
	if (dnode->tn_links == 0) {
		error = ENOENT;
		goto out;
	}

	/* Allocate a node that represents the new file. */
	error = tmpfs_alloc_node(tmp, vap->va_type, cnp->cn_cred->cr_uid,
	    dnode->tn_gid, vap->va_mode, target, vap->va_rdev, &node);
	if (error)
		goto out;

	/* Allocate a directory entry that points to the new file. */
	error = tmpfs_alloc_dirent(tmp, cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error) {
		tmpfs_free_node(tmp, node);
		goto out;
	}

	/* Get a vnode for the new file. */
	rw_enter_write(&node->tn_nlock);
	error = tmpfs_vnode_get(dvp->v_mount, node, vpp);
	if (error) {
		tmpfs_free_dirent(tmp, de);
		tmpfs_free_node(tmp, node);
		goto out;
	}

	/* Associate inode and attach the entry into the directory. */
	tmpfs_dir_attach(dnode, de, node);

out:
	if (error == 0 && (cnp->cn_flags & SAVESTART) == 0)
		pool_put(&namei_pool, cnp->cn_pnbuf);
	return error;
}

/*
 * tmpfs_alloc_dirent: allocates a new directory entry for the inode.
 * The directory entry contains a path name component.
 */
int
tmpfs_alloc_dirent(tmpfs_mount_t *tmp, const char *name, uint16_t len,
    tmpfs_dirent_t **de)
{
	tmpfs_dirent_t *nde;

	nde = tmpfs_dirent_get(tmp);
	if (nde == NULL)
		return ENOSPC;

	nde->td_name = tmpfs_strname_alloc(tmp, len);
	if (nde->td_name == NULL) {
		tmpfs_dirent_put(tmp, nde);
		return ENOSPC;
	}
	nde->td_namelen = len;
	memcpy(nde->td_name, name, len);
	nde->td_seq = TMPFS_DIRSEQ_NONE;

	*de = nde;
	return 0;
}

/*
 * tmpfs_free_dirent: free a directory entry.
 */
void
tmpfs_free_dirent(tmpfs_mount_t *tmp, tmpfs_dirent_t *de)
{
	KASSERT(de->td_node == NULL);
	KASSERT(de->td_seq == TMPFS_DIRSEQ_NONE);
	tmpfs_strname_free(tmp, de->td_name, de->td_namelen);
	tmpfs_dirent_put(tmp, de);
}

/*
 * tmpfs_dir_attach: associate a directory entry with the specified inode,
 * and attach the entry to the directory specified by the vnode.
 *
 * => Increases link count on the associated node.
 * => Increases link count on directory node, if our node is VDIR.
 *    It is the caller's responsibility to check for the LINK_MAX limit.
 * => Triggers kqueue events here.
 */
void
tmpfs_dir_attach(tmpfs_node_t *dnode, tmpfs_dirent_t *de, tmpfs_node_t *node)
{
	struct vnode *dvp = dnode->tn_vnode;
	int events = NOTE_WRITE;

	KASSERT(dvp != NULL);
	KASSERT(VOP_ISLOCKED(dvp));

	/* Get a new sequence number. */
	KASSERT(de->td_seq == TMPFS_DIRSEQ_NONE);
	de->td_seq = tmpfs_dir_getseq(dnode, de);

	/* Associate the directory entry and the inode. */
	de->td_node = node;
	KASSERT(node->tn_links < LINK_MAX);
	node->tn_links++;

	/* Save the hint (might overwrite). */
	node->tn_dirent_hint = de;

	/* Insert the entry into the directory (parent of the inode). */
	TAILQ_INSERT_TAIL(&dnode->tn_spec.tn_dir.tn_dir, de, td_entries);
	dnode->tn_size += sizeof(tmpfs_dirent_t);
	tmpfs_update(dnode, TMPFS_NODE_STATUSALL);
	uvm_vnp_setsize(dvp, dnode->tn_size);

	if (node->tn_type == VDIR) {
		/* Set parent. */
		KASSERT(node->tn_spec.tn_dir.tn_parent == NULL);
		node->tn_spec.tn_dir.tn_parent = dnode;

		/* Increase the link count of the parent. */
		KASSERT(dnode->tn_links < LINK_MAX);
		dnode->tn_links++;
		events |= NOTE_LINK;

		TMPFS_VALIDATE_DIR(node);
	}
	VN_KNOTE(dvp, events);
}

/*
 * tmpfs_dir_detach: disassociate the directory entry and its inode,
 * and detach the entry from the directory specified by the vnode.
 *
 * => Decreases link count on the associated node.
 * => Decreases the link count on directory node, if our node is VDIR.
 * => Triggers kqueue events here.
 */
void
tmpfs_dir_detach(tmpfs_node_t *dnode, tmpfs_dirent_t *de)
{
	tmpfs_node_t *node = de->td_node;
	struct vnode *vp, *dvp = dnode->tn_vnode;
	int events = NOTE_WRITE;

	KASSERT(dvp == NULL || VOP_ISLOCKED(dvp));

	/* Disassociate the inode and entry. */
	de->td_node = NULL;
	node->tn_dirent_hint = NULL;

	KASSERT(node->tn_links > 0);
	node->tn_links--;
	if ((vp = node->tn_vnode) != NULL) {
		KASSERT(VOP_ISLOCKED(vp));
		VN_KNOTE(vp, node->tn_links ? NOTE_LINK : NOTE_DELETE);
	}

	/* If it is a directory, decrease the link count of the parent. */
	if (node->tn_type == VDIR) {
		KASSERT(node->tn_spec.tn_dir.tn_parent == dnode);
		node->tn_spec.tn_dir.tn_parent = NULL;

		KASSERT(dnode->tn_links > 0);
		dnode->tn_links--;
		events |= NOTE_LINK;
	}

	/* Remove the entry from the directory. */
	if (dnode->tn_spec.tn_dir.tn_readdir_lastp == de) {
		dnode->tn_spec.tn_dir.tn_readdir_lastp = NULL;
	}
	TAILQ_REMOVE(&dnode->tn_spec.tn_dir.tn_dir, de, td_entries);

	dnode->tn_size -= sizeof(tmpfs_dirent_t);
	tmpfs_update(dnode, TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
	tmpfs_dir_putseq(dnode, de);
	if (dvp) {
		tmpfs_update(dnode, 0);
		uvm_vnp_setsize(dvp, dnode->tn_size);
		VN_KNOTE(dvp, events);
	}
}

/*
 * tmpfs_dir_lookup: find a directory entry in the specified inode.
 *
 * Note that the . and .. components are not allowed as they do not
 * physically exist within directories.
 */
tmpfs_dirent_t *
tmpfs_dir_lookup(tmpfs_node_t *node, struct componentname *cnp)
{
	const char *name = cnp->cn_nameptr;
	const uint16_t nlen = cnp->cn_namelen;
	tmpfs_dirent_t *de;

	KASSERT(VOP_ISLOCKED(node->tn_vnode));
	KASSERT(nlen != 1 || !(name[0] == '.'));
	KASSERT(nlen != 2 || !(name[0] == '.' && name[1] == '.'));
	TMPFS_VALIDATE_DIR(node);

	TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
		if (de->td_namelen != nlen)
			continue;
		if (memcmp(de->td_name, name, nlen) != 0)
			continue;
		break;
	}
	tmpfs_update(node, TMPFS_NODE_ACCESSED);
	return de;
}

/*
 * tmpfs_dir_cached: get a cached directory entry if it is valid.  Used to
 * avoid an unnecessary tmpfs_dir_lookup().
 *
 * => The vnode must be locked.
 */
tmpfs_dirent_t *
tmpfs_dir_cached(tmpfs_node_t *node)
{
	tmpfs_dirent_t *de = node->tn_dirent_hint;

	KASSERT(VOP_ISLOCKED(node->tn_vnode));

	if (de == NULL) {
		return NULL;
	}
	KASSERT(de->td_node == node);

	/*
	 * Directories always have a valid hint.  For files, check whether
	 * there are any hard links; if there are, the hint might be invalid.
	 */
	return (node->tn_type != VDIR && node->tn_links > 1) ? NULL : de;
}

/*
 * tmpfs_dir_getseq: get a per-directory sequence number for the entry.
 */
uint64_t
tmpfs_dir_getseq(tmpfs_node_t *dnode, tmpfs_dirent_t *de)
{
	uint64_t seq = de->td_seq;

	TMPFS_VALIDATE_DIR(dnode);

	if (__predict_true(seq != TMPFS_DIRSEQ_NONE)) {
		/* Already set. */
		KASSERT(seq >= TMPFS_DIRSEQ_START);
		return seq;
	}

	/*
	 * The "." and ".." and the end-of-directory have reserved numbers.
	 * The other sequence numbers are allocated incrementally.
	 */

	seq = dnode->tn_spec.tn_dir.tn_next_seq;
	KASSERT(seq >= TMPFS_DIRSEQ_START);
	KASSERT(seq < TMPFS_DIRSEQ_END);
	dnode->tn_spec.tn_dir.tn_next_seq++;
	return seq;
}

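/*
 * tmpfs_dir_putseq: release the sequence number of a detached entry.
 * The counter is reset once the directory becomes empty, and rolled back
 * if the most recently allocated number is being released.
 */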
void
tmpfs_dir_putseq(tmpfs_node_t *dnode, tmpfs_dirent_t *de)
{
	uint64_t seq = de->td_seq;

	TMPFS_VALIDATE_DIR(dnode);
	KASSERT(seq == TMPFS_DIRSEQ_NONE || seq >= TMPFS_DIRSEQ_START);
	KASSERT(seq == TMPFS_DIRSEQ_NONE || seq < TMPFS_DIRSEQ_END);

	de->td_seq = TMPFS_DIRSEQ_NONE;

	/* Empty?  We can reset. */
	if (dnode->tn_size == 0) {
		dnode->tn_spec.tn_dir.tn_next_seq = TMPFS_DIRSEQ_START;
	} else if (seq != TMPFS_DIRSEQ_NONE &&
	    seq == dnode->tn_spec.tn_dir.tn_next_seq - 1) {
		dnode->tn_spec.tn_dir.tn_next_seq--;
	}
}

/*
 * tmpfs_dir_lookupbyseq: lookup a directory entry by the sequence number.
 */
tmpfs_dirent_t *
tmpfs_dir_lookupbyseq(tmpfs_node_t *node, off_t seq)
{
	tmpfs_dirent_t *de = node->tn_spec.tn_dir.tn_readdir_lastp;

	TMPFS_VALIDATE_DIR(node);

	/*
	 * First, check the cache.  If it does not match, perform a lookup.
	 */
	if (de && de->td_seq == seq) {
		KASSERT(de->td_seq >= TMPFS_DIRSEQ_START);
		KASSERT(de->td_seq != TMPFS_DIRSEQ_NONE);
		return de;
	}
	TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
		KASSERT(de->td_seq >= TMPFS_DIRSEQ_START);
		KASSERT(de->td_seq != TMPFS_DIRSEQ_NONE);
		if (de->td_seq == seq)
			return de;
	}
	return NULL;
}

/*
 * tmpfs_dir_getdotents: helper function for tmpfs_readdir() to get the
 * dot meta entries, that is, "." or "..", and copy them to the UIO space.
 */
int
tmpfs_dir_getdotents(tmpfs_node_t *node, struct dirent *dp, struct uio *uio)
{
	tmpfs_dirent_t *de;
	off_t next = 0;
	int error;

	switch (uio->uio_offset) {
	case TMPFS_DIRSEQ_DOT:
		dp->d_fileno = node->tn_id;
		strlcpy(dp->d_name, ".", sizeof(dp->d_name));
		next = TMPFS_DIRSEQ_DOTDOT;
		break;
	case TMPFS_DIRSEQ_DOTDOT:
		dp->d_fileno = node->tn_spec.tn_dir.tn_parent->tn_id;
		strlcpy(dp->d_name, "..", sizeof(dp->d_name));
		de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
		next = de ? tmpfs_dir_getseq(node, de) : TMPFS_DIRSEQ_EOF;
		break;
	default:
		KASSERT(false);
	}
	dp->d_type = DT_DIR;
	dp->d_namlen = strlen(dp->d_name);
	dp->d_reclen = DIRENT_SIZE(dp);
	dp->d_off = next;

	if (dp->d_reclen > uio->uio_resid) {
		return EJUSTRETURN;
	}

	if ((error = uiomove(dp, dp->d_reclen, uio)) != 0) {
		return error;
	}

	uio->uio_offset = next;
	return error;
}

/*
 * tmpfs_dir_getdents: helper function for tmpfs_readdir.
 *
 * => Returns as many directory entries as can fit in the uio space.
 * => The read starts at uio->uio_offset.
 */
int
tmpfs_dir_getdents(tmpfs_node_t *node, struct uio *uio)
{
	tmpfs_dirent_t *de, *next_de;
	struct dirent dent;
	int error = 0;

	KASSERT(VOP_ISLOCKED(node->tn_vnode));
	TMPFS_VALIDATE_DIR(node);
	memset(&dent, 0, sizeof(dent));

	if (uio->uio_offset == TMPFS_DIRSEQ_DOT) {
		if ((error = tmpfs_dir_getdotents(node, &dent, uio)) != 0) {
			goto done;
		}
	}
	if (uio->uio_offset == TMPFS_DIRSEQ_DOTDOT) {
		if ((error = tmpfs_dir_getdotents(node, &dent, uio)) != 0) {
			goto done;
		}
	}
	/* Done if we reached the end. */
	if (uio->uio_offset == TMPFS_DIRSEQ_EOF) {
		goto done;
	}

	/* Locate the directory entry at the given sequence number. */
	de = tmpfs_dir_lookupbyseq(node, uio->uio_offset);
	if (de == NULL) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Read as many entries as possible; i.e., until we reach the end
	 * of the directory or we exhaust UIO space.
	 */
	do {
		dent.d_fileno = de->td_node->tn_id;
		switch (de->td_node->tn_type) {
		case VBLK:
			dent.d_type = DT_BLK;
			break;
		case VCHR:
			dent.d_type = DT_CHR;
			break;
		case VDIR:
			dent.d_type = DT_DIR;
			break;
		case VFIFO:
			dent.d_type = DT_FIFO;
			break;
		case VLNK:
			dent.d_type = DT_LNK;
			break;
		case VREG:
			dent.d_type = DT_REG;
			break;
		case VSOCK:
			dent.d_type = DT_SOCK;
			break;
		default:
			KASSERT(0);
		}
		dent.d_namlen = de->td_namelen;
		KASSERT(de->td_namelen < sizeof(dent.d_name));
		memcpy(dent.d_name, de->td_name, de->td_namelen);
		dent.d_name[de->td_namelen] = '\0';
		dent.d_reclen = DIRENT_SIZE(&dent);

		next_de = TAILQ_NEXT(de, td_entries);
		if (next_de == NULL)
			dent.d_off = TMPFS_DIRSEQ_EOF;
		else
			dent.d_off = tmpfs_dir_getseq(node, next_de);

		if (dent.d_reclen > uio->uio_resid) {
			/* Exhausted UIO space. */
			error = EJUSTRETURN;
			break;
		}

		/* Copy out the directory entry and continue. */
		error = uiomove(&dent, dent.d_reclen, uio);
		if (error) {
			break;
		}
		de = TAILQ_NEXT(de, td_entries);

	} while (uio->uio_resid > 0 && de);

	/* Cache the last entry or clear and mark EOF. */
	uio->uio_offset = de ? tmpfs_dir_getseq(node, de) : TMPFS_DIRSEQ_EOF;
	node->tn_spec.tn_dir.tn_readdir_lastp = de;
done:
	tmpfs_update(node, TMPFS_NODE_ACCESSED);

	if (error == EJUSTRETURN) {
		/* Exhausted UIO space - just return. */
		error = 0;
	}
	KASSERT(error >= 0);
	return error;
}

/*
 * tmpfs_reg_resize: resize the underlying UVM object associated with the
 * specified regular file.
 */
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(vp->v_mount);
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	struct uvm_object *uobj = node->tn_spec.tn_reg.tn_aobj;
	size_t newpages, oldpages, bytes;
	off_t oldsize;
	vaddr_t pgoff;
	int error;

	KASSERT(vp->v_type == VREG);
	KASSERT(newsize >= 0);

	oldsize = node->tn_size;
	oldpages = round_page(oldsize) >> PAGE_SHIFT;
	newpages = round_page(newsize) >> PAGE_SHIFT;
	KASSERT(oldpages == node->tn_spec.tn_reg.tn_aobj_pages);

	if (newpages > oldpages) {
		/* Increase the used-memory counter if getting extra pages. */
		bytes = (newpages - oldpages) << PAGE_SHIFT;
		if (tmpfs_mem_incr(tmp, bytes) == 0)
			return ENOSPC;
		rw_enter(uobj->vmobjlock, RW_WRITE);
		error = uao_grow(uobj, newpages);
		rw_exit(uobj->vmobjlock);
		if (error) {
			tmpfs_mem_decr(tmp, bytes);
			return ENOSPC;
		}
	}

	node->tn_spec.tn_reg.tn_aobj_pages = newpages;
	node->tn_size = newsize;
	uvm_vnp_setsize(vp, newsize);
	uvm_vnp_uncache(vp);

	/*
	 * Free "backing store".
	 */
	if (newpages < oldpages) {
		if (tmpfs_uio_cached(node))
			tmpfs_uio_uncache(node);
		rw_enter(uobj->vmobjlock, RW_WRITE);
		if (uao_shrink(uobj, newpages))
			panic("shrink failed");
		rw_exit(uobj->vmobjlock);
		/* Decrease the used-memory counter. */
		tmpfs_mem_decr(tmp, (oldpages - newpages) << PAGE_SHIFT);
	}
	if (newsize > oldsize) {
		if (tmpfs_uio_cached(node))
			tmpfs_uio_uncache(node);
		pgoff = oldsize & PAGE_MASK;
		if (pgoff != 0) {
			/*
			 * Growing from an offset which is not at a page
			 * boundary; zero out unused bytes in current page.
			 */
			error = tmpfs_zeropg(node, trunc_page(oldsize), pgoff);
			if (error)
				panic("tmpfs_zeropg: error %d", error);
		}
		VN_KNOTE(vp, NOTE_EXTEND);
	}
	return 0;
}

/*
 * tmpfs_chflags: change flags of the given vnode.
 */
int
tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	if (cred->cr_uid != node->tn_uid && (error = suser_ucred(cred)))
		return error;

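	/*
	 * The superuser may set any flags, unless the file is marked
	 * system-immutable or append-only and securelevel is raised.
	 * Other users may only manipulate the user flags (UF_SETTABLE);
	 * system flags already set on the node are preserved.
	 */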
	if (cred->cr_uid == 0) {
		if (node->tn_flags & (SF_IMMUTABLE | SF_APPEND) &&
		    securelevel > 0)
			return EPERM;
		node->tn_flags = flags;
	} else {
		if (node->tn_flags & (SF_IMMUTABLE | SF_APPEND) ||
		    (flags & UF_SETTABLE) != flags)
			return EPERM;
		node->tn_flags &= SF_SETTABLE;
		node->tn_flags |= (flags & UF_SETTABLE);
	}

	tmpfs_update(node, TMPFS_NODE_CHANGED);
	VN_KNOTE(vp, NOTE_ATTRIB);
	return 0;
}

/*
 * tmpfs_chmod: change access mode on the given vnode.
 */
int
tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct proc *p)
{
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	if (cred->cr_uid != node->tn_uid && (error = suser_ucred(cred)))
		return error;
	if (cred->cr_uid != 0) {
		if (vp->v_type != VDIR && (mode & S_ISTXT))
			return EFTYPE;
		if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID))
			return EPERM;
	}

	node->tn_mode = (mode & ALLPERMS);
	tmpfs_update(node, TMPFS_NODE_CHANGED);
	if ((vp->v_flag & VTEXT) && (node->tn_mode & S_ISTXT) == 0)
		uvm_vnp_uncache(vp);
	VN_KNOTE(vp, NOTE_ATTRIB);
	return 0;
}

/*
 * tmpfs_chown: change ownership of the given vnode.
 *
 * => At least one of uid or gid must be different from VNOVAL.
 * => The attribute is unchanged for the VNOVAL case.
 */
int
tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct proc *p)
{
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* Assign default values if they are unknown. */
	KASSERT(uid != VNOVAL || gid != VNOVAL);
	if (uid == VNOVAL) {
		uid = node->tn_uid;
	}
	if (gid == VNOVAL) {
		gid = node->tn_gid;
	}

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	if ((cred->cr_uid != node->tn_uid || uid != node->tn_uid ||
	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
	    (error = suser_ucred(cred)))
		return error;

	node->tn_uid = uid;
	node->tn_gid = gid;
	tmpfs_update(node, TMPFS_NODE_CHANGED);
	VN_KNOTE(vp, NOTE_ATTRIB);
	return 0;
}

/*
 * tmpfs_chsize: change size of the given vnode.
 */
int
tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred, struct proc *p)
{
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	KASSERT(VOP_ISLOCKED(vp));

	/* Decide whether this is a valid operation based on the file type. */
	switch (vp->v_type) {
	case VDIR:
		return EISDIR;
	case VREG:
		if (vp->v_mount->mnt_flag & MNT_RDONLY) {
			return EROFS;
		}
		break;
	case VBLK:
	case VCHR:
	case VFIFO:
		/*
		 * Allow modifications of special files even if the file
		 * system is mounted read-only (we are not modifying the
		 * files themselves, but the objects they represent).
		 */
		return 0;
	default:
		return EOPNOTSUPP;
	}

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		return EPERM;
	}

	/* Note: tmpfs_truncate() will raise NOTE_EXTEND and NOTE_ATTRIB. */
	return tmpfs_truncate(vp, size);
}

/*
 * tmpfs_chtimes: change access and modification times for vnode.
 */
int
tmpfs_chtimes(struct vnode *vp, const struct timespec *atime,
    const struct timespec *mtime, int vaflags, struct ucred *cred,
    struct proc *p)
{
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	if (cred->cr_uid != node->tn_uid && (error = suser_ucred(cred)) &&
	    ((vaflags & VA_UTIMES_NULL) == 0 ||
	    (error = VOP_ACCESS(vp, VWRITE, cred, p))))
		return error;

	if (atime->tv_nsec != VNOVAL)
		node->tn_atime = *atime;

	if (mtime->tv_nsec != VNOVAL)
		node->tn_mtime = *mtime;

	if (mtime->tv_nsec != VNOVAL || (vaflags & VA_UTIMES_CHANGE))
		tmpfs_update(VP_TO_TMPFS_NODE(vp), TMPFS_NODE_CHANGED);

	VN_KNOTE(vp, NOTE_ATTRIB);

	return 0;
}

/*
 * tmpfs_update: update timestamps, et al.
 */
void
tmpfs_update(tmpfs_node_t *node, int flags)
{
	struct timespec nowtm;

	nanotime(&nowtm);

	if (flags & TMPFS_NODE_ACCESSED) {
		node->tn_atime = nowtm;
	}
	if (flags & TMPFS_NODE_MODIFIED) {
		node->tn_mtime = nowtm;
	}
	if (flags & TMPFS_NODE_CHANGED) {
		node->tn_ctime = nowtm;
	}
}

int
tmpfs_truncate(struct vnode *vp, off_t length)
{
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	int error;

	if (length < 0) {
		error = EINVAL;
		goto out;
	}
	if (node->tn_size == length) {
		error = 0;
		goto out;
	}
	error = tmpfs_reg_resize(vp, length);
	if (error == 0) {
		tmpfs_update(node, TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED);
	}
out:
	return error;
}

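/*
 * Single-page cache for UIO transfers: tmpfs keeps the kernel mapping of
 * the page most recently used by tmpfs_uiomove() (tn_pgnum/tn_pgptr), so
 * that consecutive small transfers avoid repeated kernel_map operations.
 */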
int
tmpfs_uio_cached(tmpfs_node_t *node)
{
	int pgnum_valid = (node->tn_pgnum != (voff_t)-1);
	int pgptr_valid = (node->tn_pgptr != (vaddr_t)NULL);
	KASSERT(pgnum_valid == pgptr_valid);
	return pgnum_valid && pgptr_valid;
}

vaddr_t
tmpfs_uio_lookup(tmpfs_node_t *node, voff_t pgnum)
{
	if (tmpfs_uio_cached(node) == 1 && node->tn_pgnum == pgnum)
		return node->tn_pgptr;

	return (vaddr_t)NULL;
}

void
tmpfs_uio_uncache(tmpfs_node_t *node)
{
	KASSERT(node->tn_pgnum != (voff_t)-1);
	KASSERT(node->tn_pgptr != (vaddr_t)NULL);
	uvm_unmap(kernel_map, node->tn_pgptr, node->tn_pgptr + PAGE_SIZE);
	node->tn_pgnum = (voff_t)-1;
	node->tn_pgptr = (vaddr_t)NULL;
}

void
tmpfs_uio_cache(tmpfs_node_t *node, voff_t pgnum, vaddr_t pgptr)
{
	KASSERT(node->tn_pgnum == (voff_t)-1);
	KASSERT(node->tn_pgptr == (vaddr_t)NULL);
	node->tn_pgnum = pgnum;
	node->tn_pgptr = pgptr;
}

/*
 * Be gentle to kernel_map, don't allow more than 4MB in a single transaction.
 */
#define TMPFS_UIO_MAXBYTES	((1 << 22) - PAGE_SIZE)

int
tmpfs_uiomove(tmpfs_node_t *node, struct uio *uio, vsize_t len)
{
	vaddr_t va, pgoff;
	int error, adv;
	voff_t pgnum;
	vsize_t sz;

	pgnum = trunc_page(uio->uio_offset);
	pgoff = uio->uio_offset & PAGE_MASK;

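	/*
	 * Fast path: if the transfer fits within a single page, reuse the
	 * cached kernel mapping of that page when one is available.
	 */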
	if (pgoff + len < PAGE_SIZE) {
		va = tmpfs_uio_lookup(node, pgnum);
		if (va != (vaddr_t)NULL)
			return uiomove((void *)va + pgoff, len, uio);
	}

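	/*
	 * Map at most TMPFS_UIO_MAXBYTES at once; shorter transfers are
	 * mapped with MADV_SEQUENTIAL, longer ones with MADV_NORMAL.
	 */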
	if (len >= TMPFS_UIO_MAXBYTES) {
		sz = TMPFS_UIO_MAXBYTES;
		adv = MADV_NORMAL;
	} else {
		sz = len;
		adv = MADV_SEQUENTIAL;
	}

	if (tmpfs_uio_cached(node))
		tmpfs_uio_uncache(node);

	uao_reference(node->tn_uobj);

	error = uvm_map(kernel_map, &va, round_page(pgoff + sz), node->tn_uobj,
	    trunc_page(uio->uio_offset), 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
	    PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, adv, 0));
	if (error) {
		uao_detach(node->tn_uobj); /* Drop reference. */
		return error;
	}

	error = uiomove((void *)va + pgoff, sz, uio);
	if (error == 0 && pgoff + sz < PAGE_SIZE)
		tmpfs_uio_cache(node, pgnum, va);
	else
		uvm_unmap(kernel_map, va, va + round_page(pgoff + sz));

	return error;
}

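/*
 * tmpfs_zeropg: map the given page of the inode's UVM object into the
 * kernel and zero it from `pgoff' to the end of the page.
 */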
int
tmpfs_zeropg(tmpfs_node_t *node, voff_t pgnum, vaddr_t pgoff)
{
	vaddr_t va;
	int error;

	KASSERT(tmpfs_uio_cached(node) == 0);

	uao_reference(node->tn_uobj);

	error = uvm_map(kernel_map, &va, PAGE_SIZE, node->tn_uobj, pgnum, 0,
	    UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_NORMAL, 0));
	if (error) {
		uao_detach(node->tn_uobj); /* Drop reference. */
		return error;
	}

	bzero((void *)va + pgoff, PAGE_SIZE - pgoff);
	uvm_unmap(kernel_map, va, va + PAGE_SIZE);

	return 0;
}