/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses the kernel's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include <sys/conf.h>
#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <vfs/tmpfs/tmpfs.h>
#include <vfs/tmpfs/tmpfs_vnops.h>
#include <vfs/tmpfs/tmpfs_args.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");

/* --------------------------------------------------------------------- */

static int	tmpfs_mount(struct mount *, char *, caddr_t, struct ucred *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct vnode *, struct fid *,
			     struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *, struct ucred *);

/* --------------------------------------------------------------------- */
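/*
 * Object cache constructor: reset the fields of a tmpfs node that must
 * start out clean each time a node is handed out by the per-mount
 * objcache.  The generation number is bumped so that stale NFS file
 * handles referring to a recycled node are rejected by tmpfs_fhtovp().
 */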
int
tmpfs_node_ctor(void *obj, void *privdata, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)obj;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_vnode = NULL;
	node->tn_vpstate = TMPFS_VNODE_WANT;
	bzero(&node->tn_spec, sizeof(node->tn_spec));

	return (1);
}

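/*
 * Object cache destructor: called when a node is returned to the
 * objcache.  Mark it as typeless and doomed.
 */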
static void
tmpfs_node_dtor(void *obj, void *privdata)
{
	struct tmpfs_node *node = (struct tmpfs_node *)obj;

	node->tn_type = VNON;
	node->tn_vpstate = TMPFS_VNODE_DOOMED;
}

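/*
 * Object cache init: allocate backing storage for a node and perform
 * the one-time initialization that survives ctor/dtor cycles (the node
 * interlock and the random starting generation number).
 */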
static void *
tmpfs_node_init(void *args, int flags)
{
	struct tmpfs_node *node;

	node = (struct tmpfs_node *)objcache_malloc_alloc(args, flags);
	if (node == NULL)
		return (NULL);
	node->tn_id = 0;

	lockinit(&node->tn_interlock, "tmpfs node interlock", 0, LK_CANRECURSE);
	node->tn_gen = karc4random();

	return node;
}

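/*
 * Object cache fini: counterpart of tmpfs_node_init.  Tear down the
 * interlock and release the node's backing storage.
 */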
static void
tmpfs_node_fini(void *obj, void *args)
{
	struct tmpfs_node *node = (struct tmpfs_node *)obj;

	lockuninit(&node->tn_interlock);
	objcache_malloc_free(obj, args);
}

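/*
 * VFS mount entry point.  Compute the page, node, and maximum file size
 * limits from the user-supplied tmpfs_args (falling back to defaults
 * derived from available swap and memory), create the per-mount malloc
 * zones and object caches, allocate the root directory node, and hook
 * the tmpfs vnode operations into the mount point.
 */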
static int
tmpfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	struct tmpfs_args args;
	vm_pindex_t pages;
	vm_pindex_t pages_limit;
	ino_t nodes;
	u_int64_t	maxfsize;
	int error;
	/* Size counters. */
	ino_t	nodes_max;
	off_t	size_max;
	size_t	maxfsize_max;
	size_t	size;

	/* Root node attributes. */
	uid_t	root_uid = cred->cr_uid;
	gid_t	root_gid = cred->cr_gid;
	mode_t	root_mode = (VREAD | VWRITE);

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * XXX: There is no support yet for updating file system
		 * settings.  Should be added.
		 */
		return EOPNOTSUPP;
	}

	/*
	 * mount info
	 */
	bzero(&args, sizeof(args));
	size_max = 0;
	nodes_max = 0;
	maxfsize_max = 0;

	if (path) {
		if (data) {
			error = copyin(data, &args, sizeof(args));
			if (error)
				return (error);
		}
		size_max = args.ta_size_max;
		nodes_max = args.ta_nodes_max;
		maxfsize_max = args.ta_maxfsize_max;
		root_uid = args.ta_root_uid;
		root_gid = args.ta_root_gid;
		root_mode = args.ta_root_mode;
	}

	/*
	 * For mounts by a non-root user, restrict the mode of the root
	 * node to read access, plus write access if the mount is not
	 * read-only.
	 */
	if (cred->cr_uid != 0) {
		root_mode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			root_mode |= VWRITE;
	}

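	/*
	 * Sizing policy: with no explicit size the mount is capped at
	 * half of pages_limit (swap pages plus half of physical memory);
	 * the default node limit works out to roughly one node per KB of
	 * data space.  As a worked example, assuming 4KB pages and a
	 * 64MB size_max below the limit, pages = 64MB / 4KB = 16384 and
	 * the default node limit is 3 + 16384 * 4096 / 1024 = 65539.
	 */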
	pages_limit = vm_swap_max + vmstats.v_page_count / 2;

	if (size_max == 0)
		pages = pages_limit / 2;
	else if (size_max < PAGE_SIZE)
		pages = 1;
	else if (OFF_TO_IDX(size_max) > pages_limit)
		pages = pages_limit;
	else
		pages = OFF_TO_IDX(size_max);

	if (nodes_max == 0)
		nodes = 3 + pages * PAGE_SIZE / 1024;
	else if (nodes_max < 3)
		nodes = 3;
	else if (nodes_max > pages)
		nodes = pages;
	else
		nodes = nodes_max;

	maxfsize = IDX_TO_OFF(pages_limit);
	if (maxfsize_max != 0 && maxfsize > maxfsize_max)
		maxfsize = maxfsize_max;

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = kmalloc(sizeof(*tmp), M_TMPFSMNT, M_WAITOK | M_ZERO);

	lockinit(&(tmp->allnode_lock), "tmpfs allnode lock", 0, LK_CANRECURSE);
	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_maxfilesize = maxfsize;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;

	kmalloc_create(&tmp->tm_node_zone, "tmpfs node");
	kmalloc_create(&tmp->tm_dirent_zone, "tmpfs dirent");
	kmalloc_create(&tmp->tm_name_zone, "tmpfs name zone");

	kmalloc_raise_limit(tmp->tm_node_zone, sizeof(struct tmpfs_node) *
			    tmp->tm_nodes_max);

	tmp->tm_node_zone_malloc_args.objsize = sizeof(struct tmpfs_node);
	tmp->tm_node_zone_malloc_args.mtype = tmp->tm_node_zone;

	tmp->tm_dirent_zone_malloc_args.objsize = sizeof(struct tmpfs_dirent);
	tmp->tm_dirent_zone_malloc_args.mtype = tmp->tm_dirent_zone;

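	/*
	 * The dirent and node pools are object caches backed by the
	 * per-mount malloc zones set up above, so allocations are
	 * attributed to this mount and can be torn down wholesale at
	 * unmount time.
	 */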
	tmp->tm_dirent_pool = objcache_create("tmpfs dirent cache",
	    0, 0,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free,
	    &tmp->tm_dirent_zone_malloc_args);
	tmp->tm_node_pool = objcache_create("tmpfs node cache",
	    0, 0,
	    tmpfs_node_ctor, tmpfs_node_dtor, NULL,
	    tmpfs_node_init, tmpfs_node_fini,
	    &tmp->tm_node_zone_malloc_args);

	/* Allocate the root node. */
	error = tmpfs_alloc_node(tmp, VDIR, root_uid, root_gid,
				 root_mode & ALLPERMS, NULL, NULL,
				 VNOVAL, VNOVAL, &root);
	if (error != 0 || root == NULL) {
		objcache_destroy(tmp->tm_node_pool);
		objcache_destroy(tmp->tm_dirent_pool);
		kfree(tmp, M_TMPFSMNT);
		return error;
	}

	/*
	 * We are backed by swap, set the snocache chflags flag so we
	 * don't trip over swapcache.
	 */
	root->tn_flags = SF_NOCACHE;

	KASSERT(root->tn_id >= 0,
		("tmpfs root with invalid ino: %d", (int)root->tn_id));
	tmp->tm_root = root;

	mp->mnt_flag |= MNT_LOCAL;
#if 0
	mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_WR_MPSAFE | MNTK_GA_MPSAFE |
			     MNTK_IN_MPSAFE | MNTK_SG_MPSAFE;
#endif
	mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_SG_MPSAFE;
	mp->mnt_kern_flag |= MNTK_NOMSYNC;
	mp->mnt_data = (qaddr_t)tmp;
	vfs_getnewfsid(mp);

	vfs_add_vnodeops(mp, &tmpfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &tmpfs_fifo_vops, &mp->mnt_vn_fifo_ops);

	copystr("tmpfs", mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	tmpfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/* --------------------------------------------------------------------- */

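/*
 * VFS unmount entry point.  Truncate all regular files so their pages
 * are returned to the VM system, flush the remaining vnodes, detach
 * every directory entry, and finally free the nodes themselves along
 * with the per-mount zones and object caches.
 */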
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	int found;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	tmp = VFS_TO_TMPFS(mp);

	/*
	 * Finalize all pending I/O.  In the case of tmpfs we want
	 * to throw all the data away so clean out the buffer cache
	 * and vm objects before calling vflush().
	 */
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_type == VREG && node->tn_vnode) {
			++node->tn_links;
			TMPFS_NODE_LOCK(node);
			vx_get(node->tn_vnode);
			tmpfs_truncate(node->tn_vnode, 0);
			vx_put(node->tn_vnode);
			TMPFS_NODE_UNLOCK(node);
			--node->tn_links;
		}
	}
	error = vflush(mp, 0, flags);
	if (error != 0)
		return error;

	/*
	 * First pass get rid of all the directory entries and
	 * vnode associations.  The directory structure will
	 * remain via the extra link count representing tn_dir.tn_parent.
	 *
	 * No vnodes should remain after the vflush above.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		++node->tn_links;
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR) {
			struct tmpfs_dirent *de;

			while (!TAILQ_EMPTY(&node->tn_dir.tn_dirhead)) {
				de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
				tmpfs_dir_detach(node, de);
				tmpfs_free_dirent(tmp, de);
				node->tn_size -= sizeof(struct tmpfs_dirent);
			}
		}
		KKASSERT(node->tn_vnode == NULL);
#if 0
		vp = node->tn_vnode;
		if (vp != NULL) {
			tmpfs_free_vp(vp);
			vrecycle(vp);
			node->tn_vnode = NULL;
		}
#endif
		TMPFS_NODE_UNLOCK(node);
		--node->tn_links;
	}

	/*
	 * Now get rid of all nodes.  We can remove any node with a
	 * link count of 0 or any directory node with a link count of
	 * 1.  The parents will not be destroyed until all their children
	 * have been destroyed.
	 *
	 * Recursion in tmpfs_free_node() can further modify the list so
	 * we cannot use a next pointer here.
	 *
	 * The root node will be destroyed by this loop (it will be last).
	 */
	while (!LIST_EMPTY(&tmp->tm_nodes_used)) {
		found = 0;
		LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
			if (node->tn_links == 0 ||
			    (node->tn_links == 1 && node->tn_type == VDIR)) {
				TMPFS_NODE_LOCK(node);
				tmpfs_free_node(tmp, node);
				/* eats lock */
				found = 1;
				break;
			}
		}
		if (found == 0) {
			kprintf("tmpfs: Cannot free entire node tree!\n");
			break;
		}
	}

	KKASSERT(tmp->tm_root == NULL);

	objcache_destroy(tmp->tm_dirent_pool);
	objcache_destroy(tmp->tm_node_pool);

	kmalloc_destroy(&tmp->tm_name_zone);
	kmalloc_destroy(&tmp->tm_dirent_zone);
	kmalloc_destroy(&tmp->tm_node_zone);

	tmp->tm_node_zone = tmp->tm_dirent_zone = NULL;

	lockuninit(&tmp->allnode_lock);
	KKASSERT(tmp->tm_pages_used == 0);
	KKASSERT(tmp->tm_nodes_inuse == 0);

	/* Throw away the tmpfs_mount structure. */
	kfree(tmp, M_TMPFSMNT);
	mp->mnt_data = NULL;

	mp->mnt_flag &= ~MNT_LOCAL;
	return 0;
}

/* --------------------------------------------------------------------- */

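/*
 * Return a locked, referenced vnode for the root node of the mount.
 */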
static int
tmpfs_root(struct mount *mp, struct vnode **vpp)
{
	struct tmpfs_mount *tmp;
	int error;

	tmp = VFS_TO_TMPFS(mp);
	if (tmp->tm_root == NULL) {
		kprintf("tmpfs_root: called without root node %p\n", mp);
		print_backtrace(-1);
		*vpp = NULL;
		error = EINVAL;
	} else {
		error = tmpfs_alloc_vp(mp, tmp->tm_root, LK_EXCLUSIVE, vpp);
		(*vpp)->v_flag |= VROOT;
		(*vpp)->v_type = VDIR;
	}
	return error;
}

/* --------------------------------------------------------------------- */

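/*
 * NFS file handle to vnode conversion: look up the node whose inode
 * number and generation match the handle and return a locked vnode for
 * it.  The generation check rejects handles for recycled nodes.
 */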
static int
tmpfs_fhtovp(struct mount *mp, struct vnode *rootvp, struct fid *fhp,
	     struct vnode **vpp)
{
	boolean_t found;
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return EINVAL;

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return EINVAL;

	found = FALSE;

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			found = TRUE;
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (found)
		return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));

	return (EINVAL);
}

/* --------------------------------------------------------------------- */

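/*
 * Report file system statistics.  Block counts are in PAGE_SIZE units
 * and are derived from the page and node limits computed at mount time.
 */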
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	fsfilcnt_t freenodes;
	struct tmpfs_mount *tmp;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	sbp->f_blocks = tmp->tm_pages_max;
	sbp->f_bavail = tmp->tm_pages_max - tmp->tm_pages_used;
	sbp->f_bfree = sbp->f_bavail;

	freenodes = tmp->tm_nodes_max - tmp->tm_nodes_inuse;

	sbp->f_files = freenodes + tmp->tm_nodes_inuse;
	sbp->f_ffree = freenodes;
	sbp->f_owner = tmp->tm_root->tn_uid;

	return 0;
}

/* --------------------------------------------------------------------- */

/*
 * tmpfs vfs operations.
 */

static struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			vfs_stdsync
};

VFS_SET(tmpfs_vfsops, tmpfs, 0);