/*	$NetBSD: chfs_vfsops.c,v 1.9 2013/10/20 17:18:38 christos Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/buf.h>
//XXX needed just for debugging
#include <sys/fstrans.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/ktrace.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>
#include "chfs.h"
#include "chfs_args.h"

MODULE(MODULE_CLASS_VFS, chfs, "flash");

/* --------------------------------------------------------------------- */
/* functions */

static int chfs_mount(struct mount *, const char *, void *, size_t *);
static int chfs_unmount(struct mount *, int);
static int chfs_root(struct mount *, struct vnode **);
static int chfs_vget(struct mount *, ino_t, struct vnode **);
static int chfs_fhtovp(struct mount *, struct fid *, struct vnode **);
static int chfs_vptofh(struct vnode *, struct fid *, size_t *);
static int chfs_start(struct mount *, int);
static int chfs_statvfs(struct mount *, struct statvfs *);
static int chfs_sync(struct mount *, int, kauth_cred_t);
static void chfs_init(void);
static void chfs_reinit(void);
static void chfs_done(void);
static int chfs_snapshot(struct mount *, struct vnode *,
    struct timespec *);

/* --------------------------------------------------------------------- */
/* structures */

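/*
 * genfs gop_alloc hook.  CHFS does no block preallocation here, so this
 * is deliberately a no-op that always succeeds.
 */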
int
chfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
    kauth_cred_t cred)
{
	return (0);
}

const struct genfs_ops chfs_genfsops = {
	.gop_size = genfs_size,
	.gop_alloc = chfs_gop_alloc,
	.gop_write = genfs_gop_write,
	.gop_markupdate = ufs_gop_markupdate,
};

struct pool chfs_inode_pool;

/* for looking up the major for flash */
extern const struct cdevsw flash_cdevsw;

/* --------------------------------------------------------------------- */

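/*
 * VFS mount entry point.  Copies in the ufs_args, answers MNT_GETARGS
 * queries, refuses MNT_UPDATE, looks up and validates the flash block
 * device named by fspec, opens it and hands over to chfs_mountfs().
 */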
static int
chfs_mount(struct mount *mp,
    const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct nameidata nd;
	struct pathbuf *pb;
	struct vnode *devvp = NULL;
	struct ufs_args *args = data;
	struct ufsmount *ump = NULL;
	struct chfs_mount *chmp;
	int err = 0;
	int xflags;

	dbg("mount()\n");

	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		memset(args, 0, sizeof *args);
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/* XXX: Updating file system settings in place is not
		 * supported yet; it should be added. */

		return ENODEV;
	}

	if (args->fspec != NULL) {
		err = pathbuf_copyin(args->fspec, &pb);
		if (err) {
			return err;
		}
		/* Look up the name and verify that it's sane. */
		NDINIT(&nd, LOOKUP, FOLLOW, pb);
		if ((err = namei(&nd)) != 0) {
			pathbuf_destroy(pb);
			return (err);
		}
		devvp = nd.ni_vp;
		pathbuf_destroy(pb);

		/* Be sure this is a valid block device */
		if (devvp->v_type != VBLK)
			err = ENOTBLK;
		else if (bdevsw_lookup(devvp->v_rdev) == NULL)
			err = ENXIO;
	}

	if (err) {
		vrele(devvp);
		return (err);
	}

	if (mp->mnt_flag & MNT_RDONLY)
		xflags = FREAD;
	else
		xflags = FREAD|FWRITE;

	err = VOP_OPEN(devvp, xflags, FSCRED);
	if (err)
		goto fail;

	/* call CHFS mount function */
	err = chfs_mountfs(devvp, mp);
	if (err) {
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		(void)VOP_CLOSE(devvp, xflags, NOCRED);
		VOP_UNLOCK(devvp);
		goto fail;
	}

	ump = VFSTOUFS(mp);
	chmp = ump->um_chfs;

	vfs_getnewfsid(mp);
	chmp->chm_fsmp = mp;

	return set_statvfs_info(path,
	    UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

fail:
	vrele(devvp);
	return (err);
}

/* chfs_mountfs - init CHFS */
int
chfs_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct lwp *l = curlwp;
	kauth_cred_t cred;
	devmajor_t flash_major;
	dev_t dev;
	struct ufsmount* ump = NULL;
	struct chfs_mount* chmp;
	struct vnode *vp;
	int err = 0;

	dbg("mountfs()\n");

	dev = devvp->v_rdev;
	cred = l ? l->l_cred : NOCRED;

	/* Flush out any old buffers remaining from a previous use. */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	err = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (err)
		return (err);

	/* Setup device. */
	flash_major = cdevsw_lookup_major(&flash_cdevsw);

	if (devvp->v_type != VBLK)
		err = ENOTBLK;
	else if (bdevsw_lookup(dev) == NULL)
		err = ENXIO;
	else if (major(dev) != flash_major) {
		dbg("major(dev): %d, flash_major: %d\n",
		    major(dev), flash_major);
		err = ENODEV;
	}
	if (err) {
		vrele(devvp);
		return (err);
	}

	/* Connect CHFS to UFS. */
	ump = kmem_zalloc(sizeof(struct ufsmount), KM_SLEEP);

	ump->um_fstype = UFS1;
	ump->um_chfs = kmem_zalloc(sizeof(struct chfs_mount), KM_SLEEP);
	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);

	chmp = ump->um_chfs;

	/* Initialize erase block handler. */
	chmp->chm_ebh = kmem_alloc(sizeof(struct chfs_ebh), KM_SLEEP);

	dbg("[]opening flash: %u\n", (unsigned int)devvp->v_rdev);
	err = ebh_open(chmp->chm_ebh, devvp->v_rdev);
	if (err) {
		dbg("error while opening flash\n");
		goto fail;
	}

	//TODO check flash sizes

	/* Initialize vnode cache's hashtable and eraseblock array. */
	chmp->chm_gbl_version = 0;
	chmp->chm_vnocache_hash = chfs_vnocache_hash_init();

	chmp->chm_blocks = kmem_zalloc(chmp->chm_ebh->peb_nr *
	    sizeof(struct chfs_eraseblock), KM_SLEEP);

	/* Initialize mutexes. */
	mutex_init(&chmp->chm_lock_mountfields, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&chmp->chm_lock_sizes, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&chmp->chm_lock_vnocache, MUTEX_DEFAULT, IPL_NONE);

	/* Initialize read/write constants. (from UFS) */
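	/*
	 * The values below encode a fixed 4 KiB logical block size and
	 * 2 KiB fragment size: bshift is log2(bsize), qbmask is bsize - 1,
	 * bmask is ~qbmask (i.e. -bsize), and fmask/qfmask are the same
	 * construction for the fragment size.
	 */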
	chmp->chm_fs_bmask = -4096;
	chmp->chm_fs_bsize = 4096;
	chmp->chm_fs_qbmask = 4095;
	chmp->chm_fs_bshift = 12;
	chmp->chm_fs_fmask = -2048;
	chmp->chm_fs_qfmask = 2047;

	/* Initialize writebuffer. */
	chmp->chm_wbuf_pagesize = chmp->chm_ebh->flash_if->page_size;
	dbg("wbuf size: %zu\n", chmp->chm_wbuf_pagesize);
	chmp->chm_wbuf = kmem_alloc(chmp->chm_wbuf_pagesize, KM_SLEEP);
	rw_init(&chmp->chm_lock_wbuf);

	/* Initialize queues. */
	TAILQ_INIT(&chmp->chm_free_queue);
	TAILQ_INIT(&chmp->chm_clean_queue);
	TAILQ_INIT(&chmp->chm_dirty_queue);
	TAILQ_INIT(&chmp->chm_very_dirty_queue);
	TAILQ_INIT(&chmp->chm_erasable_pending_wbuf_queue);
	TAILQ_INIT(&chmp->chm_erase_pending_queue);

	/* Initialize flash-specific constants. */
	chfs_calc_trigger_levels(chmp);

	/* Initialize sizes. */
	chmp->chm_nr_free_blocks = 0;
	chmp->chm_nr_erasable_blocks = 0;
	chmp->chm_max_vno = 2;
	chmp->chm_checked_vno = 2;
	chmp->chm_unchecked_size = 0;
	chmp->chm_used_size = 0;
	chmp->chm_dirty_size = 0;
	chmp->chm_wasted_size = 0;
	chmp->chm_free_size = chmp->chm_ebh->eb_size * chmp->chm_ebh->peb_nr;

	/* Build filesystem. */
	err = chfs_build_filesystem(chmp);

	if (err) {
		/* Armageddon and return. */
		chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);
		ebh_close(chmp->chm_ebh);
		err = EIO;
		goto fail;
	}

	/* Initialize UFS. */
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_CHFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE;
	ump->um_flags = 0;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
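	/* 1048512 KiB, i.e. 64 KiB short of 1 GiB. */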
	ump->um_maxfilesize = 1048512 * 1024;

	/* Allocate the root vnode. */
	err = VFS_VGET(mp, CHFS_ROOTINO, &vp);
	if (err) {
		dbg("error: %d while allocating root node\n", err);
		return err;
	}
	vput(vp);

	/* Start GC. */
	chfs_gc_thread_start(chmp);
	mutex_enter(&chmp->chm_lock_mountfields);
	chfs_gc_trigger(chmp);
	mutex_exit(&chmp->chm_lock_mountfields);

	spec_node_setmountedfs(devvp, mp);
	return 0;

fail:
	kmem_free(chmp->chm_ebh, sizeof(struct chfs_ebh));
	kmem_free(chmp, sizeof(struct chfs_mount));
	kmem_free(ump, sizeof(struct ufsmount));
	return err;
}

/* --------------------------------------------------------------------- */

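/*
 * VFS unmount entry point.  Stops the garbage collector thread, flushes
 * vnodes and any pending write buffer, frees the per-eraseblock node
 * references, destroys the vnode cache and locks, closes the eraseblock
 * handler and finally releases the flash device.
 */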
static int
chfs_unmount(struct mount *mp, int mntflags)
{
	int flags = 0, i = 0;
	struct ufsmount *ump;
	struct chfs_mount *chmp;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	dbg("[START]\n");

	ump = VFSTOUFS(mp);
	chmp = ump->um_chfs;

	/* Stop GC. */
	chfs_gc_thread_stop(chmp);

	/* Flush every buffer. */
	(void)vflush(mp, NULLVP, flags);

	if (chmp->chm_wbuf_len) {
		mutex_enter(&chmp->chm_lock_mountfields);
		chfs_flush_pending_wbuf(chmp);
		mutex_exit(&chmp->chm_lock_mountfields);
	}

	/* Free node references. */
	for (i = 0; i < chmp->chm_ebh->peb_nr; i++) {
		chfs_free_node_refs(&chmp->chm_blocks[i]);
	}

	/* Destroy vnode cache hashtable. */
	chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);

	/* Close eraseblock handler. */
	ebh_close(chmp->chm_ebh);

	/* Destroy mutexes. */
	rw_destroy(&chmp->chm_lock_wbuf);
	mutex_destroy(&chmp->chm_lock_vnocache);
	mutex_destroy(&chmp->chm_lock_sizes);
	mutex_destroy(&chmp->chm_lock_mountfields);

	/* Unmount UFS. */
	if (ump->um_devvp->v_type != VBAD) {
		spec_node_setmountedfs(ump->um_devvp, NULL);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(ump->um_devvp, FREAD|FWRITE, NOCRED);
	vput(ump->um_devvp);

	mutex_destroy(&ump->um_lock);

	/* Everything done. */
	kmem_free(ump, sizeof(struct ufsmount));
	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	dbg("[END]\n");
	return (0);
}

/* --------------------------------------------------------------------- */

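/*
 * Look up and return the root vnode of the file system.
 */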
static int
chfs_root(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if ((error = VFS_VGET(mp, (ino_t)UFS_ROOTINO, &vp)) != 0)
		return error;
	*vpp = vp;
	return 0;
}

/* --------------------------------------------------------------------- */

extern rb_tree_ops_t frag_rbtree_ops;

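/*
 * VFS vget entry point: return the vnode for inode number "ino".  The
 * inode hash is consulted first; on a miss a fresh vnode and chfs_inode
 * are set up, the node is looked up in (or added to) the vnode cache,
 * and for nodes already on flash the inode, directory entries, symlink
 * target or device number are read back in.
 */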
static int
chfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct chfs_mount *chmp;
	struct chfs_inode *ip;
	struct ufsmount *ump;
	struct vnode *vp;
	dev_t dev;
	int error;
	struct chfs_vnode_cache* chvc = NULL;
	struct chfs_node_ref* nref = NULL;
	struct buf *bp;

	dbg("vget() | ino: %llu\n", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if (!vpp) {
		vpp = kmem_alloc(sizeof(struct vnode*), KM_SLEEP);
	}

	/* Get node from inode hash. */
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		return 0;
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_CHFS,
		    mp, chfs_vnodeop_p, NULL, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);

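	/*
	 * Re-check the hash under chfs_hashlock: another thread may have
	 * set up this inode while we slept in getnewvnode()/pool_get().
	 * If so, throw away the fresh vnode and inode and start over.
	 */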
	mutex_enter(&chfs_hashlock);
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		mutex_exit(&chfs_hashlock);
		ungetnewvnode(vp);
		pool_put(&chfs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	/* Initialize vnode/inode. */
	memset(ip, 0, sizeof(*ip));
	vp->v_data = ip;
	ip->vp = vp;
	ip->ch_type = VTTOCHT(vp->v_type);
	ip->ump = ump;
	ip->chmp = chmp = ump->um_chfs;
	ip->dev = dev;
	ip->ino = ino;
	vp->v_mount = mp;
	genfs_node_init(vp, &chfs_genfsops);

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	chfs_ihashins(ip);
	mutex_exit(&chfs_hashlock);

	/* Set root inode. */
	if (ino == CHFS_ROOTINO) {
		dbg("SETROOT\n");
		vp->v_vflag |= VV_ROOT;
		vp->v_type = VDIR;
		ip->ch_type = CHT_DIR;
		ip->mode = IFMT | IEXEC | IWRITE | IREAD;
		ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
		chfs_update(vp, NULL, NULL, UPDATE_WAIT);
		TAILQ_INIT(&ip->dents);
		chfs_set_vnode_size(vp, 512);
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ino);
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!chvc) {
		dbg("!chvc\n");
		/* Initialize the corresponding vnode cache. */
		/* XXX: we can't allocate under a lock, refactor this! */
		chvc = chfs_vnode_cache_alloc(ino);
		mutex_enter(&chmp->chm_lock_vnocache);
		if (ino == CHFS_ROOTINO) {
			chvc->nlink = 2;
			chvc->pvno = CHFS_ROOTINO;
			chvc->state = VNO_STATE_CHECKEDABSENT;
		}
		chfs_vnode_cache_add(chmp, chvc);
		mutex_exit(&chmp->chm_lock_vnocache);

		ip->chvc = chvc;
		TAILQ_INIT(&ip->dents);
	} else {
		dbg("chvc\n");
		ip->chvc = chvc;
		/* We had a vnode cache, the node is already on flash, so read it */
		if (ino == CHFS_ROOTINO) {
			chvc->pvno = CHFS_ROOTINO;
			TAILQ_INIT(&chvc->scan_dirents);
		} else {
			chfs_readvnode(mp, ino, &vp);
		}

		mutex_enter(&chmp->chm_lock_mountfields);
		/* Initialize type specific things. */
		switch (ip->ch_type) {
		case CHT_DIR:
			/* Read every dirent. */
			nref = chvc->dirents;
			while (nref &&
			    (struct chfs_vnode_cache *)nref != chvc) {
				chfs_readdirent(mp, nref, ip);
				nref = nref->nref_next;
			}
			chfs_set_vnode_size(vp, 512);
			break;
		case CHT_REG:
			/* FALLTHROUGH */
		case CHT_SOCK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}
			break;
		case CHT_LNK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set link. */
			dbg("size: %llu\n", (unsigned long long)ip->size);
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = ip->size;
			bp->b_data = kmem_alloc(ip->size, KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			if (!ip->target)
				ip->target = kmem_alloc(ip->size,
				    KM_SLEEP);
			memcpy(ip->target, bp->b_data, ip->size);
			kmem_free(bp->b_data, ip->size);
			putiobuf(bp);

			break;
		case CHT_CHR:
			/* FALLTHROUGH */
		case CHT_BLK:
			/* FALLTHROUGH */
		case CHT_FIFO:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set device. */
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = sizeof(dev_t);
			bp->b_data = kmem_alloc(sizeof(dev_t), KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			memcpy(&ip->rdev,
			    bp->b_data, sizeof(dev_t));
			kmem_free(bp->b_data, sizeof(dev_t));
			putiobuf(bp);
			/* Set specific operations. */
			if (ip->ch_type == CHT_FIFO) {
				vp->v_op = chfs_fifoop_p;
			} else {
				vp->v_op = chfs_specop_p;
				spec_node_init(vp, ip->rdev);
			}

			break;
		case CHT_BLANK:
			/* FALLTHROUGH */
		case CHT_BAD:
			break;
		}
		mutex_exit(&chmp->chm_lock_mountfields);

	}

	/* Finish inode initialization. */
	ip->devvp = ump->um_devvp;
	vref(ip->devvp);

	uvm_vnp_setsize(vp, ip->size);
	*vpp = vp;

	return 0;
}

/* --------------------------------------------------------------------- */

static int
chfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	return ENODEV;
}

/* --------------------------------------------------------------------- */

static int
chfs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	return ENODEV;
}

/* --------------------------------------------------------------------- */

static int
chfs_start(struct mount *mp, int flags)
{
	return 0;
}

/* --------------------------------------------------------------------- */

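/*
 * Fill in statvfs from the eraseblock handler: block counts are reported
 * in units of whole erase blocks, with the write reserve subtracted from
 * the blocks available to ordinary users.
 */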
static int
chfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	struct chfs_mount *chmp;
	struct ufsmount *ump;
	dbg("statvfs\n");

	ump = VFSTOUFS(mp);
	chmp = ump->um_chfs;

	sbp->f_flag   = mp->mnt_flag;
	sbp->f_bsize  = chmp->chm_ebh->eb_size;
	sbp->f_frsize = chmp->chm_ebh->eb_size;
	sbp->f_iosize = chmp->chm_ebh->eb_size;

	sbp->f_blocks = chmp->chm_ebh->peb_nr;
	sbp->f_files  = 0;
	sbp->f_bavail = chmp->chm_nr_free_blocks - chmp->chm_resv_blocks_write;

	sbp->f_bfree = chmp->chm_nr_free_blocks;
	sbp->f_bresvd = chmp->chm_resv_blocks_write;

	/* FFS specific */
	sbp->f_ffree  = 0;
	sbp->f_favail = 0;
	sbp->f_fresvd = 0;

	copy_statvfs_info(sbp, mp);

	return 0;
}

/* --------------------------------------------------------------------- */

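/*
 * chfs_sync is a no-op for now; the write buffer is flushed at unmount
 * time instead.
 */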
static int
chfs_sync(struct mount *mp, int waitfor,
    kauth_cred_t uc)
{
	return 0;
}

/* --------------------------------------------------------------------- */

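/*
 * Module (pre)initialization: set up the CHFS pools and the inode hash,
 * then let UFS initialize its own shared state.
 */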
static void
chfs_init(void)
{
	/* Initialize pools and inode hash. */
	chfs_alloc_pool_caches();
	chfs_ihashinit();
	pool_init(&chfs_inode_pool, sizeof(struct chfs_inode), 0, 0, 0,
	    "chfsinopl", &pool_allocator_nointr, IPL_NONE);
	ufs_init();
}

/* --------------------------------------------------------------------- */

static void
chfs_reinit(void)
{
	chfs_ihashreinit();
	ufs_reinit();
}

/* --------------------------------------------------------------------- */

static void
chfs_done(void)
{
	ufs_done();
	chfs_ihashdone();
	pool_destroy(&chfs_inode_pool);
	chfs_destroy_pool_caches();
}

/* --------------------------------------------------------------------- */

static int
chfs_snapshot(struct mount *mp, struct vnode *vp,
    struct timespec *ctime)
{
	return ENODEV;
}

/* --------------------------------------------------------------------- */

/*
 * chfs vfs operations.
 */

extern const struct vnodeopv_desc chfs_fifoop_opv_desc;
extern const struct vnodeopv_desc chfs_specop_opv_desc;
extern const struct vnodeopv_desc chfs_vnodeop_opv_desc;

const struct vnodeopv_desc * const chfs_vnodeopv_descs[] = {
	&chfs_fifoop_opv_desc,
	&chfs_specop_opv_desc,
	&chfs_vnodeop_opv_desc,
	NULL,
};

struct vfsops chfs_vfsops = {
	MOUNT_CHFS,			/* vfs_name */
	sizeof (struct chfs_args),
	chfs_mount,			/* vfs_mount */
	chfs_start,			/* vfs_start */
	chfs_unmount,		/* vfs_unmount */
	chfs_root,			/* vfs_root */
	ufs_quotactl,		/* vfs_quotactl */
	chfs_statvfs,		/* vfs_statvfs */
	chfs_sync,			/* vfs_sync */
	chfs_vget,			/* vfs_vget */
	chfs_fhtovp,		/* vfs_fhtovp */
	chfs_vptofh,		/* vfs_vptofh */
	chfs_init,			/* vfs_init */
	chfs_reinit,		/* vfs_reinit */
	chfs_done,			/* vfs_done */
	NULL,				/* vfs_mountroot */
	chfs_snapshot,		/* vfs_snapshot */
	vfs_stdextattrctl,	/* vfs_extattrctl */
	(void *)eopnotsupp,	/* vfs_suspendctl */
	genfs_renamelock_enter,
	genfs_renamelock_exit,
	(void *)eopnotsupp,
	chfs_vnodeopv_descs,
	0,					/* vfs_refcount */
	{ NULL, NULL },
};

/* For using CHFS as a module. */
static int
chfs_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&chfs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&chfs_vfsops);
	default:
		return ENOTTY;
	}
}