xref: /dragonfly/sys/vfs/hammer/hammer_vfsops.c (revision 52f9f0d9)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/vnode.h>
39 #include <sys/mount.h>
40 #include <sys/malloc.h>
41 #include <sys/nlookup.h>
42 #include <sys/fcntl.h>
43 #include <sys/sysctl.h>
44 #include <sys/buf.h>
45 #include <sys/buf2.h>
46 #include "hammer.h"
47 
48 /*
49  * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
50  *	  in conditionals.
51  */
52 int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
53 int hammer_debug_io;
54 int hammer_debug_general;
55 int hammer_debug_debug = 1;		/* medium-error panics */
56 int hammer_debug_inode;
57 int hammer_debug_locks;
58 int hammer_debug_btree;
59 int hammer_debug_tid;
60 int hammer_debug_recover;		/* -1 will disable, +1 will force */
61 int hammer_debug_recover_faults;
62 int hammer_debug_critical;		/* non-zero: enter debugger on error */
63 int hammer_cluster_enable = 1;		/* enable read clustering by default */
64 int hammer_live_dedup = 0;
65 int hammer_tdmux_ticks;
66 int hammer_count_fsyncs;
67 int hammer_count_inodes;
68 int hammer_count_iqueued;
69 int hammer_count_reclaims;
70 int hammer_count_records;
71 int hammer_count_record_datas;
72 int hammer_count_volumes;
73 int hammer_count_buffers;
74 int hammer_count_nodes;
75 int64_t hammer_count_extra_space_used;
76 int64_t hammer_stats_btree_lookups;
77 int64_t hammer_stats_btree_searches;
78 int64_t hammer_stats_btree_inserts;
79 int64_t hammer_stats_btree_deletes;
80 int64_t hammer_stats_btree_elements;
81 int64_t hammer_stats_btree_splits;
82 int64_t hammer_stats_btree_iterations;
83 int64_t hammer_stats_btree_root_iterations;
84 int64_t hammer_stats_record_iterations;
85 
86 int64_t hammer_stats_file_read;
87 int64_t hammer_stats_file_write;
88 int64_t hammer_stats_file_iopsr;
89 int64_t hammer_stats_file_iopsw;
90 int64_t hammer_stats_disk_read;
91 int64_t hammer_stats_disk_write;
92 int64_t hammer_stats_inode_flushes;
93 int64_t hammer_stats_commits;
94 int64_t hammer_stats_undo;
95 int64_t hammer_stats_redo;
96 
97 long hammer_count_dirtybufspace;	/* global */
98 int hammer_count_refedbufs;		/* global */
99 int hammer_count_reservations;
100 long hammer_count_io_running_read;
101 long hammer_count_io_running_write;
102 int hammer_count_io_locked;
103 long hammer_limit_dirtybufspace;	/* per-mount */
104 int hammer_limit_recs;			/* as a whole XXX */
105 int hammer_limit_inode_recs = 2048;	/* per inode */
106 int hammer_limit_reclaims;
107 int hammer_live_dedup_cache_size = DEDUP_CACHE_SIZE;
108 int hammer_limit_redo = 4096 * 1024;	/* per inode */
109 int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
110 int hammer_bio_count;
111 int hammer_verify_zone;
112 int hammer_verify_data = 1;
113 int hammer_write_mode;
114 int hammer_double_buffer;
115 int hammer_yield_check = 16;
116 int hammer_fsync_mode = 3;
117 int64_t hammer_contention_count;
118 int64_t hammer_zone_limit;
119 
120 /*
121  * Live dedup debug counters (sysctls are writable so that counters
122  * can be reset from userspace).
123  */
124 int64_t hammer_live_dedup_vnode_bcmps = 0;
125 int64_t hammer_live_dedup_device_bcmps = 0;
126 int64_t hammer_live_dedup_findblk_failures = 0;
127 int64_t hammer_live_dedup_bmap_saves = 0;
128 
129 
130 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
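/*
 * All of the OIDs below attach under vfs.hammer.*.  For example,
 * "sysctl vfs.hammer.debug_general=1" sets the general debug bitmask at
 * runtime (bit 0x0001 of which is tested in hammer_vfs_mount() below).
 */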
131 
132 SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
133 	   &hammer_supported_version, 0, "");
134 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
135 	   &hammer_debug_general, 0, "");
136 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
137 	   &hammer_debug_io, 0, "");
138 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
139 	   &hammer_debug_debug, 0, "");
140 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
141 	   &hammer_debug_inode, 0, "");
142 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
143 	   &hammer_debug_locks, 0, "");
144 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
145 	   &hammer_debug_btree, 0, "");
146 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
147 	   &hammer_debug_tid, 0, "");
148 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
149 	   &hammer_debug_recover, 0, "");
150 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
151 	   &hammer_debug_recover_faults, 0, "");
152 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
153 	   &hammer_debug_critical, 0, "");
154 SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
155 	   &hammer_cluster_enable, 0, "");
156 /*
157  * 0 - live dedup is disabled
158  * 1 - dedup cache is populated on reads only
159  * 2 - dedup cache is populated on both reads and writes
160  */
161 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup, CTLFLAG_RD,
162 	   &hammer_live_dedup, 0, "Enable live dedup (experimental)");
163 SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
164 	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");
165 
166 SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
167 	   &hammer_limit_dirtybufspace, 0, "");
168 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
169 	   &hammer_limit_recs, 0, "");
170 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
171 	   &hammer_limit_inode_recs, 0, "");
172 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
173 	   &hammer_limit_reclaims, 0, "");
174 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
175 	   &hammer_live_dedup_cache_size, 0,
176 	   "Number of cache entries");
177 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
178 	   &hammer_limit_redo, 0, "");
179 
180 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
181 	   &hammer_count_fsyncs, 0, "");
182 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
183 	   &hammer_count_inodes, 0, "");
184 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
185 	   &hammer_count_iqueued, 0, "");
186 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
187 	   &hammer_count_reclaims, 0, "");
188 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
189 	   &hammer_count_records, 0, "");
190 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
191 	   &hammer_count_record_datas, 0, "");
192 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
193 	   &hammer_count_volumes, 0, "");
194 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
195 	   &hammer_count_buffers, 0, "");
196 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
197 	   &hammer_count_nodes, 0, "");
198 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
199 	   &hammer_count_extra_space_used, 0, "");
200 
201 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
202 	   &hammer_stats_btree_searches, 0, "");
203 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
204 	   &hammer_stats_btree_lookups, 0, "");
205 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
206 	   &hammer_stats_btree_inserts, 0, "");
207 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
208 	   &hammer_stats_btree_deletes, 0, "");
209 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
210 	   &hammer_stats_btree_elements, 0, "");
211 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
212 	   &hammer_stats_btree_splits, 0, "");
213 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
214 	   &hammer_stats_btree_iterations, 0, "");
215 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
216 	   &hammer_stats_btree_root_iterations, 0, "");
217 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
218 	   &hammer_stats_record_iterations, 0, "");
219 
220 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
221 	   &hammer_stats_file_read, 0, "");
222 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
223 	   &hammer_stats_file_write, 0, "");
224 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
225 	   &hammer_stats_file_iopsr, 0, "");
226 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
227 	   &hammer_stats_file_iopsw, 0, "");
228 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
229 	   &hammer_stats_disk_read, 0, "");
230 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
231 	   &hammer_stats_disk_write, 0, "");
232 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
233 	   &hammer_stats_inode_flushes, 0, "");
234 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
235 	   &hammer_stats_commits, 0, "");
236 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
237 	   &hammer_stats_undo, 0, "");
238 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
239 	   &hammer_stats_redo, 0, "");
240 
241 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_vnode_bcmps, CTLFLAG_RW,
242 	    &hammer_live_dedup_vnode_bcmps, 0,
243 	    "successful vnode buffer comparisons");
244 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_device_bcmps, CTLFLAG_RW,
245 	    &hammer_live_dedup_device_bcmps, 0,
246 	    "successful device buffer comparisons");
247 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_findblk_failures, CTLFLAG_RW,
248 	    &hammer_live_dedup_findblk_failures, 0,
249 	    "block lookup failures for comparison");
250 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_bmap_saves, CTLFLAG_RW,
251 	    &hammer_live_dedup_bmap_saves, 0,
252 	    "useful physical block lookups");
253 
254 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
255 	   &hammer_count_dirtybufspace, 0, "");
256 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
257 	   &hammer_count_refedbufs, 0, "");
258 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
259 	   &hammer_count_reservations, 0, "");
260 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
261 	   &hammer_count_io_running_read, 0, "");
262 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
263 	   &hammer_count_io_locked, 0, "");
264 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
265 	   &hammer_count_io_running_write, 0, "");
266 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
267 	   &hammer_zone_limit, 0, "");
268 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
269 	   &hammer_contention_count, 0, "");
270 SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
271 	   &hammer_autoflush, 0, "");
272 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
273 	   &hammer_verify_zone, 0, "");
274 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
275 	   &hammer_verify_data, 0, "");
276 SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
277 	   &hammer_write_mode, 0, "");
278 SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
279 	   &hammer_double_buffer, 0, "");
280 SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
281 	   &hammer_yield_check, 0, "");
282 SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
283 	   &hammer_fsync_mode, 0, "");
284 
285 /* KTR_INFO_MASTER(hammer); */
286 
287 /*
288  * VFS ABI
289  */
290 static void	hammer_free_hmp(struct mount *mp);
291 
292 static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
293 				struct ucred *cred);
294 static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
295 static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
296 static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
297 				struct ucred *cred);
298 static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
299 				struct ucred *cred);
300 static int	hammer_vfs_sync(struct mount *mp, int waitfor);
301 static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
302 				ino_t ino, struct vnode **vpp);
303 static int	hammer_vfs_init(struct vfsconf *conf);
304 static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
305 				struct fid *fhp, struct vnode **vpp);
306 static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
307 static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
308 				int *exflagsp, struct ucred **credanonp);
309 
310 
311 static struct vfsops hammer_vfsops = {
312 	.vfs_mount	= hammer_vfs_mount,
313 	.vfs_unmount	= hammer_vfs_unmount,
314 	.vfs_root 	= hammer_vfs_root,
315 	.vfs_statfs	= hammer_vfs_statfs,
316 	.vfs_statvfs	= hammer_vfs_statvfs,
317 	.vfs_sync	= hammer_vfs_sync,
318 	.vfs_vget	= hammer_vfs_vget,
319 	.vfs_init	= hammer_vfs_init,
320 	.vfs_vptofh	= hammer_vfs_vptofh,
321 	.vfs_fhtovp	= hammer_vfs_fhtovp,
322 	.vfs_checkexp	= hammer_vfs_checkexp
323 };
324 
325 MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");
326 
327 VFS_SET(hammer_vfsops, hammer, 0);
328 MODULE_VERSION(hammer, 1);
329 
330 static int
331 hammer_vfs_init(struct vfsconf *conf)
332 {
333 	int n;
334 
335 	/*
336 	 * Wait up to this long for an exclusive deadlock to clear
337 	 * before acquiring a new shared lock on the ip.  The deadlock
338 	 * may have occurred on a B-Tree node related to the ip.
339 	 */
340 	if (hammer_tdmux_ticks == 0)
341 		hammer_tdmux_ticks = hz / 5;
342 
343 	/*
344 	 * Autosize
345 	 */
346 	if (hammer_limit_recs == 0) {
347 		hammer_limit_recs = nbuf * 25;
348 		n = kmalloc_limit(M_HAMMER) / 512;
349 		if (hammer_limit_recs > n)
350 			hammer_limit_recs = n;
351 	}
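	/*
	 * Illustrative sizing of hammer_limit_recs above (assumed numbers,
	 * not defaults): with nbuf = 100000 buffer cache buffers and a 1GB
	 * kmalloc limit for M_HAMMER, nbuf * 25 = 2500000 but
	 * 1GB / 512 = 2097152, so the limit is clamped to 2097152.
	 */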
352 	if (hammer_limit_dirtybufspace == 0) {
353 		hammer_limit_dirtybufspace = hidirtybufspace / 2;
354 		if (hammer_limit_dirtybufspace < 100)
355 			hammer_limit_dirtybufspace = 100;
356 	}
357 
358 	/*
359 	 * The hammer_inode structure detaches from the vnode on reclaim.
360 	 * hammer_limit_reclaims caps the number of inodes in this state
361 	 * to prevent a memory pool blowout.
362 	 */
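	/*
	 * E.g. with desiredvnodes = 100000 (an assumed value), up to
	 * 10000 reclaim-pending inodes may be held detached at once.
	 */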
363 	if (hammer_limit_reclaims == 0)
364 		hammer_limit_reclaims = desiredvnodes / 10;
365 
366 	return(0);
367 }
368 
369 static int
370 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
371 		 struct ucred *cred)
372 {
373 	struct hammer_mount_info info;
374 	hammer_mount_t hmp;
375 	hammer_volume_t rootvol;
376 	struct vnode *rootvp;
377 	struct vnode *devvp = NULL;
378 	const char *upath;	/* volume name in userspace */
379 	char *path;		/* volume name in system space */
380 	int error;
381 	int i;
382 	int master_id;
383 	char *next_volume_ptr = NULL;
384 
385 	/*
386 	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
387 	 */
388 	if (mntpt == NULL) {
389 		bzero(&info, sizeof(info));
390 		info.asof = 0;
391 		info.hflags = 0;
392 		info.nvolumes = 1;
393 
394 		next_volume_ptr = mp->mnt_stat.f_mntfromname;
395 
396 		/* Count number of volumes separated by ':' */
397 		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
398 			if (*p == ':') {
399 				++info.nvolumes;
400 			}
401 		}
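		/*
		 * Example (hypothetical device names): an f_mntfromname of
		 * "da0s1d:da1s1d" yields info.nvolumes = 2.  Names without
		 * a leading '/' are prefixed with "/dev/" when the volumes
		 * are opened below.
		 */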
402 
403 		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
404 	} else {
405 		if ((error = copyin(data, &info, sizeof(info))) != 0)
406 			return (error);
407 	}
408 
409 	/*
410 	 * updating or new mount
411 	 */
412 	if (mp->mnt_flag & MNT_UPDATE) {
413 		hmp = (void *)mp->mnt_data;
414 		KKASSERT(hmp != NULL);
415 	} else {
416 		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
417 			return (EINVAL);
418 		hmp = NULL;
419 	}
420 
421 	/*
422 	 * master-id validation.  The master id may not be changed by a
423 	 * mount update.
424 	 */
425 	if (info.hflags & HMNT_MASTERID) {
426 		if (hmp && hmp->master_id != info.master_id) {
427 			kprintf("hammer: cannot change master id "
428 				"with mount update\n");
429 			return(EINVAL);
430 		}
431 		master_id = info.master_id;
432 		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
433 			return (EINVAL);
434 	} else {
435 		if (hmp)
436 			master_id = hmp->master_id;
437 		else
438 			master_id = 0;
439 	}
440 
441 	/*
442 	 * Internal mount data structure
443 	 */
444 	if (hmp == NULL) {
445 		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
446 		mp->mnt_data = (qaddr_t)hmp;
447 		hmp->mp = mp;
448 		/*TAILQ_INIT(&hmp->recycle_list);*/
449 
450 		/*
451 		 * Make sure kmalloc type limits are set appropriately.
452 		 *
453 		 * Our inode kmalloc group is sized based on maxvnodes
454 		 * (controlled by the system, not us).
455 		 */
456 		kmalloc_create(&hmp->m_misc, "HAMMER-others");
457 		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
458 
459 		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */
460 
461 		hmp->root_btree_beg.localization = 0x00000000U;
462 		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
463 		hmp->root_btree_beg.key = -0x8000000000000000LL;
464 		hmp->root_btree_beg.create_tid = 1;
465 		hmp->root_btree_beg.delete_tid = 1;
466 		hmp->root_btree_beg.rec_type = 0;
467 		hmp->root_btree_beg.obj_type = 0;
468 
469 		hmp->root_btree_end.localization = 0xFFFFFFFFU;
470 		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
471 		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
472 		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
473 		hmp->root_btree_end.delete_tid = 0;   /* special case */
474 		hmp->root_btree_end.rec_type = 0xFFFFU;
475 		hmp->root_btree_end.obj_type = 0;
476 
477 		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
478 		hmp->krate.count = -16;	/* initial burst */
479 
480 		hmp->sync_lock.refs = 1;
481 		hmp->free_lock.refs = 1;
482 		hmp->undo_lock.refs = 1;
483 		hmp->blkmap_lock.refs = 1;
484 		hmp->snapshot_lock.refs = 1;
485 		hmp->volume_lock.refs = 1;
486 
487 		TAILQ_INIT(&hmp->delay_list);
488 		TAILQ_INIT(&hmp->flush_group_list);
489 		TAILQ_INIT(&hmp->objid_cache_list);
490 		TAILQ_INIT(&hmp->undo_lru_list);
491 		TAILQ_INIT(&hmp->reclaim_list);
492 
493 		RB_INIT(&hmp->rb_dedup_crc_root);
494 		RB_INIT(&hmp->rb_dedup_off_root);
495 		TAILQ_INIT(&hmp->dedup_lru_list);
496 	}
497 	hmp->hflags &= ~HMNT_USERFLAGS;
498 	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
499 
500 	hmp->master_id = master_id;
501 
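	/*
	 * A non-zero asof requests a read-only, historical (as-of) view
	 * of the filesystem; HAMMER_MAX_TID selects the current version.
	 */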
502 	if (info.asof) {
503 		mp->mnt_flag |= MNT_RDONLY;
504 		hmp->asof = info.asof;
505 	} else {
506 		hmp->asof = HAMMER_MAX_TID;
507 	}
508 
509 	hmp->volume_to_remove = -1;
510 
511 	/*
512 	 * Re-open read-write if originally read-only, or vice-versa.
513 	 *
514 	 * When going from read-only to read-write execute the stage2
515 	 * recovery if it has not already been run.
516 	 */
517 	if (mp->mnt_flag & MNT_UPDATE) {
518 		lwkt_gettoken(&hmp->fs_token);
519 		error = 0;
520 		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
521 			kprintf("HAMMER read-only -> read-write\n");
522 			hmp->ronly = 0;
523 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
524 				hammer_adjust_volume_mode, NULL);
525 			rootvol = hammer_get_root_volume(hmp, &error);
526 			if (rootvol) {
527 				hammer_recover_flush_buffers(hmp, rootvol, 1);
528 				error = hammer_recover_stage2(hmp, rootvol);
529 				bcopy(rootvol->ondisk->vol0_blockmap,
530 				      hmp->blockmap,
531 				      sizeof(hmp->blockmap));
532 				hammer_rel_volume(rootvol, 0);
533 			}
534 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
535 				hammer_reload_inode, NULL);
536 			/* kernel clears MNT_RDONLY */
537 		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
538 			kprintf("HAMMER read-write -> read-only\n");
539 			hmp->ronly = 1;	/* messy */
540 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
541 				hammer_reload_inode, NULL);
542 			hmp->ronly = 0;
543 			hammer_flusher_sync(hmp);
544 			hammer_flusher_sync(hmp);
545 			hammer_flusher_sync(hmp);
546 			hmp->ronly = 1;
547 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
548 				hammer_adjust_volume_mode, NULL);
549 		}
550 		lwkt_reltoken(&hmp->fs_token);
551 		return(error);
552 	}
553 
554 	RB_INIT(&hmp->rb_vols_root);
555 	RB_INIT(&hmp->rb_inos_root);
556 	RB_INIT(&hmp->rb_redo_root);
557 	RB_INIT(&hmp->rb_nods_root);
558 	RB_INIT(&hmp->rb_undo_root);
559 	RB_INIT(&hmp->rb_resv_root);
560 	RB_INIT(&hmp->rb_bufs_root);
561 	RB_INIT(&hmp->rb_pfsm_root);
562 
563 	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
564 
565 	RB_INIT(&hmp->volu_root);
566 	RB_INIT(&hmp->undo_root);
567 	RB_INIT(&hmp->data_root);
568 	RB_INIT(&hmp->meta_root);
569 	RB_INIT(&hmp->lose_root);
570 	TAILQ_INIT(&hmp->iorun_list);
571 
572 	lwkt_token_init(&hmp->fs_token, "hammerfs");
573 	lwkt_token_init(&hmp->io_token, "hammerio");
574 
575 	lwkt_gettoken(&hmp->fs_token);
576 
577 	/*
578 	 * Load volumes
579 	 */
580 	path = objcache_get(namei_oc, M_WAITOK);
581 	hmp->nvolumes = -1;
582 	for (i = 0; i < info.nvolumes; ++i) {
583 		if (mntpt == NULL) {
584 			/*
585 			 * Root mount.
586 			 */
587 			KKASSERT(next_volume_ptr != NULL);
588 			strcpy(path, "");
589 			if (*next_volume_ptr != '/') {
590 				/* relative path */
591 				strcpy(path, "/dev/");
592 			}
593 			int k;
594 			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
595 				if (*next_volume_ptr == '\0') {
596 					break;
597 				} else if (*next_volume_ptr == ':') {
598 					++next_volume_ptr;
599 					break;
600 				} else {
601 					path[k] = *next_volume_ptr;
602 					++next_volume_ptr;
603 				}
604 			}
605 			path[k] = '\0';
606 
607 			error = 0;
608 			cdev_t dev = kgetdiskbyname(path);
609 			error = bdevvp(dev, &devvp);
610 			if (error) {
611 				kprintf("hammer_mountroot: can't find devvp\n");
612 			}
613 		} else {
614 			error = copyin(&info.volumes[i], &upath,
615 				       sizeof(char *));
616 			if (error == 0)
617 				error = copyinstr(upath, path,
618 						  MAXPATHLEN, NULL);
619 		}
620 		if (error == 0)
621 			error = hammer_install_volume(hmp, path, devvp);
622 		if (error)
623 			break;
624 	}
625 	objcache_put(namei_oc, path);
626 
627 	/*
628 	 * Make sure we found a root volume
629 	 */
630 	if (error == 0 && hmp->rootvol == NULL) {
631 		kprintf("hammer_mount: No root volume found!\n");
632 		error = EINVAL;
633 	}
634 
635 	/*
636 	 * Check that all required volumes are available
637 	 */
638 	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
639 		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
640 		error = EINVAL;
641 	}
642 
643 	if (error) {
644 		/* called with fs_token held */
645 		hammer_free_hmp(mp);
646 		return (error);
647 	}
648 
649 	/*
650 	 * No errors, set up enough of the mount point so we can look up the
651 	 * root vnode.
652 	 */
653 	mp->mnt_iosize_max = MAXPHYS;
654 	mp->mnt_kern_flag |= MNTK_FSMID;
655 
656 	/*
657 	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
658 	 * will acquire a per-mount token prior to entry and release it
659 	 * on return, so even if we do not specify it we no longer get
660 	 * the BGL regardless of how we are flagged.
661 	 */
662 	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
663 	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/
664 
665 	/*
666 	 * note: f_iosize is used by vnode_pager_haspage() when constructing
667 	 * its VOP_BMAP call.
668 	 */
669 	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
670 	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
671 
672 	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
673 	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;
674 
675 	mp->mnt_maxsymlinklen = 255;
676 	mp->mnt_flag |= MNT_LOCAL;
677 
678 	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
679 	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
680 	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
681 
682 	/*
683 	 * The root volume's ondisk pointer is only valid if we hold a
684 	 * reference to it.
685 	 */
686 	rootvol = hammer_get_root_volume(hmp, &error);
687 	if (error)
688 		goto failed;
689 
690 	/*
691 	 * Perform any necessary UNDO operations.  The recovery code does
692 	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
693 	 * and then re-copy it after recovery is complete.
694 	 *
695 	 * If this is a read-only mount the UNDO information is retained
696 	 * in memory in the form of dirty buffer cache buffers, and not
697 	 * written back to the media.
698 	 */
699 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
700 	      sizeof(hmp->blockmap));
701 
702 	/*
703 	 * Check filesystem version
704 	 */
705 	hmp->version = rootvol->ondisk->vol_version;
706 	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
707 	    hmp->version > HAMMER_VOL_VERSION_MAX) {
708 		kprintf("HAMMER: mount unsupported fs version %d\n",
709 			hmp->version);
710 		error = ERANGE;
711 		goto done;
712 	}
713 
714 	/*
715 	 * The undo_rec_limit limits the size of flush groups to avoid
716 	 * blowing out the UNDO FIFO.  This calculation is typically in
717 	 * the tens of thousands and exists primarily to protect small
718 	 * HAMMER filesystems.
719 	 */
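	/*
	 * Worked example (assuming a 256MB UNDO FIFO): 268435456 / 8192
	 * + 100 = 32868 records per flush group.
	 */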
720 	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
721 	if (hammer_debug_general & 0x0001)
722 		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
723 
724 	/*
725 	 * NOTE: Recover stage1 not only handles meta-data recovery, it
726 	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
727 	 */
728 	error = hammer_recover_stage1(hmp, rootvol);
729 	if (error) {
730 		kprintf("Failed to recover HAMMER filesystem on mount\n");
731 		goto done;
732 	}
733 
734 	/*
735 	 * Finish setup now that we have a good root volume.
736 	 *
737 	 * The top 16 bits of fsid.val[1] hold a PFS id.
738 	 */
739 	ksnprintf(mp->mnt_stat.f_mntfromname,
740 		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
741 		  rootvol->ondisk->vol_name);
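	/*
	 * vol_fsid is a 16 byte uuid; each half is hashed with crc32()
	 * and the top 16 bits of val[1] are cleared so a PFS id can be
	 * encoded there (the root PFS uses 0).
	 */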
742 	mp->mnt_stat.f_fsid.val[0] =
743 		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
744 	mp->mnt_stat.f_fsid.val[1] =
745 		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
746 	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;
747 
748 	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
749 	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
750 				     sizeof(mp->mnt_vstat.f_fsid_uuid));
751 
752 	/*
753 	 * Certain often-modified fields in the root volume are cached in
754 	 * the hammer_mount structure so we do not have to generate lots
755 	 * of little UNDO structures for them.
756 	 *
757 	 * Recopy after recovery.  This also has the side effect of
758 	 * setting our cached undo FIFO's first_offset, which serves to
759 	 * placemark the FIFO start for the NEXT flush cycle while the
760 	 * on-disk first_offset represents the LAST flush cycle.
761 	 */
762 	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
763 	hmp->flush_tid1 = hmp->next_tid;
764 	hmp->flush_tid2 = hmp->next_tid;
765 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
766 	      sizeof(hmp->blockmap));
767 	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
768 
769 	hammer_flusher_create(hmp);
770 
771 	/*
772 	 * Locate the root directory using the root cluster's B-Tree as a
773 	 * starting point.  The root directory uses an obj_id of 1.
774 	 *
775 	 * FUTURE: Leave the root directory cached referenced but unlocked
776 	 * in hmp->rootvp (need to flush it on unmount).
777 	 */
778 	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
779 	if (error)
780 		goto done;
781 	vput(rootvp);
782 	/*vn_unlock(hmp->rootvp);*/
783 	if (hmp->ronly == 0)
784 		error = hammer_recover_stage2(hmp, rootvol);
785 
786 	/*
787 	 * If the stage2 recovery fails be sure to clean out all cached
788 	 * vnodes before throwing away the mount structure or bad things
789 	 * will happen.
790 	 */
791 	if (error)
792 		vflush(mp, 0, 0);
793 
794 done:
795 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
796 		/* New mount */
797 
798 		/* Populate info for mount point (NULL pad) */
799 		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
800 		size_t size;
801 		if (mntpt) {
802 			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
803 							MNAMELEN -1, &size);
804 		} else { /* Root mount */
805 			mp->mnt_stat.f_mntonname[0] = '/';
806 		}
807 	}
808 	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
809 	hammer_rel_volume(rootvol, 0);
810 failed:
811 	/*
812 	 * Cleanup and return.
813 	 */
814 	if (error) {
815 		/* called with fs_token held */
816 		hammer_free_hmp(mp);
817 	} else {
818 		lwkt_reltoken(&hmp->fs_token);
819 	}
820 	return (error);
821 }
822 
823 static int
824 hammer_vfs_unmount(struct mount *mp, int mntflags)
825 {
826 	hammer_mount_t hmp = (void *)mp->mnt_data;
827 	int flags;
828 	int error;
829 
830 	/*
831 	 * Clean out the vnodes
832 	 */
833 	lwkt_gettoken(&hmp->fs_token);
834 	flags = 0;
835 	if (mntflags & MNT_FORCE)
836 		flags |= FORCECLOSE;
837 	error = vflush(mp, 0, flags);
838 
839 	/*
840 	 * Clean up the internal mount structure and related entities.  This
841 	 * may issue I/O.
842 	 */
843 	if (error == 0) {
844 		/* called with fs_token held */
845 		hammer_free_hmp(mp);
846 	} else {
847 		lwkt_reltoken(&hmp->fs_token);
848 	}
849 	return(error);
850 }
851 
852 /*
853  * Clean up the internal mount structure and disassociate it from the mount.
854  * This may issue I/O.
855  *
856  * Called with fs_token held.
857  */
858 static void
859 hammer_free_hmp(struct mount *mp)
860 {
861 	hammer_mount_t hmp = (void *)mp->mnt_data;
862 	hammer_flush_group_t flg;
863 	int count;
864 	int dummy;
865 
866 	/*
867 	 * Flush anything dirty.  This won't even run if the
868 	 * filesystem errored-out.
869 	 */
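	/*
	 * Up to 30 flusher passes are made; after the first five we sleep
	 * one second (hz ticks) between passes and print progress dots,
	 * giving up after roughly 25 more seconds.
	 */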
870 	count = 0;
871 	while (hammer_flusher_haswork(hmp)) {
872 		hammer_flusher_sync(hmp);
873 		++count;
874 		if (count >= 5) {
875 			if (count == 5)
876 				kprintf("HAMMER: umount flushing.");
877 			else
878 				kprintf(".");
879 			tsleep(&dummy, 0, "hmrufl", hz);
880 		}
881 		if (count == 30) {
882 			kprintf("giving up\n");
883 			break;
884 		}
885 	}
886 	if (count >= 5 && count < 30)
887 		kprintf("\n");
888 
889 	/*
890 	 * If the mount had a critical error we have to destroy any
891 	 * remaining inodes before we can finish cleaning up the flusher.
892 	 */
893 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
894 		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
895 			hammer_destroy_inode_callback, NULL);
896 	}
897 
898 	/*
899 	 * There shouldn't be any inodes left now, and any leftover
900 	 * flush groups should be empty.
901 	 */
902 	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
903 	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
904 		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
905 		KKASSERT(RB_EMPTY(&flg->flush_tree));
906 		if (flg->refs) {
907 			kprintf("HAMMER: Warning, flush_group %p was "
908 				"not empty on umount!\n", flg);
909 		}
910 		kfree(flg, hmp->m_misc);
911 	}
912 
913 	/*
914 	 * We can finally destroy the flusher
915 	 */
916 	hammer_flusher_destroy(hmp);
917 
918 	/*
919 	 * We may have held recovered buffers due to a read-only mount.
920 	 * These must be discarded.
921 	 */
922 	if (hmp->ronly)
923 		hammer_recover_flush_buffers(hmp, NULL, -1);
924 
925 	/*
926 	 * Unload buffers and then volumes
927 	 */
928 	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
929 		hammer_unload_buffer, NULL);
930 	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
931 		hammer_unload_volume, NULL);
932 
933 	mp->mnt_data = NULL;
934 	mp->mnt_flag &= ~MNT_LOCAL;
935 	hmp->mp = NULL;
936 	hammer_destroy_objid_cache(hmp);
937 	hammer_destroy_dedup_cache(hmp);
938 	if (hmp->dedup_free_cache != NULL) {
939 		kfree(hmp->dedup_free_cache, hmp->m_misc);
940 		hmp->dedup_free_cache = NULL;
941 	}
942 	kmalloc_destroy(&hmp->m_misc);
943 	kmalloc_destroy(&hmp->m_inodes);
944 	lwkt_reltoken(&hmp->fs_token);
945 	kfree(hmp, M_HAMMER);
946 }
947 
948 /*
949  * Report critical errors.  ip may be NULL.
950  */
951 void
952 hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
953 		      int error, const char *msg)
954 {
955 	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
956 
957 	krateprintf(&hmp->krate,
958 		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
959 		    hmp->mp->mnt_stat.f_mntfromname,
960 		    (intmax_t)(ip ? ip->obj_id : -1),
961 		    error, msg);
962 
963 	if (hmp->ronly == 0) {
964 		hmp->ronly = 2;		/* special errored read-only mode */
965 		hmp->mp->mnt_flag |= MNT_RDONLY;
966 		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
967 			hammer_adjust_volume_mode, NULL);
968 		kprintf("HAMMER(%s): Forcing read-only mode\n",
969 			hmp->mp->mnt_stat.f_mntfromname);
970 	}
971 	hmp->error = error;
972 	if (hammer_debug_critical)
973 		Debugger("Entering debugger");
974 }
975 
976 
977 /*
978  * Obtain a vnode for the specified inode number.  An exclusively locked
979  * vnode is returned.
980  */
981 int
982 hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
983 		ino_t ino, struct vnode **vpp)
984 {
985 	struct hammer_transaction trans;
986 	struct hammer_mount *hmp = (void *)mp->mnt_data;
987 	struct hammer_inode *ip;
988 	int error;
989 	u_int32_t localization;
990 
991 	lwkt_gettoken(&hmp->fs_token);
992 	hammer_simple_transaction(&trans, hmp);
993 
994 	/*
995 	 * If a directory vnode is supplied (mainly NFS) then we can acquire
996 	 * the PFS domain from it.  Otherwise we would only be able to vget
997 	 * inodes in the root PFS.
998 	 */
999 	if (dvp) {
1000 		localization = HAMMER_DEF_LOCALIZATION +
1001 				VTOI(dvp)->obj_localization;
1002 	} else {
1003 		localization = HAMMER_DEF_LOCALIZATION;
1004 	}
1005 
1006 	/*
1007 	 * Lookup the requested HAMMER inode.  The structure must be
1008 	 * left unlocked while we manipulate the related vnode to avoid
1009 	 * a deadlock.
1010 	 */
1011 	ip = hammer_get_inode(&trans, NULL, ino,
1012 			      hmp->asof, localization,
1013 			      0, &error);
1014 	if (ip == NULL) {
1015 		*vpp = NULL;
1016 	} else {
1017 		error = hammer_get_vnode(ip, vpp);
1018 		hammer_rel_inode(ip, 0);
1019 	}
1020 	hammer_done_transaction(&trans);
1021 	lwkt_reltoken(&hmp->fs_token);
1022 	return (error);
1023 }
1024 
1025 /*
1026  * Return the root vnode for the filesystem.
1027  *
1028  * The root directory always uses an obj_id of 1, so this is just
1029  * a hammer_vfs_vget() of that inode.
1030  */
1031 static int
1032 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
1033 {
1034 	int error;
1035 
1036 	error = hammer_vfs_vget(mp, NULL, 1, vpp);
1037 	return (error);
1038 }
1039 
1040 static int
1041 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
1042 {
1043 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1044 	hammer_volume_t volume;
1045 	hammer_volume_ondisk_t ondisk;
1046 	int error;
1047 	int64_t bfree;
1048 	int64_t breserved;
1049 
1050 	lwkt_gettoken(&hmp->fs_token);
1051 	volume = hammer_get_root_volume(hmp, &error);
1052 	if (error) {
1053 		lwkt_reltoken(&hmp->fs_token);
1054 		return(error);
1055 	}
1056 	ondisk = volume->ondisk;
1057 
1058 	/*
1059 	 * Basic stats
1060 	 */
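	/*
	 * vol0_stat_freebigblocks counts 8MB big-blocks; convert to bytes
	 * and report in HAMMER_BUFSIZE (16KB) units via f_bsize.  E.g.
	 * 1000 free big-blocks -> 8192000KB -> 512000 16KB blocks, less
	 * the write reserve.
	 */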
1061 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1062 	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
1063 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
1064 	hammer_rel_volume(volume, 0);
1065 
1066 	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1067 	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
1068 	if (mp->mnt_stat.f_files < 0)
1069 		mp->mnt_stat.f_files = 0;
1070 
1071 	*sbp = mp->mnt_stat;
1072 	lwkt_reltoken(&hmp->fs_token);
1073 	return(0);
1074 }
1075 
1076 static int
1077 hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1078 {
1079 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1080 	hammer_volume_t volume;
1081 	hammer_volume_ondisk_t ondisk;
1082 	int error;
1083 	int64_t bfree;
1084 	int64_t breserved;
1085 
1086 	lwkt_gettoken(&hmp->fs_token);
1087 	volume = hammer_get_root_volume(hmp, &error);
1088 	if (error) {
1089 		lwkt_reltoken(&hmp->fs_token);
1090 		return(error);
1091 	}
1092 	ondisk = volume->ondisk;
1093 
1094 	/*
1095 	 * Basic stats
1096 	 */
1097 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1098 	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
1099 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
1100 	hammer_rel_volume(volume, 0);
1101 
1102 	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1103 	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
1104 	if (mp->mnt_vstat.f_files < 0)
1105 		mp->mnt_vstat.f_files = 0;
1106 	*sbp = mp->mnt_vstat;
1107 	lwkt_reltoken(&hmp->fs_token);
1108 	return(0);
1109 }
1110 
1111 /*
1112  * Sync the filesystem.  Currently we have to run it twice; the second
1113  * pass advances the undo start index to the end index, so if a crash
1114  * occurs no undos will be run on mount.
1115  *
1116  * We do not sync the filesystem if we are called from a panic.  If we did
1117  * we might end up blowing up a sync that was already in progress.
1118  */
1119 static int
1120 hammer_vfs_sync(struct mount *mp, int waitfor)
1121 {
1122 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1123 	int error;
1124 
1125 	lwkt_gettoken(&hmp->fs_token);
1126 	if (panicstr == NULL) {
1127 		error = hammer_sync_hmp(hmp, waitfor);
1128 	} else {
1129 		error = EIO;
1130 	}
1131 	lwkt_reltoken(&hmp->fs_token);
1132 	return (error);
1133 }
1134 
1135 /*
1136  * Convert a vnode to a file handle.
1137  *
1138  * Accesses read-only fields on already-referenced structures so
1139  * no token is needed.
1140  */
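/*
 * Handle layout (16 bytes of fid_data): bytes 0-7 hold ip->obj_id,
 * bytes 8-15 hold ip->obj_asof, and fid_ext carries the upper 16 bits
 * of ip->obj_localization.  hammer_vfs_fhtovp() reverses the encoding.
 */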
1141 static int
1142 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
1143 {
1144 	hammer_inode_t ip;
1145 
1146 	KKASSERT(MAXFIDSZ >= 16);
1147 	ip = VTOI(vp);
1148 	fhp->fid_len = offsetof(struct fid, fid_data[16]);
1149 	fhp->fid_ext = ip->obj_localization >> 16;
1150 	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
1151 	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
1152 	return(0);
1153 }
1154 
1155 
1156 /*
1157  * Convert a file handle back to a vnode.
1158  *
1159  * Use rootvp to enforce PFS isolation when a PFS is exported via a
1160  * null mount.
1161  */
1162 static int
1163 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
1164 		  struct fid *fhp, struct vnode **vpp)
1165 {
1166 	hammer_mount_t hmp = (void *)mp->mnt_data;
1167 	struct hammer_transaction trans;
1168 	struct hammer_inode *ip;
1169 	struct hammer_inode_info info;
1170 	int error;
1171 	u_int32_t localization;
1172 
1173 	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
1174 	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
1175 	if (rootvp)
1176 		localization = VTOI(rootvp)->obj_localization;
1177 	else
1178 		localization = (u_int32_t)fhp->fid_ext << 16;
1179 
1180 	lwkt_gettoken(&hmp->fs_token);
1181 	hammer_simple_transaction(&trans, hmp);
1182 
1183 	/*
1184 	 * Get/allocate the hammer_inode structure.  The structure must be
1185 	 * unlocked while we manipulate the related vnode to avoid a
1186 	 * deadlock.
1187 	 */
1188 	ip = hammer_get_inode(&trans, NULL, info.obj_id,
1189 			      info.obj_asof, localization, 0, &error);
1190 	if (ip) {
1191 		error = hammer_get_vnode(ip, vpp);
1192 		hammer_rel_inode(ip, 0);
1193 	} else {
1194 		*vpp = NULL;
1195 	}
1196 	hammer_done_transaction(&trans);
1197 	lwkt_reltoken(&hmp->fs_token);
1198 	return (error);
1199 }
1200 
1201 static int
1202 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1203 		    int *exflagsp, struct ucred **credanonp)
1204 {
1205 	hammer_mount_t hmp = (void *)mp->mnt_data;
1206 	struct netcred *np;
1207 	int error;
1208 
1209 	lwkt_gettoken(&hmp->fs_token);
1210 	np = vfs_export_lookup(mp, &hmp->export, nam);
1211 	if (np) {
1212 		*exflagsp = np->netc_exflags;
1213 		*credanonp = &np->netc_anon;
1214 		error = 0;
1215 	} else {
1216 		error = EACCES;
1217 	}
1218 	lwkt_reltoken(&hmp->fs_token);
1219 	return (error);
1220 
1221 }
1222 
1223 int
1224 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
1225 {
1226 	hammer_mount_t hmp = (void *)mp->mnt_data;
1227 	int error;
1228 
1229 	lwkt_gettoken(&hmp->fs_token);
1230 
1231 	switch(op) {
1232 	case MOUNTCTL_SET_EXPORT:
1233 		error = vfs_export(mp, &hmp->export, export);
1234 		break;
1235 	default:
1236 		error = EOPNOTSUPP;
1237 		break;
1238 	}
1239 	lwkt_reltoken(&hmp->fs_token);
1240 
1241 	return(error);
1242 }
1243 
1244