xref: /dragonfly/sys/vfs/hammer/hammer_vfsops.c (revision 7e82238e)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/mountctl.h>
36 
37 #include "hammer.h"
38 
39 /*
40  * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
41  *	  in conditionals.
42  */
43 int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
44 int hammer_debug_io;
45 int hammer_debug_general;
46 int hammer_debug_inode;
47 int hammer_debug_locks;
48 int hammer_debug_btree;
49 int hammer_debug_tid;
50 int hammer_debug_recover;		/* -1 will disable, +1 will force */
51 int hammer_debug_recover_faults;
52 int hammer_debug_critical;		/* non-zero: enter debugger on error */
53 int hammer_cluster_enable = 1;		/* enable read clustering by default */
54 int hammer_live_dedup = 0;
55 int hammer_tdmux_ticks;
56 int hammer_count_fsyncs;
57 int hammer_count_inodes;
58 int hammer_count_iqueued;
59 int hammer_count_reclaims;
60 int hammer_count_records;
61 int hammer_count_record_datas;
62 int hammer_count_volumes;
63 int hammer_count_buffers;
64 int hammer_count_nodes;
65 int64_t hammer_count_extra_space_used;
66 int64_t hammer_stats_btree_lookups;
67 int64_t hammer_stats_btree_searches;
68 int64_t hammer_stats_btree_inserts;
69 int64_t hammer_stats_btree_deletes;
70 int64_t hammer_stats_btree_elements;
71 int64_t hammer_stats_btree_splits;
72 int64_t hammer_stats_btree_iterations;
73 int64_t hammer_stats_btree_root_iterations;
74 int64_t hammer_stats_record_iterations;
75 
76 int64_t hammer_stats_file_read;
77 int64_t hammer_stats_file_write;
78 int64_t hammer_stats_file_iopsr;
79 int64_t hammer_stats_file_iopsw;
80 int64_t hammer_stats_disk_read;
81 int64_t hammer_stats_disk_write;
82 int64_t hammer_stats_inode_flushes;
83 int64_t hammer_stats_commits;
84 int64_t hammer_stats_undo;
85 int64_t hammer_stats_redo;
86 
87 long hammer_count_dirtybufspace;	/* global */
88 int hammer_count_refedbufs;		/* global */
89 int hammer_count_reservations;
90 long hammer_count_io_running_read;
91 long hammer_count_io_running_write;
92 int hammer_count_io_locked;
93 long hammer_limit_dirtybufspace;	/* per-mount */
94 int hammer_limit_recs;			/* as a whole XXX */
95 int hammer_limit_inode_recs = 2048;	/* per inode */
96 int hammer_limit_reclaims;
97 int hammer_live_dedup_cache_size = DEDUP_CACHE_SIZE;
98 int hammer_limit_redo = 4096 * 1024;	/* per inode */
99 int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
100 int hammer_bio_count;
101 int hammer_verify_zone;
102 int hammer_verify_data = 1;
103 int hammer_write_mode;
104 int hammer_double_buffer;
105 int hammer_btree_full_undo = 1;
106 int hammer_yield_check = 16;
107 int hammer_fsync_mode = 3;
108 int64_t hammer_contention_count;
109 int64_t hammer_zone_limit;
110 
111 /*
112  * Live dedup debug counters (sysctls are writable so that counters
113  * can be reset from userspace).
114  */
115 int64_t hammer_live_dedup_vnode_bcmps = 0;
116 int64_t hammer_live_dedup_device_bcmps = 0;
117 int64_t hammer_live_dedup_findblk_failures = 0;
118 int64_t hammer_live_dedup_bmap_saves = 0;
119 
120 
121 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
122 
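/*
 * All of the knobs and counters below live under the vfs.hammer sysctl
 * tree and can be inspected or, for CTLFLAG_RW entries, set from
 * userland with sysctl(8), e.g. (illustrative usage):
 *
 *	sysctl vfs.hammer.cluster_enable
 *	sysctl vfs.hammer.cluster_enable=0
 */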
123 SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
124 	   &hammer_supported_version, 0, "");
125 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
126 	   &hammer_debug_general, 0, "");
127 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
128 	   &hammer_debug_io, 0, "");
129 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
130 	   &hammer_debug_inode, 0, "");
131 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
132 	   &hammer_debug_locks, 0, "");
133 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
134 	   &hammer_debug_btree, 0, "");
135 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
136 	   &hammer_debug_tid, 0, "");
137 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
138 	   &hammer_debug_recover, 0, "");
139 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
140 	   &hammer_debug_recover_faults, 0, "");
141 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
142 	   &hammer_debug_critical, 0, "");
143 SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
144 	   &hammer_cluster_enable, 0, "");
145 /*
146  * 0 - live dedup is disabled
147  * 1 - dedup cache is populated on reads only
148  * 2 - dedup cache is populated on both reads and writes
149  *
150  * LIVE_DEDUP IS DISABLED PERMANENTLY!  This feature appears to cause
151  * blockmap corruption over time, so it has been turned off for good.
152  */
153 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup, CTLFLAG_RD,
154 	   &hammer_live_dedup, 0, "Enable live dedup (experimental)");
155 SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
156 	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");
157 
158 SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
159 	   &hammer_limit_dirtybufspace, 0, "");
160 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
161 	   &hammer_limit_recs, 0, "");
162 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
163 	   &hammer_limit_inode_recs, 0, "");
164 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
165 	   &hammer_limit_reclaims, 0, "");
166 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
167 	   &hammer_live_dedup_cache_size, 0,
168 	   "Number of cache entries");
169 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
170 	   &hammer_limit_redo, 0, "");
171 
172 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
173 	   &hammer_count_fsyncs, 0, "");
174 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
175 	   &hammer_count_inodes, 0, "");
176 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
177 	   &hammer_count_iqueued, 0, "");
178 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
179 	   &hammer_count_reclaims, 0, "");
180 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
181 	   &hammer_count_records, 0, "");
182 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
183 	   &hammer_count_record_datas, 0, "");
184 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
185 	   &hammer_count_volumes, 0, "");
186 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
187 	   &hammer_count_buffers, 0, "");
188 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
189 	   &hammer_count_nodes, 0, "");
190 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
191 	   &hammer_count_extra_space_used, 0, "");
192 
193 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
194 	   &hammer_stats_btree_searches, 0, "");
195 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
196 	   &hammer_stats_btree_lookups, 0, "");
197 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
198 	   &hammer_stats_btree_inserts, 0, "");
199 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
200 	   &hammer_stats_btree_deletes, 0, "");
201 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
202 	   &hammer_stats_btree_elements, 0, "");
203 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
204 	   &hammer_stats_btree_splits, 0, "");
205 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
206 	   &hammer_stats_btree_iterations, 0, "");
207 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
208 	   &hammer_stats_btree_root_iterations, 0, "");
209 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
210 	   &hammer_stats_record_iterations, 0, "");
211 
212 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
213 	   &hammer_stats_file_read, 0, "");
214 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
215 	   &hammer_stats_file_write, 0, "");
216 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
217 	   &hammer_stats_file_iopsr, 0, "");
218 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
219 	   &hammer_stats_file_iopsw, 0, "");
220 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
221 	   &hammer_stats_disk_read, 0, "");
222 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
223 	   &hammer_stats_disk_write, 0, "");
224 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
225 	   &hammer_stats_inode_flushes, 0, "");
226 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
227 	   &hammer_stats_commits, 0, "");
228 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
229 	   &hammer_stats_undo, 0, "");
230 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
231 	   &hammer_stats_redo, 0, "");
232 
233 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_vnode_bcmps, CTLFLAG_RW,
234 	    &hammer_live_dedup_vnode_bcmps, 0,
235 	    "successful vnode buffer comparisons");
236 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_device_bcmps, CTLFLAG_RW,
237 	    &hammer_live_dedup_device_bcmps, 0,
238 	    "successful device buffer comparisons");
239 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_findblk_failures, CTLFLAG_RW,
240 	    &hammer_live_dedup_findblk_failures, 0,
241 	    "block lookup failures for comparison");
242 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_bmap_saves, CTLFLAG_RW,
243 	    &hammer_live_dedup_bmap_saves, 0,
244 	    "useful physical block lookups");
245 
246 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
247 	   &hammer_count_dirtybufspace, 0, "");
248 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
249 	   &hammer_count_refedbufs, 0, "");
250 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
251 	   &hammer_count_reservations, 0, "");
252 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
253 	   &hammer_count_io_running_read, 0, "");
254 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
255 	   &hammer_count_io_locked, 0, "");
256 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
257 	   &hammer_count_io_running_write, 0, "");
258 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
259 	   &hammer_zone_limit, 0, "");
260 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
261 	   &hammer_contention_count, 0, "");
262 SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
263 	   &hammer_autoflush, 0, "");
264 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
265 	   &hammer_verify_zone, 0, "");
266 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
267 	   &hammer_verify_data, 0, "");
268 SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
269 	   &hammer_write_mode, 0, "");
270 SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
271 	   &hammer_double_buffer, 0, "");
272 SYSCTL_INT(_vfs_hammer, OID_AUTO, btree_full_undo, CTLFLAG_RW,
273 	   &hammer_btree_full_undo, 0, "");
274 SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
275 	   &hammer_yield_check, 0, "");
276 SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
277 	   &hammer_fsync_mode, 0, "");
278 
279 /* KTR_INFO_MASTER(hammer); */
280 
281 /*
282  * VFS ABI
283  */
284 static void	hammer_free_hmp(struct mount *mp);
285 
286 static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
287 				struct ucred *cred);
288 static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
289 static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
290 static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
291 				struct ucred *cred);
292 static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
293 				struct ucred *cred);
294 static int	hammer_vfs_sync(struct mount *mp, int waitfor);
295 static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
296 				ino_t ino, struct vnode **vpp);
297 static int	hammer_vfs_init(struct vfsconf *conf);
298 static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
299 				struct fid *fhp, struct vnode **vpp);
300 static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
301 static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
302 				int *exflagsp, struct ucred **credanonp);
303 
304 
305 static struct vfsops hammer_vfsops = {
306 	.vfs_mount	= hammer_vfs_mount,
307 	.vfs_unmount	= hammer_vfs_unmount,
308 	.vfs_root	= hammer_vfs_root,
309 	.vfs_statfs	= hammer_vfs_statfs,
310 	.vfs_statvfs	= hammer_vfs_statvfs,
311 	.vfs_sync	= hammer_vfs_sync,
312 	.vfs_vget	= hammer_vfs_vget,
313 	.vfs_init	= hammer_vfs_init,
314 	.vfs_vptofh	= hammer_vfs_vptofh,
315 	.vfs_fhtovp	= hammer_vfs_fhtovp,
316 	.vfs_checkexp	= hammer_vfs_checkexp
317 };
318 
319 MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");
320 
321 VFS_SET(hammer_vfsops, hammer, 0);
322 MODULE_VERSION(hammer, 1);
323 
324 static int
325 hammer_vfs_init(struct vfsconf *conf)
326 {
327 	long n;
328 
329 	/*
330 	 * Wait up to this long for an exclusive deadlock to clear
331 	 * before acquiring a new shared lock on the ip.  The deadlock
332 	 * may have occured on a b-tree node related to the ip.
333 	 */
334 	if (hammer_tdmux_ticks == 0)
335 		hammer_tdmux_ticks = hz / 5;
336 
337 	/*
338 	 * Autosize, but be careful because a hammer filesystem's
339 	 * reserve is partially calculated based on dirtybufspace,
340 	 * so we simply cannot allow it to get too large.
341 	 */
342 	if (hammer_limit_recs == 0) {
343 		n = nbuf * 25;
344 		if (n > kmalloc_limit(M_HAMMER) / 512)
345 			n = kmalloc_limit(M_HAMMER) / 512;
346 		if (n > 2 * 1024 * 1024)
347 			n = 2 * 1024 * 1024;
348 		hammer_limit_recs = (int)n;
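		/*
		 * Worked example (illustrative numbers): nbuf = 50000
		 * gives an initial estimate of 1,250,000 records; a
		 * 512MB kmalloc limit then caps it at 512MB / 512 =
		 * 1,048,576, under the 2,097,152 hard ceiling.
		 */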
349 	}
350 	if (hammer_limit_dirtybufspace == 0) {
351 		hammer_limit_dirtybufspace = hidirtybufspace / 2;
352 		if (hammer_limit_dirtybufspace < 1L * 1024 * 1024)
353 			hammer_limit_dirtybufspace = 1024L * 1024;
354 		if (hammer_limit_dirtybufspace > 1024L * 1024 * 1024)
355 			hammer_limit_dirtybufspace = 1024L * 1024 * 1024;
356 	}
357 
358 	/*
359 	 * The hammer_inode structure detaches from the vnode on reclaim.
360 	 * This limits the number of inodes in this state to prevent a
361 	 * memory pool blowout.
362 	 */
363 	if (hammer_limit_reclaims == 0)
364 		hammer_limit_reclaims = desiredvnodes / 10;
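	/*
	 * e.g. desiredvnodes = 100000 (illustrative) allows up to
	 * 10000 inodes in the reclaim state.
	 */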
365 
366 	return(0);
367 }
368 
369 static int
370 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
371 		 struct ucred *cred)
372 {
373 	struct hammer_mount_info info;
374 	hammer_mount_t hmp;
375 	hammer_volume_t rootvol;
376 	struct vnode *rootvp;
377 	struct vnode *devvp = NULL;
378 	const char *upath;	/* volume name in userspace */
379 	char *path;		/* volume name in system space */
380 	int error;
381 	int i;
382 	int master_id;
383 	int nvolumes;
384 	char *next_volume_ptr = NULL;
385 
386 	/*
387 	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
388 	 */
389 	if (mntpt == NULL) {
390 		bzero(&info, sizeof(info));
391 		info.asof = 0;
392 		info.hflags = 0;
393 		info.nvolumes = 1;
394 
395 		next_volume_ptr = mp->mnt_stat.f_mntfromname;
396 
397 		/* Count number of volumes separated by ':' */
398 		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
399 			if (*p == ':') {
400 				++info.nvolumes;
401 			}
402 		}
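		/*
		 * e.g. an f_mntfromname of "da0s1a:da1s1a" (illustrative)
		 * yields info.nvolumes == 2.
		 */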
403 
404 		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
405 	} else {
406 		if ((error = copyin(data, &info, sizeof(info))) != 0)
407 			return (error);
408 	}
409 
410 	/*
411 	 * updating or new mount
412 	 */
413 	if (mp->mnt_flag & MNT_UPDATE) {
414 		hmp = (void *)mp->mnt_data;
415 		KKASSERT(hmp != NULL);
416 	} else {
417 		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
418 			return (EINVAL);
419 		hmp = NULL;
420 	}
421 
422 	/*
423 	 * master-id validation.  The master id may not be changed by a
424 	 * mount update.
425 	 */
426 	if (info.hflags & HMNT_MASTERID || info.hflags & HMNT_NOMIRROR) {
427 		if (hmp && hmp->master_id != info.master_id) {
428 			hkprintf("cannot change master id with mount update\n");
429 			return(EINVAL);
430 		}
431 		master_id = info.master_id;
432 		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
433 			return (EINVAL);
434 	} else {
435 		if (hmp)
436 			master_id = hmp->master_id;
437 		else
438 			master_id = 0;
439 	}
440 
441 	/*
442 	 * Internal mount data structure
443 	 */
444 	if (hmp == NULL) {
445 		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
446 		mp->mnt_data = (qaddr_t)hmp;
447 		hmp->mp = mp;
448 
449 		/*
450 		 * Make sure kmalloc type limits are set appropriately.
451 		 *
452 		 * Our inode kmalloc group is sized based on maxvnodes
453 		 * (controlled by the system, not us).
454 		 */
455 		kmalloc_create(&hmp->m_misc, "HAMMER-others");
456 		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
457 
458 		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */
459 
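		/*
		 * Sentinel keys bracketing the entire B-Tree key space:
		 * root_btree_beg sorts before any legal element and
		 * root_btree_end sorts after any legal element.
		 */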
460 		hmp->root_btree_beg.localization = 0x00000000U;
461 		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
462 		hmp->root_btree_beg.key = -0x8000000000000000LL;
463 		hmp->root_btree_beg.create_tid = 1;
464 		hmp->root_btree_beg.delete_tid = 1;
465 		hmp->root_btree_beg.rec_type = 0;
466 		hmp->root_btree_beg.obj_type = 0;
467 		hmp->root_btree_beg.btype = HAMMER_BTREE_TYPE_NONE;
468 
469 		hmp->root_btree_end.localization = 0xFFFFFFFFU;
470 		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
471 		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
472 		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
473 		hmp->root_btree_end.delete_tid = 0;   /* special case */
474 		hmp->root_btree_end.rec_type = 0xFFFFU;
475 		hmp->root_btree_end.obj_type = 0;
476 		hmp->root_btree_end.btype = HAMMER_BTREE_TYPE_NONE;
477 
478 		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
479 		hmp->krate.count = -16;	/* initial burst */
480 		hmp->kdiag.freq = 1;	/* maximum reporting rate (hz) */
481 		hmp->kdiag.count = -16;	/* initial burst */
482 
483 		hmp->sync_lock.refs = 1;
484 		hmp->free_lock.refs = 1;
485 		hmp->undo_lock.refs = 1;
486 		hmp->blkmap_lock.refs = 1;
487 		hmp->snapshot_lock.refs = 1;
488 		hmp->volume_lock.refs = 1;
489 
490 		TAILQ_INIT(&hmp->delay_list);
491 		TAILQ_INIT(&hmp->flush_group_list);
492 		TAILQ_INIT(&hmp->objid_cache_list);
493 		TAILQ_INIT(&hmp->undo_lru_list);
494 		TAILQ_INIT(&hmp->reclaim_list);
495 
496 		RB_INIT(&hmp->rb_dedup_crc_root);
497 		RB_INIT(&hmp->rb_dedup_off_root);
498 		TAILQ_INIT(&hmp->dedup_lru_list);
499 	}
500 	hmp->hflags &= ~HMNT_USERFLAGS;
501 	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
502 
503 	hmp->master_id = master_id;
504 
505 	if (info.asof) {
506 		mp->mnt_flag |= MNT_RDONLY;
507 		hmp->asof = info.asof;
508 	} else {
509 		hmp->asof = HAMMER_MAX_TID;
510 	}
511 
512 	hmp->volume_to_remove = -1;
513 
514 	/*
515 	 * Re-open read-write if originally read-only, or vice-versa.
516 	 *
517 	 * When going from read-only to read-write execute the stage2
518 	 * recovery if it has not already been run.
519 	 */
520 	if (mp->mnt_flag & MNT_UPDATE) {
521 		lwkt_gettoken(&hmp->fs_token);
522 		error = 0;
523 		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
524 			hkprintf("read-only -> read-write\n");
525 			hmp->ronly = 0;
526 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
527 				hammer_adjust_volume_mode, NULL);
528 			rootvol = hammer_get_root_volume(hmp, &error);
529 			if (rootvol) {
530 				hammer_recover_flush_buffers(hmp, rootvol, 1);
531 				error = hammer_recover_stage2(hmp, rootvol);
532 				bcopy(rootvol->ondisk->vol0_blockmap,
533 				      hmp->blockmap,
534 				      sizeof(hmp->blockmap));
535 				hammer_rel_volume(rootvol, 0);
536 			}
537 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
538 				hammer_reload_inode, NULL);
539 			/* kernel clears MNT_RDONLY */
540 		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
541 			hkprintf("read-write -> read-only\n");
542 			hmp->ronly = 1;	/* messy */
543 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
544 				hammer_reload_inode, NULL);
545 			hmp->ronly = 0;
546 			hammer_flusher_sync(hmp);
547 			hammer_flusher_sync(hmp);
548 			hammer_flusher_sync(hmp);
549 			hmp->ronly = 1;
550 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
551 				hammer_adjust_volume_mode, NULL);
552 		}
553 		lwkt_reltoken(&hmp->fs_token);
554 		return(error);
555 	}
556 
557 	RB_INIT(&hmp->rb_vols_root);
558 	RB_INIT(&hmp->rb_inos_root);
559 	RB_INIT(&hmp->rb_redo_root);
560 	RB_INIT(&hmp->rb_nods_root);
561 	RB_INIT(&hmp->rb_undo_root);
562 	RB_INIT(&hmp->rb_resv_root);
563 	RB_INIT(&hmp->rb_bufs_root);
564 	RB_INIT(&hmp->rb_pfsm_root);
565 
566 	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
567 
568 	RB_INIT(&hmp->volu_root);
569 	RB_INIT(&hmp->undo_root);
570 	RB_INIT(&hmp->data_root);
571 	RB_INIT(&hmp->meta_root);
572 	RB_INIT(&hmp->lose_root);
573 	TAILQ_INIT(&hmp->iorun_list);
574 
575 	lwkt_token_init(&hmp->fs_token, "hammerfs");
576 	lwkt_token_init(&hmp->io_token, "hammerio");
577 
578 	lwkt_gettoken(&hmp->fs_token);
579 
580 	/*
581 	 * Load volumes
582 	 */
583 	path = objcache_get(namei_oc, M_WAITOK);
584 	hmp->nvolumes = -1;
585 	for (i = 0; i < info.nvolumes; ++i) {
586 		if (mntpt == NULL) {
587 			/*
588 			 * Root mount.
589 			 */
590 			KKASSERT(next_volume_ptr != NULL);
591 			strcpy(path, "");
592 			if (*next_volume_ptr != '/') {
593 				/* relative path */
594 				strcpy(path, "/dev/");
595 			}
596 			int k;
597 			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
598 				if (*next_volume_ptr == '\0') {
599 					break;
600 				} else if (*next_volume_ptr == ':') {
601 					++next_volume_ptr;
602 					break;
603 				} else {
604 					path[k] = *next_volume_ptr;
605 					++next_volume_ptr;
606 				}
607 			}
608 			path[k] = '\0';
609 
610 			error = 0;
611 			cdev_t dev = kgetdiskbyname(path);
612 			error = bdevvp(dev, &devvp);
613 			if (error) {
614 				hdkprintf("can't find devvp\n");
615 			}
616 		} else {
617 			error = copyin(&info.volumes[i], &upath,
618 				       sizeof(char *));
619 			if (error == 0)
620 				error = copyinstr(upath, path,
621 						  MAXPATHLEN, NULL);
622 		}
623 		if (error == 0)
624 			error = hammer_install_volume(hmp, path, devvp, NULL);
625 		if (error)
626 			break;
627 	}
628 	objcache_put(namei_oc, path);
629 
630 	/*
631 	 * Make sure we found a root volume
632 	 */
633 	if (hmp->rootvol == NULL) {
634 		if (error == EBUSY) {
635 			hdkprintf("The volumes are probably mounted\n");
636 		} else {
637 			hdkprintf("No root volume found!\n");
638 			error = EINVAL;
639 		}
640 		goto failed;
641 	}
642 
643 	/*
644 	 * Check that all required volumes are available
645 	 */
646 	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
647 		hdkprintf("Missing volumes, cannot mount!\n");
648 		error = EINVAL;
649 		goto failed;
650 	}
651 
652 	/*
653 	 * Other errors
654 	 */
655 	if (error) {
656 		hdkprintf("Failed to load volumes!\n");
657 		goto failed;
658 	}
659 
660 	nvolumes = hammer_get_installed_volumes(hmp);
661 	if (hmp->nvolumes != nvolumes) {
662 		hdkprintf("volume header says %d volumes, but %d installed\n",
663 			hmp->nvolumes, nvolumes);
664 		error = EINVAL;
665 		goto failed;
666 	}
667 
668 	/*
669 	 * No errors; set up enough of the mount point so we can look up the
670 	 * root vnode.
671 	 */
672 	mp->mnt_iosize_max = MAXPHYS;
673 	mp->mnt_kern_flag |= MNTK_FSMID;
674 	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */
675 
676 	/*
677 	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
678 	 * will acquire a per-mount token prior to entry and release it
679 	 * on return.
680 	 */
681 	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
682 
683 	/*
684 	 * note: f_iosize is used by vnode_pager_haspage() when constructing
685 	 * its VOP_BMAP call.
686 	 */
687 	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
688 	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
689 
690 	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
691 	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;
692 
693 	mp->mnt_maxsymlinklen = 255;
694 	mp->mnt_flag |= MNT_LOCAL;
695 
696 	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
697 	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
698 	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
699 
700 	/*
701 	 * The root volume's ondisk pointer is only valid if we hold a
702 	 * reference to it.
703 	 */
704 	rootvol = hammer_get_root_volume(hmp, &error);
705 	if (error)
706 		goto failed;
707 
708 	/*
709 	 * Perform any necessary UNDO operations.  The recovery code does
710 	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
711 	 * and then re-copy it after recovery is complete.
712 	 *
713 	 * If this is a read-only mount the UNDO information is retained
714 	 * in memory in the form of dirty buffer cache buffers, and not
715 	 * written back to the media.
716 	 */
717 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
718 	      sizeof(hmp->blockmap));
719 
720 	/*
721 	 * Check filesystem version
722 	 */
723 	hmp->version = rootvol->ondisk->vol_version;
724 	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
725 	    hmp->version > HAMMER_VOL_VERSION_MAX) {
726 		hkprintf("mount unsupported fs version %d\n", hmp->version);
727 		error = ERANGE;
728 		goto done;
729 	}
730 
731 	/*
732 	 * The undo_rec_limit limits the size of flush groups to avoid
733 	 * blowing out the UNDO FIFO.  The calculated limit is typically
734 	 * in the tens of thousands and matters primarily for small
735 	 * HAMMER filesystems.
736 	 */
737 	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
738 	if (hammer_debug_general & 0x0001)
739 		hkprintf("undo_rec_limit %d\n", hmp->undo_rec_limit);
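	/*
	 * e.g. a 512MB UNDO FIFO (illustrative) yields an undo_rec_limit
	 * of 512MB / 8192 + 100 = 65636.
	 */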
740 
741 	/*
742 	 * NOTE: Recover stage1 not only handles meta-data recovery, it
743 	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
744 	 */
745 	error = hammer_recover_stage1(hmp, rootvol);
746 	if (error) {
747 		kprintf("Failed to recover HAMMER filesystem on mount\n");
748 		goto done;
749 	}
750 
751 	/*
752 	 * Finish setup now that we have a good root volume.
753 	 * vol_name is a filesystem label string.
754 	 */
755 	ksnprintf(mp->mnt_stat.f_mntfromname,
756 		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
757 		  rootvol->ondisk->vol_name);
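
	/*
	 * Derive f_fsid by CRCing each 8-byte half of the 128-bit
	 * vol_fsid uuid.
	 */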
758 	mp->mnt_stat.f_fsid.val[0] =
759 		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
760 	mp->mnt_stat.f_fsid.val[1] =
761 		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
762 	mp->mnt_stat.f_fsid.val[1] &= HAMMER_LOCALIZE_MASK;
763 
764 	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
765 	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
766 				     sizeof(mp->mnt_vstat.f_fsid_uuid));
767 
768 	/*
769 	 * Certain often-modified fields in the root volume are cached in
770 	 * the hammer_mount structure so we do not have to generate lots
771 	 * of little UNDO structures for them.
772 	 *
773 	 * Recopy after recovery.  This also has the side effect of
774 	 * setting our cached undo FIFO's first_offset, which serves to
775 	 * placemark the FIFO start for the NEXT flush cycle while the
776 	 * on-disk first_offset represents the LAST flush cycle.
777 	 */
778 	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
779 	hmp->flush_tid1 = hmp->next_tid;
780 	hmp->flush_tid2 = hmp->next_tid;
781 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
782 	      sizeof(hmp->blockmap));
783 	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
784 
785 	hammer_flusher_create(hmp);
786 
787 	/*
788 	 * Locate the root directory with an obj_id of 1.
789 	 */
790 	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, &rootvp);
791 	if (error)
792 		goto done;
793 	vput(rootvp);
794 	if (hmp->ronly == 0)
795 		error = hammer_recover_stage2(hmp, rootvol);
796 
797 	/*
798 	 * If the stage2 recovery fails, be sure to clean out all cached
799 	 * vnodes before throwing away the mount structure or bad things
800 	 * will happen.
801 	 */
802 	if (error)
803 		vflush(mp, 0, 0);
804 
805 done:
806 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
807 		/* New mount */
808 
809 		/* Populate info for mount point (NULL pad) */
810 		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
811 		size_t size;
812 		if (mntpt) {
813 			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
814 							MNAMELEN -1, &size);
815 		} else { /* Root mount */
816 			mp->mnt_stat.f_mntonname[0] = '/';
817 		}
818 	}
819 	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
820 	hammer_rel_volume(rootvol, 0);
821 failed:
822 	/*
823 	 * Cleanup and return.
824 	 */
825 	if (error) {
826 		/* called with fs_token held */
827 		hammer_free_hmp(mp);
828 	} else {
829 		lwkt_reltoken(&hmp->fs_token);
830 	}
831 	return (error);
832 }
833 
834 static int
835 hammer_vfs_unmount(struct mount *mp, int mntflags)
836 {
837 	hammer_mount_t hmp = (void *)mp->mnt_data;
838 	int flags;
839 	int error;
840 
841 	/*
842 	 * Clean out the vnodes
843 	 */
844 	lwkt_gettoken(&hmp->fs_token);
845 	flags = 0;
846 	if (mntflags & MNT_FORCE)
847 		flags |= FORCECLOSE;
848 	error = vflush(mp, 0, flags);
849 
850 	/*
851 	 * Clean up the internal mount structure and related entities.  This
852 	 * may issue I/O.
853 	 */
854 	if (error == 0) {
855 		/* called with fs_token held */
856 		hammer_free_hmp(mp);
857 	} else {
858 		lwkt_reltoken(&hmp->fs_token);
859 	}
860 	return(error);
861 }
862 
863 /*
864  * Clean up the internal mount structure and disassociate it from the mount.
865  * This may issue I/O.
866  *
867  * Called with fs_token held.
868  */
869 static void
870 hammer_free_hmp(struct mount *mp)
871 {
872 	hammer_mount_t hmp = (void *)mp->mnt_data;
873 	hammer_flush_group_t flg;
874 
875 	/*
876 	 * Flush anything dirty.  This won't even run if the
877 	 * filesystem errored out.
878 	 */
879 	hammer_flush_dirty(hmp, 30);
880 
881 	/*
882 	 * If the mount had a critical error we have to destroy any
883 	 * remaining inodes before we can finish cleaning up the flusher.
884 	 */
885 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
886 		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
887 			hammer_destroy_inode_callback, NULL);
888 	}
889 
890 	/*
891 	 * There shouldn't be any inodes left now, and any leftover
892 	 * flush groups should be empty.
893 	 */
894 	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
895 	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
896 		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
897 		KKASSERT(RB_EMPTY(&flg->flush_tree));
898 		if (flg->refs) {
899 			hkprintf("Warning, flush_group %p was "
900 				"not empty on umount!\n", flg);
901 		}
902 		kfree(flg, hmp->m_misc);
903 	}
904 
905 	/*
906 	 * We can finally destroy the flusher
907 	 */
908 	hammer_flusher_destroy(hmp);
909 
910 	/*
911 	 * We may have held recovered buffers due to a read-only mount.
912 	 * These must be discarded.
913 	 */
914 	if (hmp->ronly)
915 		hammer_recover_flush_buffers(hmp, NULL, -1);
916 
917 	/*
918 	 * Unload buffers and then volumes
919 	 */
920 	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
921 		hammer_unload_buffer, NULL);
922 	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
923 		hammer_unload_volume, NULL);
924 
925 	mp->mnt_data = NULL;
926 	mp->mnt_flag &= ~MNT_LOCAL;
927 	hmp->mp = NULL;
928 	hammer_destroy_objid_cache(hmp);
929 	hammer_destroy_dedup_cache(hmp);
930 	if (hmp->dedup_free_cache != NULL) {
931 		kfree(hmp->dedup_free_cache, hmp->m_misc);
932 		hmp->dedup_free_cache = NULL;
933 	}
934 	kmalloc_destroy(&hmp->m_misc);
935 	kmalloc_destroy(&hmp->m_inodes);
936 	lwkt_reltoken(&hmp->fs_token);
937 	kfree(hmp, M_HAMMER);
938 }
939 
940 /*
941  * Report critical errors.  ip may be NULL.
942  */
943 void
944 hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
945 		      int error, const char *msg)
946 {
947 	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
948 
949 	hmkrateprintf(&hmp->krate, hmp,
950 		    "Critical error inode=%jd error=%d %s\n",
951 		    (intmax_t)(ip ? ip->obj_id : -1),
952 		    error, msg);
953 
954 	if (hmp->ronly == 0) {
955 		hmp->ronly = 2;		/* special errored read-only mode */
956 		hmp->mp->mnt_flag |= MNT_RDONLY;
957 		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
958 			hammer_adjust_volume_mode, NULL);
959 		hmkprintf(hmp, "Forcing read-only mode\n");
960 	}
961 	hmp->error = error;
962 	if (hammer_debug_critical)
963 		Debugger("Entering debugger");
964 }
965 
966 
967 /*
968  * Obtain a vnode for the specified inode number.  An exclusively locked
969  * vnode is returned.
970  */
971 int
972 hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
973 		ino_t ino, struct vnode **vpp)
974 {
975 	struct hammer_transaction trans;
976 	struct hammer_mount *hmp = (void *)mp->mnt_data;
977 	struct hammer_inode *ip;
978 	int error;
979 	u_int32_t localization;
980 
981 	lwkt_gettoken(&hmp->fs_token);
982 	hammer_simple_transaction(&trans, hmp);
983 
984 	/*
985 	 * If a directory vnode is supplied (mainly NFS) then we can acquire
986 	 * the PFS domain from it.  Otherwise we would only be able to vget
987 	 * inodes in the root PFS.
988 	 */
989 	if (dvp) {
990 		localization = HAMMER_DEF_LOCALIZATION +
991 				VTOI(dvp)->obj_localization;
992 	} else {
993 		localization = HAMMER_DEF_LOCALIZATION;
994 	}
995 
996 	/*
997 	 * Lookup the requested HAMMER inode.  The structure must be
998 	 * left unlocked while we manipulate the related vnode to avoid
999 	 * a deadlock.
1000 	 */
1001 	ip = hammer_get_inode(&trans, NULL, ino,
1002 			      hmp->asof, localization,
1003 			      0, &error);
1004 	if (ip == NULL) {
1005 		*vpp = NULL;
1006 	} else {
1007 		error = hammer_get_vnode(ip, vpp);
1008 		hammer_rel_inode(ip, 0);
1009 	}
1010 	hammer_done_transaction(&trans);
1011 	lwkt_reltoken(&hmp->fs_token);
1012 	return (error);
1013 }
1014 
1015 /*
1016  * Return the root vnode for the filesystem.
1017  *
1018  * The root directory always has an object id of HAMMER_OBJID_ROOT,
1019  * so this reduces to a simple hammer_vfs_vget().
1020  */
1021 static int
1022 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
1023 {
1024 	int error;
1025 
1026 	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, vpp);
1027 	return (error);
1028 }
1029 
1030 static int
1031 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
1032 {
1033 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1034 	hammer_volume_t volume;
1035 	hammer_volume_ondisk_t ondisk;
1036 	int error;
1037 	int64_t bfree;
1038 	int64_t breserved;
1039 
1040 	lwkt_gettoken(&hmp->fs_token);
1041 	volume = hammer_get_root_volume(hmp, &error);
1042 	if (error) {
1043 		lwkt_reltoken(&hmp->fs_token);
1044 		return(error);
1045 	}
1046 	ondisk = volume->ondisk;
1047 
1048 	/*
1049 	 * Basic stats (free big-blocks converted to f_bsize units, less reserve)
1050 	 */
1051 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1052 	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
1053 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
1054 	hammer_rel_volume(volume, 0);
1055 
1056 	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1057 	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
1058 	if (mp->mnt_stat.f_files < 0)
1059 		mp->mnt_stat.f_files = 0;
1060 
1061 	*sbp = mp->mnt_stat;
1062 	lwkt_reltoken(&hmp->fs_token);
1063 	return(0);
1064 }
1065 
1066 static int
1067 hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1068 {
1069 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1070 	hammer_volume_t volume;
1071 	hammer_volume_ondisk_t ondisk;
1072 	int error;
1073 	int64_t bfree;
1074 	int64_t breserved;
1075 
1076 	lwkt_gettoken(&hmp->fs_token);
1077 	volume = hammer_get_root_volume(hmp, &error);
1078 	if (error) {
1079 		lwkt_reltoken(&hmp->fs_token);
1080 		return(error);
1081 	}
1082 	ondisk = volume->ondisk;
1083 
1084 	/*
1085 	 * Basic stats (free big-blocks converted to f_bsize units, less reserve)
1086 	 */
1087 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1088 	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
1089 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
1090 	hammer_rel_volume(volume, 0);
1091 
1092 	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1093 	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
1094 	if (mp->mnt_vstat.f_files < 0)
1095 		mp->mnt_vstat.f_files = 0;
1096 	*sbp = mp->mnt_vstat;
1097 	lwkt_reltoken(&hmp->fs_token);
1098 	return(0);
1099 }
1100 
1101 /*
1102  * Sync the filesystem.  Currently we have to run it twice; the second
1103  * pass advances the undo start index to the end index, so if a crash
1104  * occurs no undos will be run on mount.
1105  *
1106  * We do not sync the filesystem if we are called from a panic.  If we did
1107  * we might end up blowing up a sync that was already in progress.
1108  */
1109 static int
1110 hammer_vfs_sync(struct mount *mp, int waitfor)
1111 {
1112 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1113 	int error;
1114 
1115 	lwkt_gettoken(&hmp->fs_token);
1116 	if (panicstr == NULL) {
1117 		error = hammer_sync_hmp(hmp, waitfor);
1118 	} else {
1119 		error = EIO;
1120 	}
1121 	lwkt_reltoken(&hmp->fs_token);
1122 	return (error);
1123 }
1124 
1125 /*
1126  * Convert a vnode to a file handle.
1127  *
1128  * Accesses read-only fields on already-referenced structures so
1129  * no token is needed.
1130  */
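
/*
 * Resulting handle layout (16 bytes of fid_data), as encoded below:
 *
 *	fid_data[0..7]		ip->obj_id
 *	fid_data[8..15]		ip->obj_asof
 *	fid_ext			ip->obj_localization >> 16
 */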
1131 static int
1132 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
1133 {
1134 	hammer_inode_t ip;
1135 
1136 	KKASSERT(MAXFIDSZ >= 16);
1137 	ip = VTOI(vp);
1138 	fhp->fid_len = offsetof(struct fid, fid_data[16]);
1139 	fhp->fid_ext = ip->obj_localization >> 16;
1140 	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
1141 	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
1142 	return(0);
1143 }
1144 
1145 
1146 /*
1147  * Convert a file handle back to a vnode.
1148  *
1149  * Use rootvp to enforce PFS isolation when a PFS is exported via a
1150  * null mount.
1151  */
1152 static int
1153 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
1154 		  struct fid *fhp, struct vnode **vpp)
1155 {
1156 	hammer_mount_t hmp = (void *)mp->mnt_data;
1157 	struct hammer_transaction trans;
1158 	struct hammer_inode *ip;
1159 	struct hammer_inode_info info;
1160 	int error;
1161 	u_int32_t localization;
1162 
1163 	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
1164 	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
1165 	if (rootvp)
1166 		localization = VTOI(rootvp)->obj_localization;
1167 	else
1168 		localization = (u_int32_t)fhp->fid_ext << 16;
1169 
1170 	lwkt_gettoken(&hmp->fs_token);
1171 	hammer_simple_transaction(&trans, hmp);
1172 
1173 	/*
1174 	 * Get/allocate the hammer_inode structure.  The structure must be
1175 	 * unlocked while we manipulate the related vnode to avoid a
1176 	 * deadlock.
1177 	 */
1178 	ip = hammer_get_inode(&trans, NULL, info.obj_id,
1179 			      info.obj_asof, localization, 0, &error);
1180 	if (ip) {
1181 		error = hammer_get_vnode(ip, vpp);
1182 		hammer_rel_inode(ip, 0);
1183 	} else {
1184 		*vpp = NULL;
1185 	}
1186 	hammer_done_transaction(&trans);
1187 	lwkt_reltoken(&hmp->fs_token);
1188 	return (error);
1189 }
1190 
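/*
 * Check whether the client address 'nam' is allowed to access this
 * export.  Returns the export flags and anonymous credential on a
 * match, EACCES otherwise.
 */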
1191 static int
1192 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1193 		    int *exflagsp, struct ucred **credanonp)
1194 {
1195 	hammer_mount_t hmp = (void *)mp->mnt_data;
1196 	struct netcred *np;
1197 	int error;
1198 
1199 	lwkt_gettoken(&hmp->fs_token);
1200 	np = vfs_export_lookup(mp, &hmp->export, nam);
1201 	if (np) {
1202 		*exflagsp = np->netc_exflags;
1203 		*credanonp = &np->netc_anon;
1204 		error = 0;
1205 	} else {
1206 		error = EACCES;
1207 	}
1208 	lwkt_reltoken(&hmp->fs_token);
1209 	return (error);
1211 }
1212 
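/*
 * mountctl(2) export support.  MOUNTCTL_SET_EXPORT installs or updates
 * the NFS export list for this mount; all other operations are
 * rejected with EOPNOTSUPP.
 */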
1213 int
1214 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
1215 {
1216 	hammer_mount_t hmp = (void *)mp->mnt_data;
1217 	int error;
1218 
1219 	lwkt_gettoken(&hmp->fs_token);
1220 
1221 	switch(op) {
1222 	case MOUNTCTL_SET_EXPORT:
1223 		error = vfs_export(mp, &hmp->export, export);
1224 		break;
1225 	default:
1226 		error = EOPNOTSUPP;
1227 		break;
1228 	}
1229 	lwkt_reltoken(&hmp->fs_token);
1230 
1231 	return(error);
1232 }
1233 
1234