/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 *	  in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_running_io;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 1024;	/* per inode */
int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT;
int hammer_limit_redo = 4096 * 1024;	/* per inode */
int hammer_autoflush = 2000;		/* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_yield_check = 16;
int hammer_fsync_mode = 3;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

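/*
 * The globals above are exported under the vfs.hammer sysctl tree.
 * Illustrative usage from userland (not part of this file):
 *
 *	sysctl vfs.hammer.count_inodes		# read a counter
 *	sysctl vfs.hammer.cluster_enable=0	# tune a knob
 *
 * CTLFLAG_RD entries are read-only statistics; CTLFLAG_RW entries may
 * be changed at runtime.
 */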
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
	   &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_running_io, CTLFLAG_RW,
	   &hammer_limit_running_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
	   &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW,
	   &hammer_limit_reclaim, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
	   &hammer_limit_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
	   &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
	   &hammer_stats_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
	   &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
	   &hammer_fsync_mode, 0, "");

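/*
 * Declare the master enable mask for this filesystem's KTR (kernel
 * trace) event points; see ktr(4).
 */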
KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

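	/*
	 * Size the in-memory record limit from the buffer cache
	 * (nbuf * 25 records), then cap it so that, at roughly 512
	 * bytes per record, records cannot exceed the M_HAMMER
	 * kmalloc limit.
	 */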
	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}

	/*
	 * Set reasonable limits to maintain an I/O pipeline.  This is
	 * used by the flush code which explicitly initiates I/O, and
	 * is per-mount.
	 *
	 * The system-driven buffer cache uses vfs.lorunningspace and
	 * vfs.hirunningspace globally.
	 */
	if (hammer_limit_running_io == 0)
		hammer_limit_running_io = hammer_limit_dirtybufspace;
	if (hammer_limit_running_io > 10 * 1024 * 1024)
		hammer_limit_running_io = 10 * 1024 * 1024;
	return(0);
}

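/*
 * Illustrative sketch of a userland caller (not part of this file):
 * the mount utility passes a struct hammer_mount_info whose 'volumes'
 * field points at an array of volume path strings in user space:
 *
 *	struct hammer_mount_info info;
 *	const char *vols[] = { "/dev/da8s1a", "/dev/da9s1a" };	(example paths)
 *
 *	bzero(&info, sizeof(info));
 *	info.volumes = vols;
 *	info.nvolumes = 2;
 *	mount("hammer", "/mnt", 0, &info);
 *
 * The copyin()/copyinstr() calls below fetch those strings.
 */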
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

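		/*
		 * For a root mount the volume list arrives via
		 * f_mntfromname as a single colon-separated string,
		 * e.g. "da8s1a:da9s1a" (illustrative) counts as two
		 * volumes below.
		 */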
		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

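		/*
		 * Seed the root B-Tree cursor bounds: root_btree_beg
		 * holds the smallest possible key and root_btree_end
		 * the largest, so together they bracket the entire
		 * B-Tree key space.
		 */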
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, 1, "hammerfs");
	lwkt_token_init(&hmp->io_token, 1, "hammerio");

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardless of how we are flagged.
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and matters primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	lwkt_gettoken(&hmp->fs_token);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 0, flags);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	if (error == 0) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return(error);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (intmax_t)(ip ? ip->obj_id : -1),
		    error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
	if (hammer_debug_critical)
		Debugger("Entering debugger");
}


/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
				VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * The root directory uses an obj_id of 1, so we simply vget it;
 * caching the root vnode in hmp->rootvp is left as future work (see
 * the FUTURE note in hammer_vfs_mount()).
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int error;

	error = hammer_vfs_vget(mp, NULL, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

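	/*
	 * Convert the free big-block count (bytes, at
	 * HAMMER_LARGEBLOCK_SIZE per big-block) into f_bsize-sized
	 * blocks, less the write reservation computed above.
	 */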
	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

/*
 * Convert a vnode to a file handle.
 *
 * Accesses read-only fields on already-referenced structures so
 * no token is needed.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

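	/*
	 * The handle carries 16 bytes of fid_data: the 64-bit obj_id
	 * at offset 0 and the 64-bit obj_asof TID at offset 8, with
	 * the upper 16 bits of the inode's localization stored in
	 * fid_ext.  hammer_vfs_fhtovp() reverses this layout.
	 */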
	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip) {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*vpp = NULL;
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

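/*
 * Check whether an NFS client (nam) may access this export.  On a
 * match, return the export flags and anonymous credential; otherwise
 * fail with EACCES.
 */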
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

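/*
 * Update this mount's NFS export table; only the MOUNTCTL_SET_EXPORT
 * operation is supported.
 */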
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&hmp->fs_token);

	return(error);
}
1175