1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/vnode.h>
41 #include <sys/mount.h>
42 #include <sys/malloc.h>
43 #include <sys/nlookup.h>
44 #include <sys/fcntl.h>
45 #include <sys/sysctl.h>
46 #include <sys/buf.h>
47 #include <sys/buf2.h>
48 #include "hammer.h"
49 
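/*
 * Global tunables and statistics.  Most of the limits and debug knobs
 * below are writable through the vfs.hammer sysctl tree declared
 * further down; the counters and statistics are exported read-only.
 */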
50 int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
51 int hammer_debug_io;
52 int hammer_debug_general;
53 int hammer_debug_debug = 1;		/* medium-error panics */
54 int hammer_debug_inode;
55 int hammer_debug_locks;
56 int hammer_debug_btree;
57 int hammer_debug_tid;
58 int hammer_debug_recover;		/* -1 will disable, +1 will force */
59 int hammer_debug_recover_faults;
60 int hammer_debug_critical;		/* non-zero: enter debugger on error */
61 int hammer_cluster_enable = 1;		/* enable read clustering by default */
62 int hammer_count_fsyncs;
63 int hammer_count_inodes;
64 int hammer_count_iqueued;
65 int hammer_count_reclaiming;
66 int hammer_count_records;
67 int hammer_count_record_datas;
68 int hammer_count_volumes;
69 int hammer_count_buffers;
70 int hammer_count_nodes;
71 int64_t hammer_count_extra_space_used;
72 int64_t hammer_stats_btree_lookups;
73 int64_t hammer_stats_btree_searches;
74 int64_t hammer_stats_btree_inserts;
75 int64_t hammer_stats_btree_deletes;
76 int64_t hammer_stats_btree_elements;
77 int64_t hammer_stats_btree_splits;
78 int64_t hammer_stats_btree_iterations;
79 int64_t hammer_stats_btree_root_iterations;
80 int64_t hammer_stats_record_iterations;
81 
82 int64_t hammer_stats_file_read;
83 int64_t hammer_stats_file_write;
84 int64_t hammer_stats_file_iopsr;
85 int64_t hammer_stats_file_iopsw;
86 int64_t hammer_stats_disk_read;
87 int64_t hammer_stats_disk_write;
88 int64_t hammer_stats_inode_flushes;
89 int64_t hammer_stats_commits;
90 int64_t hammer_stats_undo;
91 
92 int hammer_count_dirtybufspace;		/* global */
93 int hammer_count_refedbufs;		/* global */
94 int hammer_count_reservations;
95 int hammer_count_io_running_read;
96 int hammer_count_io_running_write;
97 int hammer_count_io_locked;
98 int hammer_limit_dirtybufspace;		/* per-mount */
99 int hammer_limit_recs;			/* as a whole XXX */
100 int hammer_limit_inode_recs = 1024;	/* per inode */
101 int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT;
102 int hammer_autoflush = 2000;		/* auto flush */
103 int hammer_bio_count;
104 int hammer_verify_zone;
105 int hammer_verify_data = 1;
106 int hammer_write_mode;
107 int hammer_yield_check = 16;
108 int hammer_fsync_mode;
109 int64_t hammer_contention_count;
110 int64_t hammer_zone_limit;
111 
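/*
 * vfs.hammer sysctl tree exporting the tunables and statistics above,
 * e.g. "sysctl vfs.hammer.count_inodes" from userland.
 */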
112 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
113 SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
114 	   &hammer_supported_version, 0, "");
115 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
116 	   &hammer_debug_general, 0, "");
117 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
118 	   &hammer_debug_io, 0, "");
119 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
120 	   &hammer_debug_debug, 0, "");
121 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
122 	   &hammer_debug_inode, 0, "");
123 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
124 	   &hammer_debug_locks, 0, "");
125 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
126 	   &hammer_debug_btree, 0, "");
127 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
128 	   &hammer_debug_tid, 0, "");
129 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
130 	   &hammer_debug_recover, 0, "");
131 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
132 	   &hammer_debug_recover_faults, 0, "");
133 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
134 	   &hammer_debug_critical, 0, "");
135 SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
136 	   &hammer_cluster_enable, 0, "");
137 
138 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
139 	   &hammer_limit_dirtybufspace, 0, "");
140 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
141 	   &hammer_limit_recs, 0, "");
142 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
143 	   &hammer_limit_inode_recs, 0, "");
144 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW,
145 	   &hammer_limit_reclaim, 0, "");
146 
147 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
148 	   &hammer_count_fsyncs, 0, "");
149 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
150 	   &hammer_count_inodes, 0, "");
151 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
152 	   &hammer_count_iqueued, 0, "");
153 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
154 	   &hammer_count_reclaiming, 0, "");
155 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
156 	   &hammer_count_records, 0, "");
157 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
158 	   &hammer_count_record_datas, 0, "");
159 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
160 	   &hammer_count_volumes, 0, "");
161 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
162 	   &hammer_count_buffers, 0, "");
163 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
164 	   &hammer_count_nodes, 0, "");
165 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
166 	   &hammer_count_extra_space_used, 0, "");
167 
168 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
169 	   &hammer_stats_btree_searches, 0, "");
170 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
171 	   &hammer_stats_btree_lookups, 0, "");
172 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
173 	   &hammer_stats_btree_inserts, 0, "");
174 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
175 	   &hammer_stats_btree_deletes, 0, "");
176 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
177 	   &hammer_stats_btree_elements, 0, "");
178 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
179 	   &hammer_stats_btree_splits, 0, "");
180 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
181 	   &hammer_stats_btree_iterations, 0, "");
182 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
183 	   &hammer_stats_btree_root_iterations, 0, "");
184 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
185 	   &hammer_stats_record_iterations, 0, "");
186 
187 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
188 	   &hammer_stats_file_read, 0, "");
189 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
190 	   &hammer_stats_file_write, 0, "");
191 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
192 	   &hammer_stats_file_iopsr, 0, "");
193 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
194 	   &hammer_stats_file_iopsw, 0, "");
195 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
196 	   &hammer_stats_disk_read, 0, "");
197 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
198 	   &hammer_stats_disk_write, 0, "");
199 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
200 	   &hammer_stats_inode_flushes, 0, "");
201 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
202 	   &hammer_stats_commits, 0, "");
203 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
204 	   &hammer_stats_undo, 0, "");
205 
206 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
207 	   &hammer_count_dirtybufspace, 0, "");
208 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
209 	   &hammer_count_refedbufs, 0, "");
210 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
211 	   &hammer_count_reservations, 0, "");
212 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
213 	   &hammer_count_io_running_read, 0, "");
214 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
215 	   &hammer_count_io_locked, 0, "");
216 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
217 	   &hammer_count_io_running_write, 0, "");
218 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
219 	   &hammer_zone_limit, 0, "");
220 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
221 	   &hammer_contention_count, 0, "");
222 SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
223 	   &hammer_autoflush, 0, "");
224 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
225 	   &hammer_verify_zone, 0, "");
226 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
227 	   &hammer_verify_data, 0, "");
228 SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
229 	   &hammer_write_mode, 0, "");
230 SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
231 	   &hammer_yield_check, 0, "");
232 SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
233 	   &hammer_fsync_mode, 0, "");
234 
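/*
 * Master declaration for HAMMER's KTR (kernel trace) event facility.
 */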
235 KTR_INFO_MASTER(hammer);
236 
237 /*
238  * VFS ABI
239  */
240 static void	hammer_free_hmp(struct mount *mp);
241 
242 static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
243 				struct ucred *cred);
244 static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
245 static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
246 static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
247 				struct ucred *cred);
248 static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
249 				struct ucred *cred);
250 static int	hammer_vfs_sync(struct mount *mp, int waitfor);
251 static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
252 				ino_t ino, struct vnode **vpp);
253 static int	hammer_vfs_init(struct vfsconf *conf);
254 static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
255 				struct fid *fhp, struct vnode **vpp);
256 static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
257 static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
258 				int *exflagsp, struct ucred **credanonp);
259 
260 
261 static struct vfsops hammer_vfsops = {
262 	.vfs_mount	= hammer_vfs_mount,
263 	.vfs_unmount	= hammer_vfs_unmount,
264 	.vfs_root 	= hammer_vfs_root,
265 	.vfs_statfs	= hammer_vfs_statfs,
266 	.vfs_statvfs	= hammer_vfs_statvfs,
267 	.vfs_sync	= hammer_vfs_sync,
268 	.vfs_vget	= hammer_vfs_vget,
269 	.vfs_init	= hammer_vfs_init,
270 	.vfs_vptofh	= hammer_vfs_vptofh,
271 	.vfs_fhtovp	= hammer_vfs_fhtovp,
272 	.vfs_checkexp	= hammer_vfs_checkexp
273 };
274 
275 MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");
276 
277 VFS_SET(hammer_vfsops, hammer, 0);
278 MODULE_VERSION(hammer, 1);
279 
280 static int
281 hammer_vfs_init(struct vfsconf *conf)
282 {
283 	int n;
284 
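	/*
	 * Default the in-memory record limit to scale with the buffer
	 * cache (nbuf), capped by what the M_HAMMER kmalloc pool can
	 * back, and default the dirty buffer space limit to half of
	 * hidirtybufspace.
	 */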
285 	if (hammer_limit_recs == 0) {
286 		hammer_limit_recs = nbuf * 25;
287 		n = kmalloc_limit(M_HAMMER) / 512;
288 		if (hammer_limit_recs > n)
289 			hammer_limit_recs = n;
290 	}
291 	if (hammer_limit_dirtybufspace == 0) {
292 		hammer_limit_dirtybufspace = hidirtybufspace / 2;
293 		if (hammer_limit_dirtybufspace < 100)
294 			hammer_limit_dirtybufspace = 100;
295 	}
296 	return(0);
297 }
298 
299 static int
300 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
301 		 struct ucred *cred)
302 {
303 	struct hammer_mount_info info;
304 	hammer_mount_t hmp;
305 	hammer_volume_t rootvol;
306 	struct vnode *rootvp;
307 	struct vnode *devvp = NULL;
308 	const char *upath;	/* volume name in userspace */
309 	char *path;		/* volume name in system space */
310 	int error;
311 	int i;
312 	int master_id;
313 	int maxinodes;
314 
315 	/*
316 	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
317 	 */
318 	if (mntpt == NULL) {
319 		if ((error = bdevvp(rootdev, &devvp))) {
320 			kprintf("hammer_mountroot: can't find devvp\n");
321 			return (error);
322 		}
323 		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
324 		bzero(&info, sizeof(info));
325 		info.asof = 0;
326 		info.hflags = 0;
327 		info.nvolumes = 1;
328 	} else {
329 		if ((error = copyin(data, &info, sizeof(info))) != 0)
330 			return (error);
331 	}
332 
333 	/*
334 	 * updating or new mount
335 	 */
336 	if (mp->mnt_flag & MNT_UPDATE) {
337 		hmp = (void *)mp->mnt_data;
338 		KKASSERT(hmp != NULL);
339 	} else {
340 		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
341 			return (EINVAL);
342 		hmp = NULL;
343 	}
344 
345 	/*
346 	 * master-id validation.  The master id may not be changed by a
347 	 * mount update.
348 	 */
349 	if (info.hflags & HMNT_MASTERID) {
350 		if (hmp && hmp->master_id != info.master_id) {
351 			kprintf("hammer: cannot change master id "
352 				"with mount update\n");
353 			return(EINVAL);
354 		}
355 		master_id = info.master_id;
356 		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
357 			return (EINVAL);
358 	} else {
359 		if (hmp)
360 			master_id = hmp->master_id;
361 		else
362 			master_id = 0;
363 	}
364 
365 	/*
366 	 * Internal mount data structure
367 	 */
368 	if (hmp == NULL) {
369 		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
370 		mp->mnt_data = (qaddr_t)hmp;
371 		hmp->mp = mp;
372 		/*TAILQ_INIT(&hmp->recycle_list);*/
373 
374 		/*
375 		 * Make sure kmalloc type limits are set appropriately.  If root
376 		 * increases the vnode limit you may have to do a dummy remount
377 		 * to adjust the HAMMER inode limit.
378 		 */
379 		kmalloc_create(&hmp->m_misc, "HAMMER-others");
380 		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
381 
382 		maxinodes = desiredvnodes + desiredvnodes / 5 +
383 			    hammer_limit_reclaim * 2;
384 		kmalloc_raise_limit(hmp->m_inodes,
385 				    maxinodes * sizeof(struct hammer_inode));
386 
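		/*
		 * Set up the leftmost and rightmost possible B-Tree
		 * boundary elements.  Together they bracket the entire
		 * B-Tree key space.
		 */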
387 		hmp->root_btree_beg.localization = 0x00000000U;
388 		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
389 		hmp->root_btree_beg.key = -0x8000000000000000LL;
390 		hmp->root_btree_beg.create_tid = 1;
391 		hmp->root_btree_beg.delete_tid = 1;
392 		hmp->root_btree_beg.rec_type = 0;
393 		hmp->root_btree_beg.obj_type = 0;
394 
395 		hmp->root_btree_end.localization = 0xFFFFFFFFU;
396 		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
397 		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
398 		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
399 		hmp->root_btree_end.delete_tid = 0;   /* special case */
400 		hmp->root_btree_end.rec_type = 0xFFFFU;
401 		hmp->root_btree_end.obj_type = 0;
402 
403 		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
404 		hmp->krate.count = -16;	/* initial burst */
405 
406 		hmp->sync_lock.refs = 1;
407 		hmp->free_lock.refs = 1;
408 		hmp->undo_lock.refs = 1;
409 		hmp->blkmap_lock.refs = 1;
410 		hmp->snapshot_lock.refs = 1;
411 
412 		TAILQ_INIT(&hmp->delay_list);
413 		TAILQ_INIT(&hmp->flush_group_list);
414 		TAILQ_INIT(&hmp->objid_cache_list);
415 		TAILQ_INIT(&hmp->undo_lru_list);
416 		TAILQ_INIT(&hmp->reclaim_list);
417 	}
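	/*
	 * Apply the user-settable flags from the mount arguments.  These
	 * may be changed on a mount update; the master id (validated
	 * above) may not.
	 */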
418 	hmp->hflags &= ~HMNT_USERFLAGS;
419 	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
420 
421 	hmp->master_id = master_id;
422 
423 	if (info.asof) {
424 		mp->mnt_flag |= MNT_RDONLY;
425 		hmp->asof = info.asof;
426 	} else {
427 		hmp->asof = HAMMER_MAX_TID;
428 	}
429 
430 	/*
431 	 * Re-open read-write if originally read-only, or vice versa.
432 	 *
433 	 * When going from read-only to read-write execute the stage2
434 	 * recovery if it has not already been run.
435 	 */
436 	if (mp->mnt_flag & MNT_UPDATE) {
437 		error = 0;
438 		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
439 			kprintf("HAMMER read-only -> read-write\n");
440 			hmp->ronly = 0;
441 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
442 				hammer_adjust_volume_mode, NULL);
443 			rootvol = hammer_get_root_volume(hmp, &error);
444 			if (rootvol) {
445 				hammer_recover_flush_buffers(hmp, rootvol, 1);
446 				error = hammer_recover_stage2(hmp, rootvol);
447 				bcopy(rootvol->ondisk->vol0_blockmap,
448 				      hmp->blockmap,
449 				      sizeof(hmp->blockmap));
450 				hammer_rel_volume(rootvol, 0);
451 			}
452 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
453 				hammer_reload_inode, NULL);
454 			/* kernel clears MNT_RDONLY */
455 		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
456 			kprintf("HAMMER read-write -> read-only\n");
457 			hmp->ronly = 1;	/* messy */
458 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
459 				hammer_reload_inode, NULL);
460 			hmp->ronly = 0;
461 			hammer_flusher_sync(hmp);
462 			hammer_flusher_sync(hmp);
463 			hammer_flusher_sync(hmp);
464 			hmp->ronly = 1;
465 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
466 				hammer_adjust_volume_mode, NULL);
467 		}
468 		return(error);
469 	}
470 
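	/*
	 * New mount: initialize the per-mount RB trees and the I/O and
	 * flush queues before loading the volumes.
	 */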
471 	RB_INIT(&hmp->rb_vols_root);
472 	RB_INIT(&hmp->rb_inos_root);
473 	RB_INIT(&hmp->rb_nods_root);
474 	RB_INIT(&hmp->rb_undo_root);
475 	RB_INIT(&hmp->rb_resv_root);
476 	RB_INIT(&hmp->rb_bufs_root);
477 	RB_INIT(&hmp->rb_pfsm_root);
478 
479 	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
480 
481 	TAILQ_INIT(&hmp->volu_list);
482 	TAILQ_INIT(&hmp->undo_list);
483 	TAILQ_INIT(&hmp->data_list);
484 	TAILQ_INIT(&hmp->meta_list);
485 	TAILQ_INIT(&hmp->lose_list);
486 
487 	/*
488 	 * Load volumes
489 	 */
490 	path = objcache_get(namei_oc, M_WAITOK);
491 	hmp->nvolumes = -1;
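	/*
	 * info.volumes is a userspace array of pointers to path strings.
	 * Copy in each pointer and then the string itself, except for a
	 * root mount where the single device path is constructed from
	 * f_mntfromname.
	 */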
492 	for (i = 0; i < info.nvolumes; ++i) {
493 		if (mntpt == NULL) {
494 			/*
495 			 * Root mount.
496 			 * Only one volume; no need for copyin.
497 			 */
498 			KKASSERT(info.nvolumes == 1);
499 			ksnprintf(path, MAXPATHLEN, "/dev/%s",
500 				  mp->mnt_stat.f_mntfromname);
501 			error = 0;
502 		} else {
503 			error = copyin(&info.volumes[i], &upath,
504 				       sizeof(char *));
505 			if (error == 0)
506 				error = copyinstr(upath, path,
507 						  MAXPATHLEN, NULL);
508 		}
509 		if (error == 0)
510 			error = hammer_install_volume(hmp, path, devvp);
511 		if (error)
512 			break;
513 	}
514 	objcache_put(namei_oc, path);
515 
516 	/*
517 	 * Make sure we found a root volume
518 	 */
519 	if (error == 0 && hmp->rootvol == NULL) {
520 		kprintf("hammer_mount: No root volume found!\n");
521 		error = EINVAL;
522 	}
523 
524 	/*
525 	 * Check that all required volumes are available
526 	 */
527 	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
528 		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
529 		error = EINVAL;
530 	}
531 
532 	if (error) {
533 		hammer_free_hmp(mp);
534 		return (error);
535 	}
536 
537 	/*
538 	 * No errors, setup enough of the mount point so we can lookup the
539 	 * root vnode.
540 	 */
541 	mp->mnt_iosize_max = MAXPHYS;
542 	mp->mnt_kern_flag |= MNTK_FSMID;
543 
544 	/*
545 	 * note: f_iosize is used by vnode_pager_haspage() when constructing
546 	 * its VOP_BMAP call.
547 	 */
548 	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
549 	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
550 
551 	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
552 	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;
553 
554 	mp->mnt_maxsymlinklen = 255;
555 	mp->mnt_flag |= MNT_LOCAL;
556 
557 	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
558 	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
559 	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
560 
561 	/*
562 	 * The root volume's ondisk pointer is only valid if we hold a
563 	 * reference to it.
564 	 */
565 	rootvol = hammer_get_root_volume(hmp, &error);
566 	if (error)
567 		goto failed;
568 
569 	/*
570 	 * Perform any necessary UNDO operations.  The recovery code does
571 	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
572 	 * and then re-copy it again after recovery is complete.
573 	 *
574 	 * If this is a read-only mount the UNDO information is retained
575 	 * in memory in the form of dirty buffer cache buffers, and not
576 	 * written back to the media.
577 	 */
578 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
579 	      sizeof(hmp->blockmap));
580 
581 	/*
582 	 * Check filesystem version
583 	 */
584 	hmp->version = rootvol->ondisk->vol_version;
585 	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
586 	    hmp->version > HAMMER_VOL_VERSION_MAX) {
587 		kprintf("HAMMER: mount unsupported fs version %d\n",
588 			hmp->version);
589 		error = ERANGE;
590 		goto done;
591 	}
592 
593 	/*
594 	 * The undo_rec_limit limits the size of flush groups to avoid
595 	 * blowing out the UNDO FIFO.  The calculated value is typically
596 	 * in the tens of thousands and the limit matters primarily for
597 	 * small HAMMER filesystems.
598 	 */
599 	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
600 	if (hammer_debug_general & 0x0001)
601 		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
602 
603 	/*
604 	 * NOTE: Recover stage1 not only handles meta-data recovery, it
605 	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
606 	 */
607 	error = hammer_recover_stage1(hmp, rootvol);
608 	if (error) {
609 		kprintf("Failed to recover HAMMER filesystem on mount\n");
610 		goto done;
611 	}
612 
613 	/*
614 	 * Finish setup now that we have a good root volume.
615 	 *
616 	 * The top 16 bits of fsid.val[1] is a pfs id.
617 	 * The top 16 bits of fsid.val[1] are a PFS id.
618 	ksnprintf(mp->mnt_stat.f_mntfromname,
619 		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
620 		  rootvol->ondisk->vol_name);
621 	mp->mnt_stat.f_fsid.val[0] =
622 		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
623 	mp->mnt_stat.f_fsid.val[1] =
624 		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
625 	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;
626 
627 	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
628 	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
629 				     sizeof(mp->mnt_vstat.f_fsid_uuid));
630 
631 	/*
632 	 * Certain often-modified fields in the root volume are cached in
633 	 * the hammer_mount structure so we do not have to generate lots
634 	 * of little UNDO structures for them.
635 	 *
636 	 * Recopy after recovery.  This also has the side effect of
637 	 * setting our cached undo FIFO's first_offset, which serves to
638 	 * placemark the FIFO start for the NEXT flush cycle while the
639 	 * on-disk first_offset represents the LAST flush cycle.
640 	 */
641 	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
642 	hmp->flush_tid1 = hmp->next_tid;
643 	hmp->flush_tid2 = hmp->next_tid;
644 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
645 	      sizeof(hmp->blockmap));
646 	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
647 
648 	hammer_flusher_create(hmp);
649 
650 	/*
651 	 * Locate the root directory using the root cluster's B-Tree as a
652 	 * starting point.  The root directory uses an obj_id of 1.
653 	 *
654 	 * FUTURE: Leave the root directory cached referenced but unlocked
655 	 * in hmp->rootvp (need to flush it on unmount).
656 	 */
657 	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
658 	if (error)
659 		goto done;
660 	vput(rootvp);
661 	/*vn_unlock(hmp->rootvp);*/
662 	if (hmp->ronly == 0)
663 		error = hammer_recover_stage2(hmp, rootvol);
664 
665 done:
666 	hammer_rel_volume(rootvol, 0);
667 failed:
668 	/*
669 	 * Cleanup and return.
670 	 */
671 	if (error)
672 		hammer_free_hmp(mp);
673 	return (error);
674 }
675 
676 static int
677 hammer_vfs_unmount(struct mount *mp, int mntflags)
678 {
679 #if 0
680 	struct hammer_mount *hmp = (void *)mp->mnt_data;
681 #endif
682 	int flags;
683 	int error;
684 
685 	/*
686 	 * Clean out the vnodes
687 	 */
688 	flags = 0;
689 	if (mntflags & MNT_FORCE)
690 		flags |= FORCECLOSE;
691 	if ((error = vflush(mp, 0, flags)) != 0)
692 		return (error);
693 
694 	/*
695 	 * Clean up the internal mount structure and related entities.  This
696 	 * may issue I/O.
697 	 */
698 	hammer_free_hmp(mp);
699 	return(0);
700 }
701 
702 /*
703  * Clean up the internal mount structure and disassociate it from the mount.
704  * This may issue I/O.
705  */
706 static void
707 hammer_free_hmp(struct mount *mp)
708 {
709 	struct hammer_mount *hmp = (void *)mp->mnt_data;
710 	hammer_flush_group_t flg;
711 	int count;
712 	int dummy;
713 
714 	/*
715 	 * Flush anything dirty.  This won't even run if the
716 	 * filesystem errored-out.
717 	 */
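	/*
	 * The first few flush passes run back-to-back; from the fifth
	 * pass on a progress indicator is printed and we sleep one
	 * second between passes, giving up after 30 passes.
	 */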
718 	count = 0;
719 	while (hammer_flusher_haswork(hmp)) {
720 		hammer_flusher_sync(hmp);
721 		++count;
722 		if (count >= 5) {
723 			if (count == 5)
724 				kprintf("HAMMER: umount flushing.");
725 			else
726 				kprintf(".");
727 			tsleep(&dummy, 0, "hmrufl", hz);
728 		}
729 		if (count == 30) {
730 			kprintf("giving up\n");
731 			break;
732 		}
733 	}
734 	if (count >= 5 && count < 30)
735 		kprintf("\n");
736 
737 	/*
738 	 * If the mount had a critical error we have to destroy any
739 	 * remaining inodes before we can finish cleaning up the flusher.
740 	 */
741 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
742 		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
743 			hammer_destroy_inode_callback, NULL);
744 	}
745 
746 	/*
747 	 * There shouldn't be any inodes left now and any left over
748 	 * flush groups should now be empty.
749 	 */
750 	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
751 	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
752 		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
753 		KKASSERT(RB_EMPTY(&flg->flush_tree));
754 		if (flg->refs) {
755 			kprintf("HAMMER: Warning, flush_group %p was "
756 				"not empty on umount!\n", flg);
757 		}
758 		kfree(flg, hmp->m_misc);
759 	}
760 
761 	/*
762 	 * We can finally destroy the flusher
763 	 */
764 	hammer_flusher_destroy(hmp);
765 
766 	/*
767 	 * We may have held recovered buffers due to a read-only mount.
768 	 * These must be discarded.
769 	 */
770 	if (hmp->ronly)
771 		hammer_recover_flush_buffers(hmp, NULL, -1);
772 
773 	/*
774 	 * Unload buffers and then volumes
775 	 */
776 	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
777 		hammer_unload_buffer, NULL);
778 	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
779 		hammer_unload_volume, NULL);
780 
781 	mp->mnt_data = NULL;
782 	mp->mnt_flag &= ~MNT_LOCAL;
783 	hmp->mp = NULL;
784 	hammer_destroy_objid_cache(hmp);
785 	kmalloc_destroy(&hmp->m_misc);
786 	kmalloc_destroy(&hmp->m_inodes);
787 	kfree(hmp, M_HAMMER);
788 }
789 
790 /*
791  * Report critical errors.  ip may be NULL.
792  */
793 void
794 hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
795 		      int error, const char *msg)
796 {
797 	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
798 
799 	krateprintf(&hmp->krate,
800 		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
801 		    hmp->mp->mnt_stat.f_mntfromname,
802 		    (intmax_t)(ip ? ip->obj_id : -1),
803 		    error, msg);
804 
805 	if (hmp->ronly == 0) {
806 		hmp->ronly = 2;		/* special errored read-only mode */
807 		hmp->mp->mnt_flag |= MNT_RDONLY;
808 		kprintf("HAMMER(%s): Forcing read-only mode\n",
809 			hmp->mp->mnt_stat.f_mntfromname);
810 	}
811 	hmp->error = error;
812 	if (hammer_debug_critical)
813 		Debugger("Entering debugger");
814 }
815 
816 
817 /*
818  * Obtain a vnode for the specified inode number.  An exclusively locked
819  * vnode is returned.
820  */
821 int
822 hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
823 		ino_t ino, struct vnode **vpp)
824 {
825 	struct hammer_transaction trans;
826 	struct hammer_mount *hmp = (void *)mp->mnt_data;
827 	struct hammer_inode *ip;
828 	int error;
829 	u_int32_t localization;
830 
831 	hammer_simple_transaction(&trans, hmp);
832 
833 	/*
834 	 * If a directory vnode is supplied (mainly NFS) then we can acquire
835 	 * the PFS domain from it.  Otherwise we would only be able to vget
836 	 * inodes in the root PFS.
837 	 */
838 	if (dvp) {
839 		localization = HAMMER_DEF_LOCALIZATION +
840 				VTOI(dvp)->obj_localization;
841 	} else {
842 		localization = HAMMER_DEF_LOCALIZATION;
843 	}
844 
845 	/*
846 	 * Lookup the requested HAMMER inode.  The structure must be
847 	 * left unlocked while we manipulate the related vnode to avoid
848 	 * a deadlock.
849 	 */
850 	ip = hammer_get_inode(&trans, NULL, ino,
851 			      hmp->asof, localization,
852 			      0, &error);
853 	if (ip == NULL) {
854 		*vpp = NULL;
855 		hammer_done_transaction(&trans);
856 		return(error);
857 	}
858 	error = hammer_get_vnode(ip, vpp);
859 	hammer_rel_inode(ip, 0);
860 	hammer_done_transaction(&trans);
861 	return (error);
862 }
863 
864 /*
865  * Return the root vnode for the filesystem.
866  *
867  * HAMMER stores the root vnode in the hammer_mount structure so
868  * getting it is easy.
869  */
870 static int
871 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
872 {
873 #if 0
874 	struct hammer_mount *hmp = (void *)mp->mnt_data;
875 #endif
876 	int error;
877 
878 	error = hammer_vfs_vget(mp, NULL, 1, vpp);
879 	return (error);
880 }
881 
882 static int
883 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
884 {
885 	struct hammer_mount *hmp = (void *)mp->mnt_data;
886 	hammer_volume_t volume;
887 	hammer_volume_ondisk_t ondisk;
888 	int error;
889 	int64_t bfree;
890 	int64_t breserved;
891 
892 	volume = hammer_get_root_volume(hmp, &error);
893 	if (error)
894 		return(error);
895 	ondisk = volume->ondisk;
896 
897 	/*
898 	 * Basic stats
899 	 */
900 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
901 	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
902 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
903 	hammer_rel_volume(volume, 0);
904 
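	/*
	 * bfree is in bytes (free big-blocks times HAMMER_LARGEBLOCK_SIZE);
	 * subtract the write reservation and convert to f_bsize
	 * (HAMMER_BUFSIZE) blocks.
	 */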
905 	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
906 	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
907 	if (mp->mnt_stat.f_files < 0)
908 		mp->mnt_stat.f_files = 0;
909 
910 	*sbp = mp->mnt_stat;
911 	return(0);
912 }
913 
914 static int
915 hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
916 {
917 	struct hammer_mount *hmp = (void *)mp->mnt_data;
918 	hammer_volume_t volume;
919 	hammer_volume_ondisk_t ondisk;
920 	int error;
921 	int64_t bfree;
922 	int64_t breserved;
923 
924 	volume = hammer_get_root_volume(hmp, &error);
925 	if (error)
926 		return(error);
927 	ondisk = volume->ondisk;
928 
929 	/*
930 	 * Basic stats
931 	 */
932 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
933 	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
934 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
935 	hammer_rel_volume(volume, 0);
936 
937 	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
938 	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
939 	if (mp->mnt_vstat.f_files < 0)
940 		mp->mnt_vstat.f_files = 0;
941 	*sbp = mp->mnt_vstat;
942 	return(0);
943 }
944 
945 /*
946  * Sync the filesystem.  Currently we have to run it twice, the second
947  * one will advance the undo start index to the end index, so if a crash
948  * occurs no undos will be run on mount.
949  *
950  * We do not sync the filesystem if we are called from a panic.  If we did
951  * we might end up blowing up a sync that was already in progress.
952  */
953 static int
954 hammer_vfs_sync(struct mount *mp, int waitfor)
955 {
956 	struct hammer_mount *hmp = (void *)mp->mnt_data;
957 	int error;
958 
959 	if (panicstr == NULL) {
960 		error = hammer_sync_hmp(hmp, waitfor);
961 	} else {
962 		error = EIO;
963 	}
964 	return (error);
965 }
966 
967 /*
968  * Convert a vnode to a file handle.
969  */
970 static int
971 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
972 {
973 	hammer_inode_t ip;
974 
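	/*
	 * fid_data holds the 64 bit obj_id and obj_asof back to back;
	 * fid_ext holds the PFS id taken from the high 16 bits of
	 * obj_localization.
	 */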
975 	KKASSERT(MAXFIDSZ >= 16);
976 	ip = VTOI(vp);
977 	fhp->fid_len = offsetof(struct fid, fid_data[16]);
978 	fhp->fid_ext = ip->obj_localization >> 16;
979 	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
980 	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
981 	return(0);
982 }
983 
984 
985 /*
986  * Convert a file handle back to a vnode.
987  *
988  * Use rootvp to enforce PFS isolation when a PFS is exported via a
989  * null mount.
990  */
991 static int
992 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
993 		  struct fid *fhp, struct vnode **vpp)
994 {
995 	struct hammer_transaction trans;
996 	struct hammer_inode *ip;
997 	struct hammer_inode_info info;
998 	int error;
999 	u_int32_t localization;
1000 
1001 	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
1002 	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
1003 	if (rootvp)
1004 		localization = VTOI(rootvp)->obj_localization;
1005 	else
1006 		localization = (u_int32_t)fhp->fid_ext << 16;
1007 
1008 	hammer_simple_transaction(&trans, (void *)mp->mnt_data);
1009 
1010 	/*
1011 	 * Get/allocate the hammer_inode structure.  The structure must be
1012 	 * unlocked while we manipulate the related vnode to avoid a
1013 	 * deadlock.
1014 	 */
1015 	ip = hammer_get_inode(&trans, NULL, info.obj_id,
1016 			      info.obj_asof, localization, 0, &error);
1017 	if (ip == NULL) {
1018 		*vpp = NULL;
1019 		return(error);
1020 	}
1021 	error = hammer_get_vnode(ip, vpp);
1022 	hammer_rel_inode(ip, 0);
1023 	hammer_done_transaction(&trans);
1024 	return (error);
1025 }
1026 
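/*
 * Validate an NFS export request.  Look the client address up in the
 * export list and return its export flags and anonymous credential,
 * or EACCES if it does not match.
 */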
1027 static int
1028 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1029 		    int *exflagsp, struct ucred **credanonp)
1030 {
1031 	hammer_mount_t hmp = (void *)mp->mnt_data;
1032 	struct netcred *np;
1033 	int error;
1034 
1035 	np = vfs_export_lookup(mp, &hmp->export, nam);
1036 	if (np) {
1037 		*exflagsp = np->netc_exflags;
1038 		*credanonp = &np->netc_anon;
1039 		error = 0;
1040 	} else {
1041 		error = EACCES;
1042 	}
1043 	return (error);
1044 
1045 }
1046 
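/*
 * Export-control entry point.  Only MOUNTCTL_SET_EXPORT is supported;
 * it installs or replaces the export list that hammer_vfs_checkexp()
 * consults.
 */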
1047 int
1048 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
1049 {
1050 	hammer_mount_t hmp = (void *)mp->mnt_data;
1051 	int error;
1052 
1053 	switch(op) {
1054 	case MOUNTCTL_SET_EXPORT:
1055 		error = vfs_export(mp, &hmp->export, export);
1056 		break;
1057 	default:
1058 		error = EOPNOTSUPP;
1059 		break;
1060 	}
1061 	return(error);
1062 }
1063 
1064