/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;

int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int hammer_autoflush = 2000;		/* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

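/*
 * The tunables and counters above are exported to userland under the
 * vfs.hammer sysctl tree, e.g. "sysctl vfs.hammer.cluster_enable=0".
 * CTLFLAG_RW entries may be set at runtime; CTLFLAG_RD entries are
 * read-only statistics.
 */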
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
	   &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");

KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

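/*
 * Module initialization: derive default values for the record,
 * dirty-buffer-space, and inode-queue limits from system tunables
 * (nbuf, hidirtybufspace, desiredvnodes) when they have not been
 * set explicitly via sysctl.
 */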
static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}
	if (hammer_limit_iqueued == 0)
		hammer_limit_iqueued = desiredvnodes / 5;
	return(0);
}

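/*
 * Mount or update-mount a HAMMER filesystem.  Copy in the mount
 * arguments, validate the master id, construct the in-memory
 * hammer_mount structure on a fresh mount, install the volumes,
 * run UNDO recovery, and finally look up the root vnode.
 */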
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	int maxinodes;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		if ((error = bdevvp(rootdev, &devvp))) {
			kprintf("hammer_mountroot: can't find devvp\n");
			return (error);
		}
		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.  If root
		 * increases the vnode limit you may have to do a dummy remount
		 * to adjust the HAMMER inode limit.
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		maxinodes = desiredvnodes + desiredvnodes / 5 +
			    HAMMER_RECLAIM_WAIT;
		kmalloc_raise_limit(hmp->m_inodes,
				    maxinodes * sizeof(struct hammer_inode));

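		/*
		 * Initialize the root B-Tree search boundaries to span
		 * the entire key space.
		 */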
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
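			/*
			 * Flush the remaining dirty state several times
			 * to ensure everything reaches the media before
			 * the mount goes read-only.
			 */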
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 * Only one volume; and no need for copyin.
			 */
			KKASSERT(info.nvolumes == 1);
			ksnprintf(path, MAXPATHLEN, "/dev/%s",
				  mp->mnt_stat.f_mntfromname);
			error = 0;
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors; set up enough of the mount point so we can look up
	 * the root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  The result is typically in the
	 * tens of thousands and matters primarily for small HAMMER
	 * filesystems.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] are the PFS id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

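/*
 * Unmount a HAMMER filesystem: flush out all vnodes, then tear down
 * the internal mount structure (which may issue further I/O).
 */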
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(hmp, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(TAILQ_EMPTY(&flg->flush_list));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
	krateprintf(&hmp->krate,
		"HAMMER(%s): Critical error inode=%lld %s\n",
		hmp->mp->mnt_stat.f_mntfromname,
		(ip ? ip->obj_id : -1), msg);
	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
}


/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Look up the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, HAMMER_DEF_LOCALIZATION,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * The root directory always has an obj_id of 1, so this reduces to a
 * hammer_vfs_vget() of inode 1 (caching the root vnode in hammer_mount
 * is still a FUTURE item; see hammer_vfs_mount()).
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

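/*
 * Report filesystem statistics.  The free space estimate is derived
 * from the root volume's free big-block count, converted to
 * f_bsize-sized (HAMMER_BUFSIZE) blocks.
 */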
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}

/*
 * Convert a vnode to a file handle.
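 *
 * The handle's 16 data bytes hold the inode's obj_id (bytes 0-7) and
 * obj_asof (bytes 8-15); the upper 16 bits of the localization are
 * returned in fid_ext.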
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

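/*
 * Check an NFS export request against this mount's export list,
 * returning the export flags and anonymous credentials if the client
 * is permitted access.
 */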
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

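/*
 * Set NFS export options for the mount (MOUNTCTL_SET_EXPORT).
 */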
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}