xref: /dragonfly/sys/vfs/hammer/hammer_vfsops.c (revision 5ca0a96d)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/mountctl.h>
36 
37 #include "hammer.h"
38 
39 /*
40  * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
41  *	  in conditionals.
42  */
43 int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
44 int hammer_debug_io;
45 int hammer_debug_general;
46 int hammer_debug_inode;
47 int hammer_debug_locks;
48 int hammer_debug_btree;
49 int hammer_debug_tid;
50 int hammer_debug_recover;		/* -1 will disable, +1 will force */
51 int hammer_debug_critical;		/* non-zero: enter debugger on error */
52 int hammer_cluster_enable = 2;		/* enable cluster_read, scale x 2 */
53 int hammer_tdmux_ticks;
54 int hammer_count_fsyncs;
55 int hammer_count_inodes;
56 int hammer_count_iqueued;
57 int hammer_count_reclaims;
58 int hammer_count_records;
59 int hammer_count_record_datas;
60 int hammer_count_volumes;
61 int hammer_count_buffers;
62 int hammer_count_nodes;
63 int64_t hammer_stats_btree_lookups;
64 int64_t hammer_stats_btree_searches;
65 int64_t hammer_stats_btree_inserts;
66 int64_t hammer_stats_btree_deletes;
67 int64_t hammer_stats_btree_elements;
68 int64_t hammer_stats_btree_splits;
69 int64_t hammer_stats_btree_iterations;
70 int64_t hammer_stats_btree_root_iterations;
71 int64_t hammer_stats_record_iterations;
72 
73 int64_t hammer_stats_file_read;
74 int64_t hammer_stats_file_write;
75 int64_t hammer_stats_disk_read;
76 int64_t hammer_stats_disk_write;
77 int64_t hammer_stats_inode_flushes;
78 int64_t hammer_stats_commits;
79 int64_t hammer_stats_undo;
80 int64_t hammer_stats_redo;
81 
82 long hammer_count_dirtybufspace;	/* global */
83 int hammer_count_refedbufs;		/* global */
84 int hammer_count_reservations;
85 long hammer_count_io_running_read;
86 long hammer_count_io_running_write;
87 int hammer_count_io_locked;
88 long hammer_limit_dirtybufspace;	/* per-mount */
89 int hammer_limit_recs;			/* as a whole XXX */
90 int hammer_limit_inode_recs = 2048;	/* per inode */
91 int hammer_limit_reclaims;
92 int hammer_live_dedup_cache_size = 4096;
93 int hammer_limit_redo = 4096 * 1024;	/* per inode */
94 int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
95 int hammer_verify_zone;
96 int hammer_verify_data = 1;
97 int hammer_double_buffer;
98 int hammer_btree_full_undo = 1;
99 int hammer_yield_check = 16;
100 int hammer_fsync_mode = 3;
101 int64_t hammer_contention_count;
102 
103 int hammer_noatime = 1;
104 TUNABLE_INT("vfs.hammer.noatime", &hammer_noatime);
105 
106 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
107 SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
108 	   &hammer_supported_version, 0, "");
109 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
110 	   &hammer_debug_general, 0, "");
111 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
112 	   &hammer_debug_io, 0, "");
113 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
114 	   &hammer_debug_inode, 0, "");
115 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
116 	   &hammer_debug_locks, 0, "");
117 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
118 	   &hammer_debug_btree, 0, "");
119 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
120 	   &hammer_debug_tid, 0, "");
121 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
122 	   &hammer_debug_recover, 0, "");
123 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
124 	   &hammer_debug_critical, 0, "");
125 SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
126 	   &hammer_cluster_enable, 0, "");
127 SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
128 	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");
129 
130 SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
131 	   &hammer_limit_dirtybufspace, 0, "");
132 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
133 	   &hammer_limit_recs, 0, "");
134 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
135 	   &hammer_limit_inode_recs, 0, "");
136 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
137 	   &hammer_limit_reclaims, 0, "");
138 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
139 	   &hammer_live_dedup_cache_size, 0,
140 	   "Number of cache entries");
141 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
142 	   &hammer_limit_redo, 0, "");
143 
144 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
145 	   &hammer_count_fsyncs, 0, "");
146 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
147 	   &hammer_count_inodes, 0, "");
148 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
149 	   &hammer_count_iqueued, 0, "");
150 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
151 	   &hammer_count_reclaims, 0, "");
152 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
153 	   &hammer_count_records, 0, "");
154 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
155 	   &hammer_count_record_datas, 0, "");
156 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
157 	   &hammer_count_volumes, 0, "");
158 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
159 	   &hammer_count_buffers, 0, "");
160 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
161 	   &hammer_count_nodes, 0, "");
162 
163 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
164 	   &hammer_stats_btree_searches, 0, "");
165 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
166 	   &hammer_stats_btree_lookups, 0, "");
167 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
168 	   &hammer_stats_btree_inserts, 0, "");
169 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
170 	   &hammer_stats_btree_deletes, 0, "");
171 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
172 	   &hammer_stats_btree_elements, 0, "");
173 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
174 	   &hammer_stats_btree_splits, 0, "");
175 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
176 	   &hammer_stats_btree_iterations, 0, "");
177 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
178 	   &hammer_stats_btree_root_iterations, 0, "");
179 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
180 	   &hammer_stats_record_iterations, 0, "");
181 
182 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
183 	   &hammer_stats_file_read, 0, "");
184 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
185 	   &hammer_stats_file_write, 0, "");
186 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
187 	   &hammer_stats_disk_read, 0, "");
188 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
189 	   &hammer_stats_disk_write, 0, "");
190 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
191 	   &hammer_stats_inode_flushes, 0, "");
192 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
193 	   &hammer_stats_commits, 0, "");
194 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
195 	   &hammer_stats_undo, 0, "");
196 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
197 	   &hammer_stats_redo, 0, "");
198 
199 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
200 	   &hammer_count_dirtybufspace, 0, "");
201 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
202 	   &hammer_count_refedbufs, 0, "");
203 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
204 	   &hammer_count_reservations, 0, "");
205 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
206 	   &hammer_count_io_running_read, 0, "");
207 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
208 	   &hammer_count_io_locked, 0, "");
209 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
210 	   &hammer_count_io_running_write, 0, "");
211 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
212 	   &hammer_contention_count, 0, "");
213 SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
214 	   &hammer_autoflush, 0, "");
215 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
216 	   &hammer_verify_zone, 0, "");
217 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
218 	   &hammer_verify_data, 0, "");
219 SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
220 	   &hammer_double_buffer, 0, "");
221 SYSCTL_INT(_vfs_hammer, OID_AUTO, btree_full_undo, CTLFLAG_RW,
222 	   &hammer_btree_full_undo, 0, "");
223 SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
224 	   &hammer_yield_check, 0, "");
225 SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
226 	   &hammer_fsync_mode, 0, "");
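
/*
 * Example (illustrative only): the knobs above live under the vfs.hammer
 * sysctl tree and can be inspected or set from userland with sysctl(8),
 * e.g.:
 *
 *	sysctl vfs.hammer.stats_commits		(read a CTLFLAG_RD counter)
 *	sysctl vfs.hammer.debug_general=1	(set a CTLFLAG_RW knob)
 *
 * Only vfs.hammer.noatime has a TUNABLE_INT hook, so it is the only one
 * that can also be preset from loader.conf(5).
 */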
227 
228 /* KTR_INFO_MASTER(hammer); */
229 
230 /*
231  * VFS ABI
232  */
233 static void	hammer_free_hmp(struct mount *mp);
234 
235 static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
236 				struct ucred *cred);
237 static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
238 static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
239 static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
240 				struct ucred *cred);
241 static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
242 				struct ucred *cred);
243 static int	hammer_vfs_sync(struct mount *mp, int waitfor);
244 static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
245 				ino_t ino, struct vnode **vpp);
246 static int	hammer_vfs_init(struct vfsconf *conf);
247 static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
248 				struct fid *fhp, struct vnode **vpp);
249 static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
250 static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
251 				int *exflagsp, struct ucred **credanonp);
252 
253 
254 static struct vfsops hammer_vfsops = {
255 	.vfs_flags	= 0,
256 	.vfs_mount	= hammer_vfs_mount,
257 	.vfs_unmount	= hammer_vfs_unmount,
258 	.vfs_root	= hammer_vfs_root,
259 	.vfs_statfs	= hammer_vfs_statfs,
260 	.vfs_statvfs	= hammer_vfs_statvfs,
261 	.vfs_sync	= hammer_vfs_sync,
262 	.vfs_vget	= hammer_vfs_vget,
263 	.vfs_init	= hammer_vfs_init,
264 	.vfs_vptofh	= hammer_vfs_vptofh,
265 	.vfs_fhtovp	= hammer_vfs_fhtovp,
266 	.vfs_checkexp	= hammer_vfs_checkexp
267 };
268 
269 MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");
270 
271 VFS_SET(hammer_vfsops, hammer, VFCF_MPSAFE);
272 MODULE_VERSION(hammer, 1);
273 
274 static int
275 hammer_vfs_init(struct vfsconf *conf)
276 {
277 	long n;
278 
279 	/*
280 	 * Wait up to this long for an exclusive deadlock to clear
281 	 * before acquiring a new shared lock on the ip.  The deadlock
282 	 * may have occurred on a b-tree node related to the ip.
283 	 */
284 	if (hammer_tdmux_ticks == 0)
285 		hammer_tdmux_ticks = hz / 5;
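	/* e.g. with hz == 100 this defaults to 20 ticks (~200ms) */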
286 
287 	/*
288 	 * Autosize, but be careful because a hammer filesystem's
289 	 * reserve is partially calculated based on dirtybufspace,
290 	 * so we simply cannot allow it to get too large.
291 	 */
292 	if (hammer_limit_recs == 0) {
293 		n = nbuf * 25;
294 		if (n > kmalloc_limit(M_HAMMER) / 512)
295 			n = kmalloc_limit(M_HAMMER) / 512;
296 		if (n > 2 * 1024 * 1024)
297 			n = 2 * 1024 * 1024;
298 		hammer_limit_recs = (int)n;
299 	}
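	/*
	 * Illustrative example: with nbuf == 50000 and a sufficiently
	 * large M_HAMMER kmalloc limit, this autosizes to 50000 * 25 ==
	 * 1,250,000 records, comfortably under the 2M hard cap.  (The
	 * nbuf value is hypothetical.)
	 */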
300 	if (hammer_limit_dirtybufspace == 0) {
301 		hammer_limit_dirtybufspace = hidirtybufspace / 2;
302 		if (hammer_limit_dirtybufspace < 1L * 1024 * 1024)
303 			hammer_limit_dirtybufspace = 1024L * 1024;
304 		if (hammer_limit_dirtybufspace > 1024L * 1024 * 1024)
305 			hammer_limit_dirtybufspace = 1024L * 1024 * 1024;
306 	}
307 
308 	/*
309 	 * The hammer_inode structure detaches from the vnode on reclaim.
310 	 * This limits the number of inodes in this state to prevent a
311 	 * memory pool blowout.
312 	 */
313 	if (hammer_limit_reclaims == 0) {
314 		hammer_limit_reclaims = maxvnodes / 10;
315 		if (hammer_limit_reclaims > HAMMER_LIMIT_RECLAIMS)
316 			hammer_limit_reclaims = HAMMER_LIMIT_RECLAIMS;
317 	}
318 
319 	return(0);
320 }
321 
322 static int
323 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
324 		 struct ucred *cred)
325 {
326 	struct hammer_mount_info info;
327 	hammer_mount_t hmp;
328 	hammer_volume_t rootvol;
329 	struct vnode *rootvp;
330 	struct vnode *devvp = NULL;
331 	const char *upath;	/* volume name in userspace */
332 	char *path;		/* volume name in system space */
333 	int error;
334 	int i;
335 	int master_id;
336 	int nvolumes;
337 	char *next_volume_ptr = NULL;
338 
339 	if (hammer_noatime) {
340 		/* Force noatime */
341 		mp->mnt_flag |= MNT_NOATIME;
342 	}
343 
344 	/*
345 	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
346 	 */
347 	if (mntpt == NULL) {
348 		bzero(&info, sizeof(info));
349 		info.asof = 0;
350 		info.hflags = 0;
351 		info.nvolumes = 1;
352 
353 		next_volume_ptr = mp->mnt_stat.f_mntfromname;
354 
355 		/* Count number of volumes separated by ':' */
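		/*
		 * Illustrative example: a root mount specification of
		 * "da0s1a:da1s1a" (hypothetical device names) contains one
		 * ':' separator and therefore yields info.nvolumes == 2.
		 */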
356 		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
357 			if (*p == ':') {
358 				++info.nvolumes;
359 			}
360 		}
361 
362 		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
363 	} else {
364 		if ((error = copyin(data, &info, sizeof(info))) != 0)
365 			return (error);
366 	}
367 
368 	/*
369 	 * updating or new mount
370 	 */
371 	if (mp->mnt_flag & MNT_UPDATE) {
372 		hmp = (void *)mp->mnt_data;
373 		KKASSERT(hmp != NULL);
374 	} else {
375 		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
376 			return (EINVAL);
377 		hmp = NULL;
378 	}
379 
380 	/*
381 	 * master-id validation.  The master id may not be changed by a
382 	 * mount update.
383 	 */
384 	if (info.hflags & HMNT_MASTERID || info.hflags & HMNT_NOMIRROR) {
385 		if (hmp && hmp->master_id != info.master_id) {
386 			hkprintf("cannot change master id with mount update\n");
387 			return(EINVAL);
388 		}
389 		master_id = info.master_id;
390 		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
391 			return (EINVAL);
392 	} else {
393 		if (hmp)
394 			master_id = hmp->master_id;
395 		else
396 			master_id = 0;
397 	}
398 
399 	/*
400 	 * Internal mount data structure
401 	 */
402 	if (hmp == NULL) {
403 		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
404 		mp->mnt_data = (qaddr_t)hmp;
405 		hmp->mp = mp;
406 
407 		/*
408 		 * Make sure kmalloc type limits are set appropriately.
409 		 *
410 		 * Our inode kmalloc group is sized based on maxvnodes
411 		 * (controlled by the system, not us).
412 		 */
413 		kmalloc_create(&hmp->m_misc, "HAMMER-others");
414 		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
415 
416 		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */
417 
418 		hmp->root_btree_beg.localization =
419 			HAMMER_MIN_ONDISK_LOCALIZATION;
420 		hmp->root_btree_beg.obj_id = HAMMER_MIN_OBJID;
421 		hmp->root_btree_beg.key = HAMMER_MIN_KEY;
422 		hmp->root_btree_beg.create_tid = 1;
423 		hmp->root_btree_beg.delete_tid = 1;
424 		hmp->root_btree_beg.rec_type = HAMMER_MIN_RECTYPE;
425 		hmp->root_btree_beg.obj_type = 0;
426 		hmp->root_btree_beg.btype = HAMMER_BTREE_TYPE_NONE;
427 
428 		hmp->root_btree_end.localization =
429 			HAMMER_MAX_ONDISK_LOCALIZATION;
430 		hmp->root_btree_end.obj_id = HAMMER_MAX_OBJID;
431 		hmp->root_btree_end.key = HAMMER_MAX_KEY;
432 		hmp->root_btree_end.create_tid = HAMMER_MAX_TID;
433 		hmp->root_btree_end.delete_tid = 0;   /* special case */
434 		hmp->root_btree_end.rec_type = HAMMER_MAX_RECTYPE;
435 		hmp->root_btree_end.obj_type = 0;
436 		hmp->root_btree_end.btype = HAMMER_BTREE_TYPE_NONE;
437 
438 		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
439 		hmp->krate.count = -16;	/* initial burst */
440 		hmp->kdiag.freq = 1;	/* maximum reporting rate (hz) */
441 		hmp->kdiag.count = -16;	/* initial burst */
442 
443 		hmp->sync_lock.refs = 1;
444 		hmp->undo_lock.refs = 1;
445 		hmp->blkmap_lock.refs = 1;
446 		hmp->snapshot_lock.refs = 1;
447 		hmp->volume_lock.refs = 1;
448 
449 		TAILQ_INIT(&hmp->delay_list);
450 		TAILQ_INIT(&hmp->flush_group_list);
451 		TAILQ_INIT(&hmp->objid_cache_list);
452 		TAILQ_INIT(&hmp->undo_lru_list);
453 		TAILQ_INIT(&hmp->reclaim_list);
454 	}
455 	hmp->hflags &= ~HMNT_USERFLAGS;
456 	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
457 
458 	hmp->master_id = master_id;
459 
460 	if (info.asof) {
461 		mp->mnt_flag |= MNT_RDONLY;
462 		hmp->asof = info.asof;
463 	} else {
464 		hmp->asof = HAMMER_MAX_TID;
465 	}
466 
467 	hmp->volume_to_remove = -1;
468 
469 	/*
470 	 * Re-open read-write if originally read-only, or vice versa.
471 	 *
472 	 * When going from read-only to read-write execute the stage2
473 	 * recovery if it has not already been run.
474 	 */
475 	if (mp->mnt_flag & MNT_UPDATE) {
476 		lwkt_gettoken(&hmp->fs_token);
477 		error = 0;
478 		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
479 			hkprintf("read-only -> read-write\n");
480 			hmp->ronly = 0;
481 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
482 				hammer_adjust_volume_mode, NULL);
483 			rootvol = hammer_get_root_volume(hmp, &error);
484 			if (rootvol) {
485 				hammer_recover_flush_buffers(hmp, rootvol, 1);
486 				error = hammer_recover_stage2(hmp, rootvol);
487 				bcopy(rootvol->ondisk->vol0_blockmap,
488 				      hmp->blockmap,
489 				      sizeof(hmp->blockmap));
490 				hammer_rel_volume(rootvol, 0);
491 			}
492 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
493 				hammer_reload_inode, NULL);
494 			/* kernel clears MNT_RDONLY */
495 		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
496 			hkprintf("read-write -> read-only\n");
497 			hmp->ronly = 1;	/* messy */
498 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
499 				hammer_reload_inode, NULL);
500 			hmp->ronly = 0;
501 			hammer_flusher_sync(hmp);
502 			hammer_flusher_sync(hmp);
503 			hammer_flusher_sync(hmp);
504 			hmp->ronly = 1;
505 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
506 				hammer_adjust_volume_mode, NULL);
507 		}
508 		lwkt_reltoken(&hmp->fs_token);
509 		return(error);
510 	}
511 
512 	RB_INIT(&hmp->rb_vols_root);
513 	RB_INIT(&hmp->rb_inos_root);
514 	RB_INIT(&hmp->rb_redo_root);
515 	RB_INIT(&hmp->rb_nods_root);
516 	RB_INIT(&hmp->rb_undo_root);
517 	RB_INIT(&hmp->rb_resv_root);
518 	RB_INIT(&hmp->rb_bufs_root);
519 	RB_INIT(&hmp->rb_pfsm_root);
520 
521 	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
522 
523 	RB_INIT(&hmp->volu_root);
524 	RB_INIT(&hmp->undo_root);
525 	RB_INIT(&hmp->data_root);
526 	RB_INIT(&hmp->meta_root);
527 	RB_INIT(&hmp->lose_root);
528 	TAILQ_INIT(&hmp->iorun_list);
529 
530 	lwkt_token_init(&hmp->fs_token, "hammerfs");
531 	lwkt_token_init(&hmp->io_token, "hammerio");
532 
533 	lwkt_gettoken(&hmp->fs_token);
534 
535 	/*
536 	 * Load volumes
537 	 */
538 	path = objcache_get(namei_oc, M_WAITOK);
539 	hmp->nvolumes = -1;
540 	for (i = 0; i < info.nvolumes; ++i) {
541 		if (mntpt == NULL) {
542 			/*
543 			 * Root mount.
544 			 */
545 			KKASSERT(next_volume_ptr != NULL);
546 			strcpy(path, "");
547 			if (*next_volume_ptr != '/') {
548 				/* relative path */
549 				strcpy(path, "/dev/");
550 			}
551 			int k;
552 			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
553 				if (*next_volume_ptr == '\0') {
554 					break;
555 				} else if (*next_volume_ptr == ':') {
556 					++next_volume_ptr;
557 					break;
558 				} else {
559 					path[k] = *next_volume_ptr;
560 					++next_volume_ptr;
561 				}
562 			}
563 			path[k] = '\0';
564 
565 			error = 0;
566 			cdev_t dev = kgetdiskbyname(path);
567 			error = bdevvp(dev, &devvp);
568 			if (error) {
569 				hdkprintf("can't find devvp\n");
570 			}
571 		} else {
572 			error = copyin(&info.volumes[i], &upath,
573 				       sizeof(char *));
574 			if (error == 0)
575 				error = copyinstr(upath, path,
576 						  MAXPATHLEN, NULL);
577 		}
578 		if (error == 0)
579 			error = hammer_install_volume(hmp, path, devvp, NULL);
580 		if (error)
581 			break;
582 	}
583 	objcache_put(namei_oc, path);
584 
585 	/*
586 	 * Make sure we found a root volume
587 	 */
588 	if (hmp->rootvol == NULL) {
589 		if (error == EBUSY) {
590 			hdkprintf("The volumes are probably mounted\n");
591 		} else {
592 			hdkprintf("No root volume found!\n");
593 			error = EINVAL;
594 		}
595 		goto failed;
596 	}
597 
598 	/*
599 	 * Check that all required volumes are available
600 	 */
601 	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
602 		hdkprintf("Missing volumes, cannot mount!\n");
603 		error = EINVAL;
604 		goto failed;
605 	}
606 
607 	/*
608 	 * Other errors
609 	 */
610 	if (error) {
611 		hdkprintf("Failed to load volumes!\n");
612 		goto failed;
613 	}
614 
615 	nvolumes = hammer_get_installed_volumes(hmp);
616 	if (hmp->nvolumes != nvolumes) {
617 		hdkprintf("volume header says %d volumes, but %d installed\n",
618 			hmp->nvolumes, nvolumes);
619 		error = EINVAL;
620 		goto failed;
621 	}
622 
623 	/*
624 	 * No errors, setup enough of the mount point so we can lookup the
625 	 * root vnode.
626 	 */
627 	mp->mnt_iosize_max = MAXPHYS;
628 	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */
629 
630 	/*
631 	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
632 	 * will acquire a per-mount token prior to entry and release it
633 	 * on return.
634 	 */
635 	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
636 
637 	/*
638 	 * note: f_iosize is used by vnode_pager_haspage() when constructing
639 	 * its VOP_BMAP call.
640 	 */
641 	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
642 	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
643 
644 	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
645 	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;
646 
647 	mp->mnt_maxsymlinklen = 255;
648 	mp->mnt_flag |= MNT_LOCAL;
649 
650 	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
651 	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
652 	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
653 
654 	/*
655 	 * The root volume's ondisk pointer is only valid if we hold a
656 	 * reference to it.
657 	 */
658 	rootvol = hammer_get_root_volume(hmp, &error);
659 	if (error)
660 		goto failed;
661 
662 	/*
663 	 * Perform any necessary UNDO operations.  The recovery code does
664 	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
665 	 * and then re-copy it again after recovery is complete.
666 	 *
667 	 * If this is a read-only mount the UNDO information is retained
668 	 * in memory in the form of dirty buffer cache buffers, and not
669 	 * written back to the media.
670 	 */
671 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
672 	      sizeof(hmp->blockmap));
673 
674 	/*
675 	 * Check filesystem version
676 	 */
677 	hmp->version = rootvol->ondisk->vol_version;
678 	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
679 	    hmp->version > HAMMER_VOL_VERSION_MAX) {
680 		hkprintf("mount unsupported fs version %d\n", hmp->version);
681 		error = ERANGE;
682 		goto done;
683 	}
684 
685 	/*
686 	 * The undo_rec_limit limits the size of flush groups to avoid
687 	 * blowing out the UNDO FIFO.  The calculated limit is typically in
688 	 * the tens of thousands and matters primarily when small
689 	 * HAMMER filesystems are created.
690 	 */
691 	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
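	/*
	 * Illustrative example: with a 1GB UNDO FIFO this works out to
	 * 1073741824 / 8192 + 100 == 131172 records per flush group.
	 */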
692 	if (hammer_debug_general & 0x0001)
693 		hkprintf("undo_rec_limit %d\n", hmp->undo_rec_limit);
694 
695 	/*
696 	 * NOTE: Recover stage1 not only handles meta-data recovery, it
697 	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
698 	 */
699 	error = hammer_recover_stage1(hmp, rootvol);
700 	if (error) {
701 		kprintf("Failed to recover HAMMER filesystem on mount\n");
702 		goto done;
703 	}
704 
705 	/*
706 	 * Finish setup now that we have a good root volume.
707 	 */
708 	ksnprintf(mp->mnt_stat.f_mntfromname,
709 		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
710 		  rootvol->ondisk->vol_label);
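	/*
	 * Derive the statfs fsid from the volume UUID: val[0] CRCs the
	 * first 8 bytes and val[1] the last 8 bytes, with val[1] masked
	 * down to the localization bits.
	 */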
711 	mp->mnt_stat.f_fsid.val[0] =
712 		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
713 	mp->mnt_stat.f_fsid.val[1] =
714 		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
715 	mp->mnt_stat.f_fsid.val[1] &= HAMMER_LOCALIZE_MASK;
716 
717 	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
718 	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
719 				     sizeof(mp->mnt_vstat.f_fsid_uuid));
720 
721 	/*
722 	 * Certain often-modified fields in the root volume are cached in
723 	 * the hammer_mount structure so we do not have to generate lots
724 	 * of little UNDO structures for them.
725 	 *
726 	 * Recopy after recovery.  This also has the side effect of
727 	 * setting our cached undo FIFO's first_offset, which serves to
728 	 * placemark the FIFO start for the NEXT flush cycle while the
729 	 * on-disk first_offset represents the LAST flush cycle.
730 	 */
731 	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
732 	hmp->flush_tid1 = hmp->next_tid;
733 	hmp->flush_tid2 = hmp->next_tid;
734 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
735 	      sizeof(hmp->blockmap));
736 	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
737 
738 	hammer_flusher_create(hmp);
739 
740 	/*
741 	 * Locate the root directory with an obj_id of 1.
742 	 */
743 	error = hammer_vfs_root(mp, &rootvp);
744 	if (error)
745 		goto done;
746 	vput(rootvp);
747 	if (hmp->ronly == 0)
748 		error = hammer_recover_stage2(hmp, rootvol);
749 
750 	/*
751 	 * If the stage2 recovery fails be sure to clean out all cached
752 	 * vnodes before throwing away the mount structure or bad things
753 	 * will happen.
754 	 */
755 	if (error)
756 		vflush(mp, 0, 0);
757 
758 done:
759 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
760 		/* New mount */
761 
762 		/* Populate info for mount point (NULL pad) */
763 		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
764 		size_t size;
765 		if (mntpt) {
766 			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
767 							MNAMELEN -1, &size);
768 		} else { /* Root mount */
769 			mp->mnt_stat.f_mntonname[0] = '/';
770 		}
771 	}
772 	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
773 	hammer_rel_volume(rootvol, 0);
774 failed:
775 	/*
776 	 * Cleanup and return.
777 	 */
778 	if (error) {
779 		/* called with fs_token held */
780 		hammer_free_hmp(mp);
781 	} else {
782 		lwkt_reltoken(&hmp->fs_token);
783 	}
784 	return (error);
785 }
786 
787 static int
788 hammer_vfs_unmount(struct mount *mp, int mntflags)
789 {
790 	hammer_mount_t hmp = (void *)mp->mnt_data;
791 	int flags;
792 	int error;
793 
794 	/*
795 	 * Clean out the vnodes
796 	 */
797 	lwkt_gettoken(&hmp->fs_token);
798 	flags = 0;
799 	if (mntflags & MNT_FORCE)
800 		flags |= FORCECLOSE;
801 	error = vflush(mp, 0, flags);
802 
803 	/*
804 	 * Clean up the internal mount structure and related entities.  This
805 	 * may issue I/O.
806 	 */
807 	if (error == 0) {
808 		/* called with fs_token held */
809 		hammer_free_hmp(mp);
810 	} else {
811 		lwkt_reltoken(&hmp->fs_token);
812 	}
813 	return(error);
814 }
815 
816 /*
817  * Clean up the internal mount structure and disassociate it from the mount.
818  * This may issue I/O.
819  *
820  * Called with fs_token held.
821  */
822 static void
823 hammer_free_hmp(struct mount *mp)
824 {
825 	hammer_mount_t hmp = (void *)mp->mnt_data;
826 	hammer_flush_group_t flg;
827 
828 	/*
829 	 * Flush anything dirty.  This won't even run if the
830 	 * filesystem errored-out.
831 	 */
832 	hammer_flush_dirty(hmp, 30);
833 
834 	/*
835 	 * If the mount had a critical error we have to destroy any
836 	 * remaining inodes before we can finish cleaning up the flusher.
837 	 */
838 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
839 		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
840 			hammer_destroy_inode_callback, NULL);
841 	}
842 
843 	/*
844 	 * There shouldn't be any inodes left now and any left over
845 	 * flush groups should now be empty.
846 	 */
847 	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
848 	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
849 		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
850 		KKASSERT(RB_EMPTY(&flg->flush_tree));
851 		if (flg->refs) {
852 			hkprintf("Warning, flush_group %p was "
853 				"not empty on umount!\n", flg);
854 		}
855 		kfree(flg, hmp->m_misc);
856 	}
857 
858 	/*
859 	 * We can finally destroy the flusher
860 	 */
861 	hammer_flusher_destroy(hmp);
862 
863 	/*
864 	 * We may have held recovered buffers due to a read-only mount.
865 	 * These must be discarded.
866 	 */
867 	if (hmp->ronly)
868 		hammer_recover_flush_buffers(hmp, NULL, -1);
869 
870 	/*
871 	 * Unload buffers and then volumes
872 	 */
873 	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
874 		hammer_unload_buffer, NULL);
875 	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
876 		hammer_unload_volume, NULL);
877 
878 	mp->mnt_data = NULL;
879 	mp->mnt_flag &= ~MNT_LOCAL;
880 	hmp->mp = NULL;
881 	hammer_destroy_objid_cache(hmp);
882 	kmalloc_destroy(&hmp->m_misc);
883 	kmalloc_destroy(&hmp->m_inodes);
884 	lwkt_reltoken(&hmp->fs_token);
885 	kfree(hmp, M_HAMMER);
886 }
887 
888 /*
889  * Report critical errors.  ip may be NULL.
890  */
891 void
892 hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
893 		      int error, const char *msg)
894 {
895 	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
896 
897 	hmkrateprintf(&hmp->krate, hmp,
898 		    "Critical error inode=%jd error=%d %s\n",
899 		    (intmax_t)(ip ? ip->obj_id : -1),
900 		    error, msg);
901 
902 	if (hmp->ronly == 0) {
903 		hmp->ronly = 2;		/* special errored read-only mode */
904 		hmp->mp->mnt_flag |= MNT_RDONLY;
905 		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
906 			hammer_adjust_volume_mode, NULL);
907 		hmkprintf(hmp, "Forcing read-only mode\n");
908 	}
909 	hmp->error = error;
910 	if (hammer_debug_critical)
911 		Debugger("Entering debugger");
912 }
913 
914 
915 /*
916  * Obtain a vnode for the specified inode number.  An exclusively locked
917  * vnode is returned.
918  */
919 int
920 hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
921 		ino_t ino, struct vnode **vpp)
922 {
923 	struct hammer_transaction trans;
924 	hammer_mount_t hmp = (void *)mp->mnt_data;
925 	hammer_inode_t ip;
926 	int error;
927 	uint32_t localization;
928 
929 	lwkt_gettoken(&hmp->fs_token);
930 	hammer_simple_transaction(&trans, hmp);
931 
932 	/*
933 	 * If a directory vnode is supplied (mainly NFS) then we can acquire
934 	 * the PFS domain from it.  Otherwise we would only be able to vget
935 	 * inodes in the root PFS.
936 	 */
937 	if (dvp) {
938 		localization = HAMMER_DEF_LOCALIZATION |
939 				VTOI(dvp)->obj_localization;
940 	} else {
941 		localization = HAMMER_DEF_LOCALIZATION;
942 	}
943 
944 	/*
945 	 * Lookup the requested HAMMER inode.  The structure must be
946 	 * left unlocked while we manipulate the related vnode to avoid
947 	 * a deadlock.
948 	 */
949 	ip = hammer_get_inode(&trans, NULL, ino,
950 			      hmp->asof, localization,
951 			      0, &error);
952 	if (ip == NULL) {
953 		*vpp = NULL;
954 	} else {
955 		error = hammer_get_vnode(ip, vpp);
956 		hammer_rel_inode(ip, 0);
957 	}
958 	hammer_done_transaction(&trans);
959 	lwkt_reltoken(&hmp->fs_token);
960 	return (error);
961 }
962 
963 /*
964  * Return the root vnode for the filesystem.
965  *
966  * HAMMER stores the root vnode in the hammer_mount structure so
967  * getting it is easy.
968  */
969 static int
970 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
971 {
972 	int error;
973 
974 	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, vpp);
975 	return (error);
976 }
977 
978 static int
979 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
980 {
981 	hammer_mount_t hmp = (void *)mp->mnt_data;
982 	hammer_volume_t volume;
983 	hammer_volume_ondisk_t ondisk;
984 	int error;
985 	int64_t bfree;
986 	int64_t breserved;
987 
988 	lwkt_gettoken(&hmp->fs_token);
989 	volume = hammer_get_root_volume(hmp, &error);
990 	if (error) {
991 		lwkt_reltoken(&hmp->fs_token);
992 		return(error);
993 	}
994 	ondisk = volume->ondisk;
995 
996 	/*
997 	 * Basic stats
998 	 */
999 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1000 	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
1001 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
1002 	hammer_rel_volume(volume, 0);
1003 
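	/*
	 * Convert the byte counts to f_bsize (HAMMER_BUFSIZE) units.
	 * Illustrative example: with stock 8MB big-blocks and 16KB
	 * buffers, 10 free big-blocks and no reservation report as
	 * 10 * 8MB / 16KB == 5120 free blocks.
	 */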
1004 	if (breserved > bfree)
1005 		breserved = bfree;
1006 	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1007 	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
1008 	if (mp->mnt_stat.f_files < 0)
1009 		mp->mnt_stat.f_files = 0;
1010 
1011 	*sbp = mp->mnt_stat;
1012 	lwkt_reltoken(&hmp->fs_token);
1013 	return(0);
1014 }
1015 
1016 static int
1017 hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1018 {
1019 	hammer_mount_t hmp = (void *)mp->mnt_data;
1020 	hammer_volume_t volume;
1021 	hammer_volume_ondisk_t ondisk;
1022 	int error;
1023 	int64_t bfree;
1024 	int64_t breserved;
1025 
1026 	lwkt_gettoken(&hmp->fs_token);
1027 	volume = hammer_get_root_volume(hmp, &error);
1028 	if (error) {
1029 		lwkt_reltoken(&hmp->fs_token);
1030 		return(error);
1031 	}
1032 	ondisk = volume->ondisk;
1033 
1034 	/*
1035 	 * Basic stats
1036 	 */
1037 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1038 	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
1039 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
1040 	hammer_rel_volume(volume, 0);
1041 
1042 	if (breserved > bfree)
1043 		breserved = bfree;
1044 	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1045 	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
1046 	if (mp->mnt_vstat.f_files < 0)
1047 		mp->mnt_vstat.f_files = 0;
1048 	*sbp = mp->mnt_vstat;
1049 	lwkt_reltoken(&hmp->fs_token);
1050 	return(0);
1051 }
1052 
1053 /*
1054  * Sync the filesystem.  Currently we have to run it twice: the second
1055  * pass advances the undo start index to the end index, so if a crash
1056  * occurs no undos will be run on mount.
1057  *
1058  * We do not sync the filesystem if we are called from a panic.  If we did
1059  * we might end up blowing up a sync that was already in progress.
1060  */
1061 static int
1062 hammer_vfs_sync(struct mount *mp, int waitfor)
1063 {
1064 	hammer_mount_t hmp = (void *)mp->mnt_data;
1065 	int error;
1066 
1067 	lwkt_gettoken(&hmp->fs_token);
1068 	if (panicstr == NULL) {
1069 		error = hammer_sync_hmp(hmp, waitfor);
1070 	} else {
1071 		error = EIO;
1072 	}
1073 	lwkt_reltoken(&hmp->fs_token);
1074 	return (error);
1075 }
1076 
1077 /*
1078  * Convert a vnode to a file handle.
1079  *
1080  * Accesses read-only fields on already-referenced structures so
1081  * no token is needed.
1082  */
1083 static int
1084 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
1085 {
1086 	hammer_inode_t ip;
1087 
1088 	KKASSERT(MAXFIDSZ >= 16);
1089 	ip = VTOI(vp);
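	/*
	 * Illustrative layout of the 16 opaque fid_data bytes built below
	 * (both fields are 64-bit quantities):
	 *
	 *	fid_data[0..7]  : obj_id   (inode number)
	 *	fid_data[8..15] : obj_asof (as-of TID used for snapshot access)
	 *
	 * The PFS id travels separately in fid_ext.
	 */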
1090 	fhp->fid_len = offsetof(struct fid, fid_data[16]);
1091 	fhp->fid_ext = lo_to_pfs(ip->obj_localization);
1092 	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
1093 	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
1094 	return(0);
1095 }
1096 
1097 
1098 /*
1099  * Convert a file handle back to a vnode.
1100  *
1101  * Use rootvp to enforce PFS isolation when a PFS is exported via a
1102  * null mount.
1103  */
1104 static int
1105 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
1106 		  struct fid *fhp, struct vnode **vpp)
1107 {
1108 	hammer_mount_t hmp = (void *)mp->mnt_data;
1109 	struct hammer_transaction trans;
1110 	hammer_inode_t ip;
1111 	struct hammer_inode_info info;
1112 	int error;
1113 	uint32_t localization;
1114 
1115 	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
1116 	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
1117 	if (rootvp)
1118 		localization = VTOI(rootvp)->obj_localization;
1119 	else
1120 		localization = pfs_to_lo(fhp->fid_ext);
1121 
1122 	lwkt_gettoken(&hmp->fs_token);
1123 	hammer_simple_transaction(&trans, hmp);
1124 
1125 	/*
1126 	 * Get/allocate the hammer_inode structure.  The structure must be
1127 	 * unlocked while we manipulate the related vnode to avoid a
1128 	 * deadlock.
1129 	 */
1130 	ip = hammer_get_inode(&trans, NULL, info.obj_id,
1131 			      info.obj_asof, localization, 0, &error);
1132 	if (ip) {
1133 		error = hammer_get_vnode(ip, vpp);
1134 		hammer_rel_inode(ip, 0);
1135 	} else {
1136 		*vpp = NULL;
1137 	}
1138 	hammer_done_transaction(&trans);
1139 	lwkt_reltoken(&hmp->fs_token);
1140 	return (error);
1141 }
1142 
1143 static int
1144 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1145 		    int *exflagsp, struct ucred **credanonp)
1146 {
1147 	hammer_mount_t hmp = (void *)mp->mnt_data;
1148 	struct netcred *np;
1149 	int error;
1150 
1151 	lwkt_gettoken(&hmp->fs_token);
1152 	np = vfs_export_lookup(mp, &hmp->export, nam);
1153 	if (np) {
1154 		*exflagsp = np->netc_exflags;
1155 		*credanonp = &np->netc_anon;
1156 		error = 0;
1157 	} else {
1158 		error = EACCES;
1159 	}
1160 	lwkt_reltoken(&hmp->fs_token);
1161 	return (error);
1162 
1163 }
1164 
1165 int
1166 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
1167 {
1168 	hammer_mount_t hmp = (void *)mp->mnt_data;
1169 	int error;
1170 
1171 	lwkt_gettoken(&hmp->fs_token);
1172 
1173 	switch(op) {
1174 	case MOUNTCTL_SET_EXPORT:
1175 		error = vfs_export(mp, &hmp->export, export);
1176 		break;
1177 	default:
1178 		error = EOPNOTSUPP;
1179 		break;
1180 	}
1181 	lwkt_reltoken(&hmp->fs_token);
1182 
1183 	return(error);
1184 }
1185 
1186