1 /*
2 * Copyright (c) 2011-2023 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/nlookup.h>
39 #include <sys/vnode.h>
40 #include <sys/mount.h>
41 #include <sys/fcntl.h>
42 #include <sys/vfsops.h>
43 #include <sys/sysctl.h>
44 #include <sys/socket.h>
45 #include <sys/objcache.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/file.h>
49
50 #include "hammer2.h"
51
52 TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
53 static struct hammer2_mntlist hammer2_mntlist;
54
55 struct hammer2_pfslist hammer2_pfslist;
56 struct hammer2_pfslist hammer2_spmplist;
57 struct lock hammer2_mntlk;
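/*
 * hammer2_mntlist tracks one hammer2_dev per opened block device;
 * hammer2_pfslist and hammer2_spmplist track PFS mounts and per-device
 * super-root PFSs respectively.  All are manipulated under
 * hammer2_mntlk during mount and unmount.
 */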
58
59 int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
60 int hammer2_debug;
61 int hammer2_aux_flags;
62 int hammer2_xop_nthreads;
63 int hammer2_xop_sgroups;
64 int hammer2_xop_xgroups;
65 int hammer2_xop_xbase;
66 int hammer2_xop_mod;
67 long hammer2_debug_inode;
68 int hammer2_cluster_meta_read = 1; /* physical read-ahead */
69 int hammer2_cluster_data_read = 4; /* physical read-ahead */
70 int hammer2_cluster_write = 0; /* physical write clustering */
71 int hammer2_dedup_enable = 1;
72 int hammer2_always_compress = 0; /* always try to compress */
73 int hammer2_flush_pipe = 100;
74 int hammer2_dio_count;
75 int hammer2_dio_limit = 256;
76 int hammer2_bulkfree_tps = 5000;
77 int hammer2_spread_workers;
78 int hammer2_limit_saved_depth;
79 long hammer2_chain_allocs;
80 long hammer2_limit_saved_chains;
81 long hammer2_limit_dirty_chains;
82 long hammer2_limit_dirty_inodes;
83 long hammer2_count_modified_chains;
84 long hammer2_iod_file_read;
85 long hammer2_iod_meta_read;
86 long hammer2_iod_indr_read;
87 long hammer2_iod_fmap_read;
88 long hammer2_iod_volu_read;
89 long hammer2_iod_file_write;
90 long hammer2_iod_file_wembed;
91 long hammer2_iod_file_wzero;
92 long hammer2_iod_file_wdedup;
93 long hammer2_iod_meta_write;
94 long hammer2_iod_indr_write;
95 long hammer2_iod_fmap_write;
96 long hammer2_iod_volu_write;
97 static long hammer2_iod_inode_creates;
98 static long hammer2_iod_inode_deletes;
99
100 long hammer2_process_icrc32;
101 long hammer2_process_xxhash64;
102
103 MALLOC_DECLARE(M_HAMMER2_CBUFFER);
104 MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
105 "Buffer used for compression.");
106
107 MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
108 MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
109 "Buffer used for decompression.");
110
111 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
112
113 SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
114 &hammer2_supported_version, 0, "");
115 SYSCTL_INT(_vfs_hammer2, OID_AUTO, aux_flags, CTLFLAG_RW,
116 &hammer2_aux_flags, 0, "");
117 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
118 &hammer2_debug, 0, "");
119 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW,
120 &hammer2_debug_inode, 0, "");
121 SYSCTL_INT(_vfs_hammer2, OID_AUTO, spread_workers, CTLFLAG_RW,
122 &hammer2_spread_workers, 0, "");
123 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
124 &hammer2_cluster_meta_read, 0, "");
125 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
126 &hammer2_cluster_data_read, 0, "");
127 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
128 &hammer2_cluster_write, 0, "");
129 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
130 &hammer2_dedup_enable, 0, "");
131 SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
132 &hammer2_always_compress, 0, "");
133 SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
134 &hammer2_flush_pipe, 0, "");
135 SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
136 &hammer2_bulkfree_tps, 0, "");
137 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RD,
138 &hammer2_chain_allocs, 0, "");
139 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_saved_chains, CTLFLAG_RW,
140 &hammer2_limit_saved_chains, 0, "");
141 SYSCTL_INT(_vfs_hammer2, OID_AUTO, limit_saved_depth, CTLFLAG_RW,
142 &hammer2_limit_saved_depth, 0, "");
143 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
144 &hammer2_limit_dirty_chains, 0, "");
145 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW,
146 &hammer2_limit_dirty_inodes, 0, "");
147 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RD,
148 &hammer2_count_modified_chains, 0, "");
149 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
150 &hammer2_dio_count, 0, "");
151 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
152 &hammer2_dio_limit, 0, "");
153
154 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RD,
155 &hammer2_iod_file_read, 0, "");
156 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RD,
157 &hammer2_iod_meta_read, 0, "");
158 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RD,
159 &hammer2_iod_indr_read, 0, "");
160 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RD,
161 &hammer2_iod_fmap_read, 0, "");
162 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RD,
163 &hammer2_iod_volu_read, 0, "");
164
165 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RD,
166 &hammer2_iod_file_write, 0, "");
167 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RD,
168 &hammer2_iod_file_wembed, 0, "");
169 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RD,
170 &hammer2_iod_file_wzero, 0, "");
171 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RD,
172 &hammer2_iod_file_wdedup, 0, "");
173 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RD,
174 &hammer2_iod_meta_write, 0, "");
175 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RD,
176 &hammer2_iod_indr_write, 0, "");
177 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RD,
178 &hammer2_iod_fmap_write, 0, "");
179 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RD,
180 &hammer2_iod_volu_write, 0, "");
181 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RD,
182 &hammer2_iod_inode_creates, 0, "");
183 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RD,
184 &hammer2_iod_inode_deletes, 0, "");
185
186 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RD,
187 &hammer2_process_icrc32, 0, "");
188 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RD,
189 &hammer2_process_xxhash64, 0, "");
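/*
 * The knobs above are exported under the vfs.hammer2 sysctl tree,
 * e.g. from userland:
 *
 *	sysctl vfs.hammer2.dedup_enable=0
 *	sysctl vfs.hammer2.iod_file_write
 */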
190
191 static int hammer2_vfs_init(struct vfsconf *conf);
192 static int hammer2_vfs_uninit(struct vfsconf *vfsp);
193 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
194 struct ucred *cred);
195 static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
196 struct ucred *);
197 static int hammer2_recovery(hammer2_dev_t *hmp);
198 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
199 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
200 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
201 struct ucred *cred);
202 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
203 struct ucred *cred);
204 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
205 struct fid *fhp, struct vnode **vpp);
206 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
207 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
208 int *exflagsp, struct ucred **credanonp);
209 static int hammer2_vfs_modifying(struct mount *mp);
210
211 static void hammer2_update_pmps(hammer2_dev_t *hmp);
212
213 static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
214 static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
215 hammer2_dev_t *hmp);
216 static int hammer2_fixup_pfses(hammer2_dev_t *hmp);
217
218 /*
219 * HAMMER2 vfs operations.
220 */
221 static struct vfsops hammer2_vfsops = {
222 .vfs_flags = 0,
223 .vfs_init = hammer2_vfs_init,
224 .vfs_uninit = hammer2_vfs_uninit,
225 .vfs_sync = hammer2_vfs_sync,
226 .vfs_mount = hammer2_vfs_mount,
227 .vfs_unmount = hammer2_vfs_unmount,
228 .vfs_root = hammer2_vfs_root,
229 .vfs_statfs = hammer2_vfs_statfs,
230 .vfs_statvfs = hammer2_vfs_statvfs,
231 .vfs_vget = hammer2_vfs_vget,
232 .vfs_vptofh = hammer2_vfs_vptofh,
233 .vfs_fhtovp = hammer2_vfs_fhtovp,
234 .vfs_checkexp = hammer2_vfs_checkexp,
235 .vfs_modifying = hammer2_vfs_modifying
236 };
237
238 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
239
240 VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
241 MODULE_VERSION(hammer2, 1);
242
243 static
244 int
245 hammer2_vfs_init(struct vfsconf *conf)
246 {
247 static struct objcache_malloc_args margs_read;
248 static struct objcache_malloc_args margs_write;
249 static struct objcache_malloc_args margs_vop;
250
251 int error;
252 int mod;
253
254 error = 0;
255 kmalloc_raise_limit(M_HAMMER2, 0); /* unlimited */
256
257 /*
258 * hammer2_xop_nthreads must be a multiple of ncpus,
259 * minimum 2 * ncpus.
260 */
261 mod = ncpus;
262 hammer2_xop_mod = mod;
263 hammer2_xop_nthreads = mod * 2;
264 while (hammer2_xop_nthreads / mod < HAMMER2_XOPGROUPS_MIN ||
265 hammer2_xop_nthreads < HAMMER2_XOPTHREADS_MIN)
266 {
267 hammer2_xop_nthreads += mod;
268 }
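/*
 * Carve the per-cpu thread groups into two classes: sgroups takes
 * half of the groups (rounded down), xgroups takes the remainder,
 * and xbase is the absolute thread index at which the xgroups begin.
 */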
269 hammer2_xop_sgroups = hammer2_xop_nthreads / mod / 2;
270 hammer2_xop_xgroups = hammer2_xop_nthreads / mod - hammer2_xop_sgroups;
271 hammer2_xop_xbase = hammer2_xop_sgroups * mod;
272
273 /*
274 * A large DIO cache is needed to retain dedup enablement masks.
275 * The bulkfree code clears related masks as part of the disk block
276 * recycling algorithm, preventing it from being used for a later
277 * dedup.
278 *
279 * NOTE: A large buffer cache can actually interfere with dedup
280 * operation because we dedup based on media physical buffers
281 * and not logical buffers. Try to make the DIO case large
282 * enough to avoid this problem, but also cap it.
283 */
284 hammer2_dio_limit = nbuf * 2;
285 if (hammer2_dio_limit > 100000)
286 hammer2_dio_limit = 100000;
287
288 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
289 error = EINVAL;
290 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
291 error = EINVAL;
292 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
293 error = EINVAL;
294
295 if (error)
296 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
297
298 margs_read.objsize = 65536;
299 margs_read.mtype = M_HAMMER2_DEBUFFER;
300
301 margs_write.objsize = 32768;
302 margs_write.mtype = M_HAMMER2_CBUFFER;
303
304 margs_vop.objsize = sizeof(hammer2_xop_t);
305 margs_vop.mtype = M_HAMMER2;
306
307 /*
308 * Note that for the XOPS cache we want backing store allocations
309 * to use M_ZERO. This is not allowed in objcache_get() (to avoid
310 * confusion), so use the backing store function that does it. This
311 * means that initial XOPS objects are zeroed but REUSED objects are
312 * not. So we are responsible for cleaning the object up sufficiently
313 * for our needs before objcache_put()ing it back (typically just the
314 * FIFO indices).
315 */
316 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
317 0, 1, NULL, NULL, NULL,
318 objcache_malloc_alloc,
319 objcache_malloc_free,
320 &margs_read);
321 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
322 0, 1, NULL, NULL, NULL,
323 objcache_malloc_alloc,
324 objcache_malloc_free,
325 &margs_write);
326 cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
327 0, 1, NULL, NULL, NULL,
328 objcache_malloc_alloc_zero,
329 objcache_malloc_free,
330 &margs_vop);
331
332
333 lockinit(&hammer2_mntlk, "mntlk", 0, 0);
334 TAILQ_INIT(&hammer2_mntlist);
335 TAILQ_INIT(&hammer2_pfslist);
336 TAILQ_INIT(&hammer2_spmplist);
337
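/*
 * Derive default dirty-chain and dirty-inode limits from maxvnodes,
 * clamped to [1000, HAMMER2_LIMIT_DIRTY_CHAINS] and
 * [100, HAMMER2_LIMIT_DIRTY_INODES].  The saved-chains limit scales
 * off the dirty-chain limit.
 */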
338 hammer2_limit_dirty_chains = maxvnodes / 10;
339 if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
340 hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;
341 if (hammer2_limit_dirty_chains < 1000)
342 hammer2_limit_dirty_chains = 1000;
343
344 hammer2_limit_dirty_inodes = maxvnodes / 25;
345 if (hammer2_limit_dirty_inodes < 100)
346 hammer2_limit_dirty_inodes = 100;
347 if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES)
348 hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES;
349
350 hammer2_limit_saved_chains = hammer2_limit_dirty_chains * 5;
351
352 return (error);
353 }
354
355 static
356 int
357 hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
358 {
359 objcache_destroy(cache_buffer_read);
360 objcache_destroy(cache_buffer_write);
361 objcache_destroy(cache_xops);
362 return 0;
363 }
364
365 /*
366 * Core PFS allocator. Used to allocate or reference the pmp structure
367 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
368 * The pmp can be passed in or loaded by this function using the chain and
369 * inode data.
370 *
371 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
372 * transactions. Note that synchronization does not use this field.
373 * (typically frontend operations and synchronization cannot run on the
374 * same PFS node at the same time).
375 *
376 * XXX check locking
377 */
378 hammer2_pfs_t *
379 hammer2_pfsalloc(hammer2_chain_t *chain,
380 const hammer2_inode_data_t *ripdata,
381 hammer2_dev_t *force_local)
382 {
383 hammer2_pfs_t *pmp;
384 hammer2_inode_t *iroot;
385 int count;
386 int i;
387 int j;
388
389 pmp = NULL;
390
391 /*
392 * Locate or create the PFS based on the cluster id. If ripdata
393 * is NULL this is a spmp which is unique and is always allocated.
394 *
395 * If the device is mounted in local mode all PFSs are considered
396 * independent and not part of any cluster (for debugging only).
397 */
398 if (ripdata) {
399 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
400 if (force_local != pmp->force_local)
401 continue;
402 if (force_local == NULL &&
403 bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
404 sizeof(pmp->pfs_clid)) == 0) {
405 break;
406 } else if (force_local && pmp->pfs_names[0] &&
407 strcmp(pmp->pfs_names[0], (const char *)ripdata->filename) == 0) {
408 break;
409 }
410 }
411 }
412
413 if (pmp == NULL) {
414 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
415 pmp->force_local = force_local;
416 hammer2_trans_manage_init(pmp);
417 kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes",
418 sizeof(struct hammer2_inode));
419 lockinit(&pmp->lock, "pfslk", 0, 0);
420 hammer2_spin_init(&pmp->blockset_spin, "h2blkset");
421 hammer2_inum_hash_init(pmp);
422 hammer2_spin_init(&pmp->xop_spin, "h2xop");
423 TAILQ_INIT(&pmp->syncq);
424 TAILQ_INIT(&pmp->depq);
425 hammer2_spin_init(&pmp->list_spin, "h2pfsalloc_list");
426
427 /*
428 * Save the last media transaction id for the flusher. Set the
429 * initial PFS id and enter the PFS onto the appropriate global list.
430 */
431 if (ripdata) {
432 pmp->pfs_clid = ripdata->meta.pfs_clid;
433 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
434 } else {
435 pmp->flags |= HAMMER2_PMPF_SPMP;
436 TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry);
437 }
438
439 /*
440 * The synchronization thread may start too early; make
441 * sure it stays frozen until we are ready to let it go.
442 * XXX
443 */
444 /*
445 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
446 HAMMER2_THREAD_REMASTER;
447 */
448 }
449
450 /*
451 * Create the PFS's root inode and any missing XOP helper threads.
452 */
453 if ((iroot = pmp->iroot) == NULL) {
454 iroot = hammer2_inode_get(pmp, NULL, 1, -1);
455 if (ripdata)
456 iroot->meta = ripdata->meta;
457 pmp->iroot = iroot;
458 hammer2_inode_ref(iroot);
459 hammer2_inode_unlock(iroot);
460 }
461
462 /*
463 * Stop here if no chain is passed in.
464 */
465 if (chain == NULL)
466 goto done;
467
468 /*
469 * When a chain is passed in we must add it to the PFS's root
470 * inode, update pmp->pfs_types[], and update the synchronization
471 * threads.
472 *
473 * When forcing local mode, mark the PFS as a MASTER regardless.
474 *
475 * At the moment empty spots can develop due to removals or failures.
476 * Ultimately we want to re-fill these spots but doing so might
477 * confuse running code. XXX
478 */
479 hammer2_inode_ref(iroot);
480 hammer2_mtx_ex(&iroot->lock);
481 j = iroot->cluster.nchains;
482
483 if (j == HAMMER2_MAXCLUSTER) {
484 kprintf("hammer2_pfsalloc: cluster full!\n");
485 /* XXX fatal error? */
486 } else {
487 KKASSERT(chain->pmp == NULL);
488 chain->pmp = pmp;
489 hammer2_chain_ref(chain);
490 iroot->cluster.array[j].chain = chain;
491 if (force_local)
492 pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
493 else
494 pmp->pfs_types[j] = ripdata->meta.pfs_type;
495 pmp->pfs_names[j] = kstrdup((const char *)ripdata->filename, M_HAMMER2);
496 pmp->pfs_hmps[j] = chain->hmp;
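/*
 * Snapshot the new element's root blockset under blockset_spin so
 * concurrent readers of pfs_iroot_blocksets[] see a consistent copy.
 */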
497 hammer2_spin_ex(&pmp->blockset_spin);
498 pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset;
499 hammer2_spin_unex(&pmp->blockset_spin);
500
501 /*
502 * If the PFS is already mounted we must account
503 * for the mount_count here.
504 */
505 if (pmp->mp)
506 ++chain->hmp->mount_count;
507
508 /*
509 * May have to fixup dirty chain tracking. Previous
510 * pmp was NULL so nothing to undo.
511 */
512 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
513 hammer2_pfs_memory_inc(pmp);
514 ++j;
515 }
516 iroot->cluster.nchains = j;
517
518 /*
519 * Update nmasters from any PFS inode which is part of the cluster.
520 * It is possible that this will result in a value which is too
521 * high. MASTER PFSs are authoritative for pfs_nmasters and will
522 * override this value later on.
523 *
524 * (This informs us of masters that might not currently be
525 * discoverable by this mount).
526 */
527 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
528 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
529 }
530
531 /*
532 * Count visible masters. Masters are usually added with
533 * ripdata->meta.pfs_nmasters set to 1. This detects when there
534 * are more (XXX and must update the master inodes).
535 */
536 count = 0;
537 for (i = 0; i < iroot->cluster.nchains; ++i) {
538 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
539 ++count;
540 }
541 if (pmp->pfs_nmasters < count)
542 pmp->pfs_nmasters = count;
543
544 /*
545 * Create missing synchronization and support threads.
546 *
547 * Single-node masters (including snapshots) have nothing to
548 * synchronize and do not require this thread.
549 *
550 * Multi-node masters or any number of soft masters, slaves, copy,
551 * or other PFS types need the thread.
552 *
553 * Each thread is responsible for its particular cluster index.
554 * We use independent threads so stalls or mismatches related to
555 * any given target do not affect other targets.
556 */
557 for (i = 0; i < iroot->cluster.nchains; ++i) {
558 /*
559 * Single-node masters (including snapshots) have nothing
560 * to synchronize and will make direct xops support calls,
561 * thus they do not require this thread.
562 *
563 * Note that there can be thousands of snapshots. We do not
564 * want to create thousands of threads.
565 */
566 if (pmp->pfs_nmasters <= 1 &&
567 pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
568 continue;
569 }
570
571 /*
572 * Sync support thread
573 */
574 if (pmp->sync_thrs[i].td == NULL) {
575 hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
576 "h2nod", i, -1,
577 hammer2_primary_sync_thread);
578 }
579 }
580
581 /*
582 * Create missing Xop threads
583 *
584 * NOTE: We create helper threads for all mounted PFSs or any
585 * PFSs with 2+ nodes (so the sync thread can update them,
586 * even if not mounted).
587 */
588 if (pmp->mp || iroot->cluster.nchains >= 2)
589 hammer2_xop_helper_create(pmp);
590
591 hammer2_mtx_unlock(&iroot->lock);
592 hammer2_inode_drop(iroot);
593 done:
594 return pmp;
595 }
596
597 /*
598 * Deallocate an element of a probed PFS. If destroying and this is a
599 * MASTER, adjust nmasters.
600 *
601 * This function does not physically destroy the PFS element in its device
602 * under the super-root (see hammer2_ioctl_pfs_delete()).
603 */
604 void
605 hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
606 {
607 hammer2_inode_t *iroot;
608 hammer2_chain_t *chain;
609 int j;
610
611 /*
612 * Cleanup our reference on iroot. iroot is (should) not be needed
613 * by the flush code.
614 */
615 iroot = pmp->iroot;
616 if (iroot) {
617 /*
618 * Stop synchronizing
619 *
620 * XXX flush after acquiring the iroot lock.
621 * XXX clean out the cluster index from all inode structures.
622 */
623 hammer2_thr_delete(&pmp->sync_thrs[clindex]);
624
625 /*
626 * Remove the cluster index from the group. If destroying
627 * the PFS and this is a master, adjust pfs_nmasters.
628 */
629 hammer2_mtx_ex(&iroot->lock);
630 chain = iroot->cluster.array[clindex].chain;
631 iroot->cluster.array[clindex].chain = NULL;
632
633 switch(pmp->pfs_types[clindex]) {
634 case HAMMER2_PFSTYPE_MASTER:
635 if (destroying && pmp->pfs_nmasters > 0)
636 --pmp->pfs_nmasters;
637 /* XXX adjust ripdata->meta.pfs_nmasters */
638 break;
639 default:
640 break;
641 }
642 pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;
643
644 hammer2_mtx_unlock(&iroot->lock);
645
646 /*
647 * Release the chain.
648 */
649 if (chain) {
650 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
651 hammer2_chain_drop(chain);
652 }
653
654 /*
655 * Terminate all XOP threads for the cluster index.
656 */
657 if (pmp->xop_groups) {
658 for (j = 0; j < hammer2_xop_nthreads; ++j) {
659 hammer2_thr_delete(
660 &pmp->xop_groups[j].thrs[clindex]);
661 }
662 }
663 }
664 }
665
666 /*
667 * Destroy a PFS, typically only occurs after the last mount on a device
668 * has gone away.
669 */
670 static void
671 hammer2_pfsfree(hammer2_pfs_t *pmp)
672 {
673 hammer2_inode_t *iroot;
674 hammer2_chain_t *chain;
675 int chains_still_present = 0;
676 int i;
677 int j;
678
679 /*
680 * Cleanup our reference on iroot. iroot is (should) not be needed
681 * by the flush code.
682 */
683 if (pmp->flags & HAMMER2_PMPF_SPMP)
684 TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry);
685 else
686 TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);
687
688 /*
689 * Clean up iroot
690 */
691 iroot = pmp->iroot;
692 if (iroot) {
693 for (i = 0; i < iroot->cluster.nchains; ++i) {
694 hammer2_thr_delete(&pmp->sync_thrs[i]);
695 if (pmp->xop_groups) {
696 for (j = 0; j < hammer2_xop_nthreads; ++j)
697 hammer2_thr_delete(
698 &pmp->xop_groups[j].thrs[i]);
699 }
700 chain = iroot->cluster.array[i].chain;
701 if (chain && !RB_EMPTY(&chain->core.rbtree)) {
702 kprintf("hammer2: Warning pmp %p still "
703 "has active chains\n", pmp);
704 chains_still_present = 1;
705 }
706 }
707 KASSERT(iroot->refs == 1,
708 ("PMP->IROOT %p REFS WRONG %d", iroot, iroot->refs));
709
710 /* ref for iroot */
711 hammer2_inode_drop(iroot);
712 pmp->iroot = NULL;
713 }
714
715 /*
716 * Free remaining pmp resources
717 */
718 if (chains_still_present) {
719 kprintf("hammer2: cannot free pmp %p, still in use\n", pmp);
720 } else {
721 kmalloc_destroy_obj(&pmp->minode);
722 kfree(pmp, M_HAMMER2);
723 }
724 }
725
726 /*
727 * Remove all references to hmp from the pfs list. Any PFS which becomes
728 * empty is terminated and freed.
729 *
730 * XXX inefficient.
731 */
732 static void
733 hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which)
734 {
735 hammer2_pfs_t *pmp;
736 hammer2_inode_t *iroot;
737 hammer2_chain_t *rchain;
738 int i;
739 int j;
740 struct hammer2_pfslist *wlist;
741
742 if (which == 0)
743 wlist = &hammer2_pfslist;
744 else
745 wlist = &hammer2_spmplist;
746 again:
747 TAILQ_FOREACH(pmp, wlist, mntentry) {
748 if ((iroot = pmp->iroot) == NULL)
749 continue;
750
751 /*
752 * Determine if this PFS is affected. If it is we must
753 * freeze all management threads and lock its iroot.
754 *
755 * Freezing a management thread forces it idle; operations
756 * in progress will be aborted and it will have to start
757 * over again when unfrozen, or exit if told to exit.
758 */
759 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
760 if (pmp->pfs_hmps[i] == hmp)
761 break;
762 }
763 if (i == HAMMER2_MAXCLUSTER)
764 continue;
765
766 hammer2_vfs_sync_pmp(pmp, MNT_WAIT);
767
768 /*
769 * Make sure all synchronization threads are locked
770 * down.
771 */
772 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
773 if (pmp->pfs_hmps[i] == NULL)
774 continue;
775 hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
776 if (pmp->xop_groups) {
777 for (j = 0; j < hammer2_xop_nthreads; ++j) {
778 hammer2_thr_freeze_async(
779 &pmp->xop_groups[j].thrs[i]);
780 }
781 }
782 }
783 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
784 if (pmp->pfs_hmps[i] == NULL)
785 continue;
786 hammer2_thr_freeze(&pmp->sync_thrs[i]);
787 if (pmp->xop_groups) {
788 for (j = 0; j < hammer2_xop_nthreads; ++j) {
789 hammer2_thr_freeze(
790 &pmp->xop_groups[j].thrs[i]);
791 }
792 }
793 }
794
795 /*
796 * Lock the inode and clean out matching chains.
797 * Note that we cannot use hammer2_inode_lock_*()
798 * here because that would attempt to validate the
799 * cluster that we are in the middle of ripping
800 * apart.
801 *
802 * WARNING! We are working directly on the inode's
803 * embedded cluster.
804 */
805 hammer2_mtx_ex(&iroot->lock);
806
807 /*
808 * Remove the chain from matching elements of the PFS.
809 */
810 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
811 if (pmp->pfs_hmps[i] != hmp)
812 continue;
813 hammer2_thr_delete(&pmp->sync_thrs[i]);
814 if (pmp->xop_groups) {
815 for (j = 0; j < hammer2_xop_nthreads; ++j) {
816 hammer2_thr_delete(
817 &pmp->xop_groups[j].thrs[i]);
818 }
819 }
820 rchain = iroot->cluster.array[i].chain;
821 iroot->cluster.array[i].chain = NULL;
822 pmp->pfs_types[i] = HAMMER2_PFSTYPE_NONE;
823 if (pmp->pfs_names[i]) {
824 kfree(pmp->pfs_names[i], M_HAMMER2);
825 pmp->pfs_names[i] = NULL;
826 }
827 if (rchain) {
828 hammer2_chain_drop(rchain);
829 /* focus hint */
830 if (iroot->cluster.focus == rchain)
831 iroot->cluster.focus = NULL;
832 }
833 pmp->pfs_hmps[i] = NULL;
834 }
835 hammer2_mtx_unlock(&iroot->lock);
836
837 /*
838 * Cleanup trailing chains. Gaps may remain.
839 */
840 for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
841 if (pmp->pfs_hmps[i])
842 break;
843 }
844 iroot->cluster.nchains = i + 1;
845
846 /*
847 * If the PMP has no elements remaining we can destroy it.
848 * (this will transition management threads from frozen->exit).
849 */
850 if (iroot->cluster.nchains == 0) {
851 /*
852 * If this was the hmp's spmp, we need to clean
853 * a little more stuff out.
854 */
855 if (hmp->spmp == pmp) {
856 hmp->spmp = NULL;
857 hmp->vchain.pmp = NULL;
858 hmp->fchain.pmp = NULL;
859 }
860
861 /*
862 * Free the pmp and restart the loop
863 */
864 KKASSERT(TAILQ_EMPTY(&pmp->syncq));
865 KKASSERT(TAILQ_EMPTY(&pmp->depq));
866 hammer2_pfsfree(pmp);
867 goto again;
868 }
869
870 /*
871 * If elements still remain we need to set the REMASTER
872 * flag and unfreeze it.
873 */
874 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
875 if (pmp->pfs_hmps[i] == NULL)
876 continue;
877 hammer2_thr_remaster(&pmp->sync_thrs[i]);
878 hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
879 if (pmp->xop_groups) {
880 for (j = 0; j < hammer2_xop_nthreads; ++j) {
881 hammer2_thr_remaster(
882 &pmp->xop_groups[j].thrs[i]);
883 hammer2_thr_unfreeze(
884 &pmp->xop_groups[j].thrs[i]);
885 }
886 }
887 }
888 }
889 }
890
891 /*
892 * Mount or remount HAMMER2 filesystem from physical media
893 *
894 * mountroot
895 * mp mount point structure
896 * path NULL
897 * data <unused>
898 * cred <unused>
899 *
900 * mount
901 * mp mount point structure
902 * path path to mount point
903 * data pointer to argument structure in user space
904 * volume volume path (device@LABEL form)
905 * hflags user mount flags
906 * cred user credentials
907 *
908 * RETURNS: 0 Success
909 * !0 error number
910 */
911 static
912 int
913 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
914 struct ucred *cred)
915 {
916 struct hammer2_mount_info info;
917 hammer2_pfs_t *pmp;
918 hammer2_pfs_t *spmp;
919 hammer2_dev_t *hmp, *hmp_tmp;
920 hammer2_dev_t *force_local;
921 hammer2_key_t key_next;
922 hammer2_key_t key_dummy;
923 hammer2_key_t lhc;
924 hammer2_chain_t *parent;
925 hammer2_chain_t *chain;
926 const hammer2_inode_data_t *ripdata;
927 hammer2_devvp_list_t devvpl;
928 hammer2_devvp_t *e, *e_tmp;
929 struct file *fp;
930 char devstr[MNAMELEN];
931 size_t size;
932 size_t done;
933 char *label;
934 int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
935 int error;
936 int i;
937
938 hmp = NULL;
939 pmp = NULL;
940 label = NULL;
941 bzero(&info, sizeof(info));
942
943 if (path) {
944 /*
945 * Non-root mount or updating a mount
946 */
947 error = copyin(data, &info, sizeof(info));
948 if (error)
949 return (error);
950 }
951
952 if (mp->mnt_flag & MNT_UPDATE) {
953 /*
954 * Update mount. Note that pmp->iroot->cluster is
955 * an inode-embedded cluster and thus cannot be
956 * directly locked.
957 *
958 * XXX HAMMER2 needs to implement NFS export via
959 * mountctl.
960 */
961 hammer2_cluster_t *cluster;
962
963 error = 0;
964 pmp = MPTOPMP(mp);
965 pmp->hflags = info.hflags;
966 cluster = &pmp->iroot->cluster;
967 for (i = 0; i < cluster->nchains; ++i) {
968 if (cluster->array[i].chain == NULL)
969 continue;
970 hmp = cluster->array[i].chain->hmp;
971 error = hammer2_remount(hmp, mp, path, cred);
972 if (error)
973 break;
974 }
975
976 return error;
977 }
978
979 if (path == NULL) {
980 /*
981 * Root mount
982 */
983 info.cluster_fd = -1;
984 ksnprintf(devstr, sizeof(devstr), "%s",
985 mp->mnt_stat.f_mntfromname);
986 done = strlen(devstr) + 1;
987 kprintf("hammer2_mount: root devstr=\"%s\"\n", devstr);
988 } else {
989 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
990 if (error)
991 return (error);
992 kprintf("hammer2_mount: devstr=\"%s\"\n", devstr);
993 }
994
995 /*
996 * Extract device and label, automatically mount @BOOT, @ROOT, or @DATA
997 * if no label specified, based on the partition id. Error out if no
998 * label or device (with partition id) is specified. This is strictly
999 * a convenience to match the default label created by newfs_hammer2,
1000 * our preference is that a label always be specified.
1001 *
1002 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
1003 * that does not specify a device, as long as some H2 label
1004 * has already been mounted from that device. This makes
1005 * mounting snapshots a lot easier.
1006 */
1007 label = strchr(devstr, '@');
1008 if (label && ((label + 1) - devstr) > done) {
1009 kprintf("hammer2_mount: bad label %s/%zd\n", devstr, done);
1010 return (EINVAL);
1011 }
1012 if (label == NULL || label[1] == 0) {
1013 char slice;
1014
1015 if (label == NULL)
1016 label = devstr + strlen(devstr);
1017 else
1018 *label = '\0'; /* clean up trailing @ */
1019
1020 slice = label[-1];
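/*
 * The trailing character of the device name is the partition id:
 * 'a' conventionally holds /boot and 'd' the root filesystem,
 * hence the default labels below.
 */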
1021 switch(slice) {
1022 case 'a':
1023 label = "BOOT";
1024 break;
1025 case 'd':
1026 label = "ROOT";
1027 break;
1028 default:
1029 label = "DATA";
1030 break;
1031 }
1032 } else {
1033 *label = '\0';
1034 label++;
1035 }
1036
1037 kprintf("hammer2_mount: device=\"%s\" label=\"%s\" rdonly=%d\n",
1038 devstr, label, ronly);
1039
1040 /*
1041 * Initialize all device vnodes.
1042 */
1043 TAILQ_INIT(&devvpl);
1044 error = hammer2_init_devvp(devstr, path == NULL, &devvpl);
1045 if (error) {
1046 kprintf("hammer2: failed to initialize devvp in %s\n", devstr);
1047 hammer2_cleanup_devvp(&devvpl);
1048 return error;
1049 }
1050
1051 /*
1052 * Determine if the device has already been mounted. After this
1053 * check hmp will be non-NULL if we are doing the second or more
1054 * hammer2 mounts from the same device.
1055 */
1056 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
1057 if (!TAILQ_EMPTY(&devvpl)) {
1058 /*
1059 * Match the device. Due to the way devfs works,
1060 * we may not be able to directly match the vnode pointer,
1061 * so also check to see if the underlying device matches.
1062 */
1063 TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) {
1064 TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) {
1065 int devvp_found = 0;
1066 TAILQ_FOREACH(e, &devvpl, entry) {
1067 KKASSERT(e->devvp);
1068 if (e_tmp->devvp == e->devvp)
1069 devvp_found = 1;
1070 if (e_tmp->devvp->v_rdev &&
1071 e_tmp->devvp->v_rdev == e->devvp->v_rdev)
1072 devvp_found = 1;
1073 }
1074 if (!devvp_found)
1075 goto next_hmp;
1076 }
1077 hmp = hmp_tmp;
1078 kprintf("hammer2_mount: hmp=%p matched\n", hmp);
1079 break;
1080 next_hmp:
1081 continue;
1082 }
1083
1084 /*
1085 * If no match this may be a fresh H2 mount, make sure
1086 * the device is not mounted on anything else.
1087 */
1088 if (hmp == NULL) {
1089 TAILQ_FOREACH(e, &devvpl, entry) {
1090 struct vnode *devvp = e->devvp;
1091 KKASSERT(devvp);
1092 error = vfs_mountedon(devvp);
1093 if (error) {
1094 kprintf("hammer2_mount: %s mounted %d\n",
1095 e->path, error);
1096 hammer2_cleanup_devvp(&devvpl);
1097 lockmgr(&hammer2_mntlk, LK_RELEASE);
1098 return error;
1099 }
1100 }
1101 }
1102 } else {
1103 /*
1104 * Match the label to a pmp already probed.
1105 */
1106 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
1107 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
1108 if (pmp->pfs_names[i] &&
1109 strcmp(pmp->pfs_names[i], label) == 0) {
1110 hmp = pmp->pfs_hmps[i];
1111 break;
1112 }
1113 }
1114 if (hmp)
1115 break;
1116 }
1117 if (hmp == NULL) {
1118 kprintf("hammer2_mount: PFS label \"%s\" not found\n",
1119 label);
1120 hammer2_cleanup_devvp(&devvpl);
1121 lockmgr(&hammer2_mntlk, LK_RELEASE);
1122 return ENOENT;
1123 }
1124 }
1125
1126 /*
1127 * Open the device if this isn't a secondary mount and construct
1128 * the H2 device mount (hmp).
1129 */
1130 if (hmp == NULL) {
1131 hammer2_chain_t *schain;
1132 hammer2_xop_head_t xop;
1133
1134 /*
1135 * Now open the device
1136 */
1137 KKASSERT(!TAILQ_EMPTY(&devvpl));
1138 error = hammer2_open_devvp(&devvpl, ronly);
1139 if (error) {
1140 hammer2_close_devvp(&devvpl, ronly);
1141 hammer2_cleanup_devvp(&devvpl);
1142 lockmgr(&hammer2_mntlk, LK_RELEASE);
1143 return error;
1144 }
1145
1146 /*
1147 * Construct volumes and link with device vnodes.
1148 */
1149 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
1150 hmp->devvp = NULL;
1151 error = hammer2_init_volumes(mp, &devvpl, hmp->volumes,
1152 &hmp->voldata, &hmp->volhdrno,
1153 &hmp->devvp);
1154 if (error) {
1155 hammer2_close_devvp(&devvpl, ronly);
1156 hammer2_cleanup_devvp(&devvpl);
1157 lockmgr(&hammer2_mntlk, LK_RELEASE);
1158 kfree(hmp, M_HAMMER2);
1159 return error;
1160 }
1161 if (!hmp->devvp) {
1162 kprintf("hammer2: failed to initialize root volume\n");
1163 hammer2_unmount_helper(mp, NULL, hmp);
1164 lockmgr(&hammer2_mntlk, LK_RELEASE);
1165 hammer2_vfs_unmount(mp, MNT_FORCE);
1166 return EINVAL;
1167 }
1168
1169 ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", devstr);
1170 hmp->ronly = ronly;
1171 hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
1172 kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains",
1173 sizeof(struct hammer2_chain));
1174 kmalloc_create_obj(&hmp->mio, "HAMMER2-dio",
1175 sizeof(struct hammer2_io));
1176 kmalloc_create(&hmp->mmsg, "HAMMER2-msg");
1177 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
1178 hammer2_io_hash_init(hmp);
1179 hammer2_spin_init(&hmp->list_spin, "h2mount_list");
1180
1181 lockinit(&hmp->vollk, "h2vol", 0, 0);
1182 lockinit(&hmp->bulklk, "h2bulk", 0, 0);
1183 lockinit(&hmp->bflock, "h2bflk", 0, 0);
1184
1185 /*
1186 * vchain setup. vchain.data is embedded.
1187 * vchain.refs is initialized and will never drop to 0.
1188 */
1189 hmp->vchain.hmp = hmp;
1190 hmp->vchain.refs = 1;
1191 hmp->vchain.data = (void *)&hmp->voldata;
1192 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
1193 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
1194 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
1195 hammer2_chain_init(&hmp->vchain);
1196
1197 /*
1198 * fchain setup. fchain.data is embedded.
1199 * fchain.refs is initialized and will never drop to 0.
1200 *
1201 * The data is not used but needs to be initialized to
1202 * pass assertion muster. We use this chain primarily
1203 * as a placeholder for the freemap's top-level radix tree
1204 * so it does not interfere with the volume's topology
1205 * radix tree.
1206 */
1207 hmp->fchain.hmp = hmp;
1208 hmp->fchain.refs = 1;
1209 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
1210 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
1211 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
1212 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
1213 hmp->fchain.bref.methods =
1214 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
1215 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
1216 hammer2_chain_init(&hmp->fchain);
1217
1218 /*
1219 * Initialize volume header related fields.
1220 */
1221 KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO ||
1222 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO);
1223 hmp->volsync = hmp->voldata;
1224 hmp->free_reserved = hmp->voldata.allocator_size / 20;
1225 /*
1226 * Must use hmp instead of volume header for these two
1227 * in order to handle volume versions transparently.
1228 */
1229 if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) {
1230 hmp->nvolumes = hmp->voldata.nvolumes;
1231 hmp->total_size = hmp->voldata.total_size;
1232 } else {
1233 hmp->nvolumes = 1;
1234 hmp->total_size = hmp->voldata.volu_size;
1235 }
1236 KKASSERT(hmp->nvolumes > 0);
1237
1238 /*
1239 * Move devvpl entries to hmp.
1240 */
1241 TAILQ_INIT(&hmp->devvpl);
1242 while ((e = TAILQ_FIRST(&devvpl)) != NULL) {
1243 TAILQ_REMOVE(&devvpl, e, entry);
1244 TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry);
1245 }
1246 KKASSERT(TAILQ_EMPTY(&devvpl));
1247 KKASSERT(!TAILQ_EMPTY(&hmp->devvpl));
1248
1249 /*
1250 * Really important to get these right or the flush and
1251 * teardown code will get confused.
1252 */
1253 hmp->spmp = hammer2_pfsalloc(NULL, NULL, NULL);
1254 spmp = hmp->spmp;
1255 spmp->pfs_hmps[0] = hmp;
1256
1257 /*
1258 * Dummy-up vchain and fchain's modify_tid. mirror_tid
1259 * is inherited from the volume header.
1260 */
1261 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
1262 hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
1263 hmp->vchain.pmp = spmp;
1264 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
1265 hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
1266 hmp->fchain.pmp = spmp;
1267
1268 /*
1269 * First locate the super-root inode, which is key 0
1270 * relative to the volume header's blockset.
1271 *
1272 * Then locate the root inode by scanning the directory keyspace
1273 * represented by the label.
1274 */
1275 parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
1276 schain = hammer2_chain_lookup(&parent, &key_dummy,
1277 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
1278 &error, 0);
1279 hammer2_chain_lookup_done(parent);
1280 if (schain == NULL) {
1281 kprintf("hammer2_mount: invalid super-root\n");
1282 hammer2_unmount_helper(mp, NULL, hmp);
1283 lockmgr(&hammer2_mntlk, LK_RELEASE);
1284 hammer2_vfs_unmount(mp, MNT_FORCE);
1285 return EINVAL;
1286 }
1287 if (schain->error) {
1288 kprintf("hammer2_mount: error %s reading super-root\n",
1289 hammer2_error_str(schain->error));
1290 hammer2_chain_unlock(schain);
1291 hammer2_chain_drop(schain);
1292 schain = NULL;
1293 hammer2_unmount_helper(mp, NULL, hmp);
1294 lockmgr(&hammer2_mntlk, LK_RELEASE);
1295 hammer2_vfs_unmount(mp, MNT_FORCE);
1296 return EINVAL;
1297 }
1298
1299 /*
1300 * The super-root always uses an inode_tid of 1 when
1301 * creating PFSs.
1302 */
1303 spmp->inode_tid = 1;
1304 spmp->modify_tid = schain->bref.modify_tid + 1;
1305
1306 /*
1307 * Sanity-check schain's pmp and finish initialization.
1308 * Any chain belonging to the super-root topology should
1309 * have a NULL pmp (not even set to spmp).
1310 */
1311 ripdata = &schain->data->ipdata;
1312 KKASSERT(schain->pmp == NULL);
1313 spmp->pfs_clid = ripdata->meta.pfs_clid;
1314
1315 /*
1316 * Replace the dummy spmp->iroot with a real one. It's
1317 * easier to just do a wholesale replacement than to try
1318 * to update the chain and fixup the iroot fields.
1319 *
1320 * The returned inode is locked with the supplied cluster.
1321 */
1322 hammer2_dummy_xop_from_chain(&xop, schain);
1323 hammer2_inode_drop(spmp->iroot);
1324 spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1);
1325 spmp->spmp_hmp = hmp;
1326 spmp->pfs_types[0] = ripdata->meta.pfs_type;
1327 spmp->pfs_hmps[0] = hmp;
1328 hammer2_inode_ref(spmp->iroot);
1329 hammer2_inode_unlock(spmp->iroot);
1330 hammer2_cluster_unlock(&xop.cluster);
1331 hammer2_chain_drop(schain);
1332 /* do not call hammer2_cluster_drop() on an embedded cluster */
1333 schain = NULL; /* now invalid */
1334 /* leave spmp->iroot with one ref */
1335
1336 if (!hmp->ronly) {
1337 error = hammer2_recovery(hmp);
1338 if (error == 0)
1339 error |= hammer2_fixup_pfses(hmp);
1340 /* XXX do something with error */
1341 }
1342 hammer2_update_pmps(hmp);
1343 hammer2_iocom_init(hmp);
1344 hammer2_bulkfree_init(hmp);
1345
1346 /*
1347 * Ref the cluster management messaging descriptor. The mount
1348 * program deals with the other end of the communications pipe.
1349 *
1350 * Root mounts typically do not supply one.
1351 */
1352 if (info.cluster_fd >= 0) {
1353 fp = holdfp(curthread, info.cluster_fd, -1);
1354 if (fp) {
1355 hammer2_cluster_reconnect(hmp, fp);
1356 } else {
1357 kprintf("hammer2_mount: bad cluster_fd!\n");
1358 }
1359 }
1360 } else {
1361 /* hmp->devvp_list is already constructed. */
1362 hammer2_cleanup_devvp(&devvpl);
1363 spmp = hmp->spmp;
1364 if (info.hflags & HMNT2_DEVFLAGS) {
1365 kprintf("hammer2_mount: Warning: mount flags pertaining "
1366 "to the whole device may only be specified "
1367 "on the first mount of the device: %08x\n",
1368 info.hflags & HMNT2_DEVFLAGS);
1369 }
1370 }
1371
1372 /*
1373 * Force local mount (disassociate all PFSs from their clusters).
1374 * Used primarily for debugging.
1375 */
1376 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
1377
1378 /*
1379 * Lookup the mount point under the media-localized super-root.
1380 * Scanning hammer2_pfslist doesn't help us because it represents
1381 * PFS cluster ids which can aggregate several named PFSs together.
1382 *
1383 * cluster->pmp will incorrectly point to spmp and must be fixed
1384 * up later on.
1385 */
1386 hammer2_inode_lock(spmp->iroot, 0);
1387 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
1388 lhc = hammer2_dirhash(label, strlen(label));
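/*
 * Directory entries under the super-root are keyed by name hash;
 * scan every entry sharing the label's hash and match on the actual
 * filename to resolve hash collisions.
 */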
1389 chain = hammer2_chain_lookup(&parent, &key_next,
1390 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1391 &error, 0);
1392 while (chain) {
1393 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
1394 strcmp(label, (char *)chain->data->ipdata.filename) == 0) {
1395 break;
1396 }
1397 chain = hammer2_chain_next(&parent, chain, &key_next,
1398 key_next,
1399 lhc + HAMMER2_DIRHASH_LOMASK,
1400 &error, 0);
1401 }
1402 if (parent) {
1403 hammer2_chain_unlock(parent);
1404 hammer2_chain_drop(parent);
1405 }
1406 hammer2_inode_unlock(spmp->iroot);
1407
1408 /*
1409 * PFS could not be found?
1410 */
1411 if (chain == NULL) {
1412 hammer2_unmount_helper(mp, NULL, hmp);
1413 lockmgr(&hammer2_mntlk, LK_RELEASE);
1414 hammer2_vfs_unmount(mp, MNT_FORCE);
1415
1416 if (error) {
1417 kprintf("hammer2_mount: PFS label I/O error\n");
1418 return EINVAL;
1419 } else {
1420 kprintf("hammer2_mount: PFS label \"%s\" not found\n",
1421 label);
1422 return ENOENT;
1423 }
1424 }
1425
1426 /*
1427 * Acquire the pmp structure (it should have already been allocated
1428 * via hammer2_update_pmps()).
1429 */
1430 if (chain->error) {
1431 kprintf("hammer2_mount: PFS label I/O error\n");
1432 } else {
1433 ripdata = &chain->data->ipdata;
1434 pmp = hammer2_pfsalloc(NULL, ripdata, force_local);
1435 }
1436 hammer2_chain_unlock(chain);
1437 hammer2_chain_drop(chain);
1438
1439 /*
1440 * PFS to mount must exist at this point.
1441 */
1442 if (pmp == NULL) {
1443 kprintf("hammer2_mount: Failed to acquire PFS structure\n");
1444 hammer2_unmount_helper(mp, NULL, hmp);
1445 lockmgr(&hammer2_mntlk, LK_RELEASE);
1446 hammer2_vfs_unmount(mp, MNT_FORCE);
1447 return EINVAL;
1448 }
1449
1450 /*
1451 * Finish the mount
1452 */
1453 kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp);
1454
1455 /* Check if the pmp has already been mounted. */
1456 if (pmp->mp) {
1457 kprintf("hammer2_mount: PFS already mounted!\n");
1458 hammer2_unmount_helper(mp, NULL, hmp);
1459 lockmgr(&hammer2_mntlk, LK_RELEASE);
1460 hammer2_vfs_unmount(mp, MNT_FORCE);
1461 return EBUSY;
1462 }
1463
1464 pmp->hflags = info.hflags;
1465 mp->mnt_flag |= MNT_LOCAL;
1466 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */
1467 mp->mnt_kern_flag |= MNTK_THR_SYNC; /* new vsyncscan semantics */
1468
1469 /*
1470 * required mount structure initializations
1471 */
1472 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
1473 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
1474
1475 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
1476 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
1477
1478 /*
1479 * Optional fields
1480 */
1481 mp->mnt_iosize_max = MAXPHYS;
1482
1483 /*
1484 * Connect up mount pointers.
1485 */
1486 hammer2_mount_helper(mp, pmp);
1487 lockmgr(&hammer2_mntlk, LK_RELEASE);
1488
1489 /*
1490 * Finish setup
1491 */
1492 vfs_getnewfsid(mp);
1493 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
1494 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
1495 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
1496
1497 if (path) {
1498 copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
1499 MNAMELEN - 1, &size);
1500 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
1501 } /* else root mount, already in there */
1502
1503 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
1504 if (path) {
1505 copyinstr(path, mp->mnt_stat.f_mntonname,
1506 sizeof(mp->mnt_stat.f_mntonname) - 1,
1507 &size);
1508 } else {
1509 /* root mount */
1510 mp->mnt_stat.f_mntonname[0] = '/';
1511 }
1512
1513 /*
1514 * Initial statfs to prime mnt_stat.
1515 */
1516 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
1517
1518 return 0;
1519 }
1520
1521 /*
1522 * Scan PFSs under the super-root and create hammer2_pfs structures.
1523 */
1524 static
1525 void
1526 hammer2_update_pmps(hammer2_dev_t *hmp)
1527 {
1528 const hammer2_inode_data_t *ripdata;
1529 hammer2_chain_t *parent;
1530 hammer2_chain_t *chain;
1531 hammer2_dev_t *force_local;
1532 hammer2_pfs_t *spmp;
1533 hammer2_key_t key_next;
1534 int error;
1535
1536 /*
1537 * Force local mount (disassociate all PFSs from their clusters).
1538 * Used primarily for debugging.
1539 */
1540 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
1541
1542 /*
1543 * Lookup mount point under the media-localized super-root.
1544 *
1545 * cluster->pmp will incorrectly point to spmp and must be fixed
1546 * up later on.
1547 */
1548 spmp = hmp->spmp;
1549 hammer2_inode_lock(spmp->iroot, 0);
1550 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
1551 chain = hammer2_chain_lookup(&parent, &key_next,
1552 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
1553 &error, 0);
1554 while (chain) {
1555 if (chain->error) {
1556 kprintf("I/O error scanning PFS labels\n");
1557 } else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
1558 kprintf("Non inode chain type %d under super-root\n",
1559 chain->bref.type);
1560 } else {
1561 ripdata = &chain->data->ipdata;
1562 hammer2_pfsalloc(chain, ripdata, force_local);
1563 }
1564 chain = hammer2_chain_next(&parent, chain, &key_next,
1565 key_next, HAMMER2_KEY_MAX,
1566 &error, 0);
1567 }
1568 if (parent) {
1569 hammer2_chain_unlock(parent);
1570 hammer2_chain_drop(parent);
1571 }
1572 hammer2_inode_unlock(spmp->iroot);
1573 }
1574
1575 static
1576 int
1577 hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
1578 struct ucred *cred)
1579 {
1580 hammer2_volume_t *vol;
1581 struct vnode *devvp;
1582 int i, error, result = 0;
1583
1584 if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)))
1585 return 0;
1586
1587 for (i = 0; i < hmp->nvolumes; ++i) {
1588 vol = &hmp->volumes[i];
1589 devvp = vol->dev->devvp;
1590 KKASSERT(devvp);
1591 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1592 VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
1593 vn_unlock(devvp);
1594 error = 0;
1595 if (vol->id == HAMMER2_ROOT_VOLUME) {
1596 error = hammer2_recovery(hmp);
1597 if (error == 0)
1598 error |= hammer2_fixup_pfses(hmp);
1599 }
1600 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1601 if (error == 0) {
1602 VOP_CLOSE(devvp, FREAD, NULL);
1603 } else {
1604 VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
1605 }
1606 vn_unlock(devvp);
1607 result |= error;
1608 }
1609 if (result == 0) {
1610 kprintf("hammer2: enable read/write\n");
1611 hmp->ronly = 0;
1612 }
1613
1614 return result;
1615 }
1616
1617 static
1618 int
1619 hammer2_vfs_unmount(struct mount *mp, int mntflags)
1620 {
1621 hammer2_pfs_t *pmp;
1622 int flags;
1623 int error = 0;
1624
1625 pmp = MPTOPMP(mp);
1626
1627 if (pmp == NULL)
1628 return(0);
1629
1630 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
1631
1632 /*
1633 * If mount initialization proceeded far enough we must flush
1634 * its vnodes and sync the underlying mount points. Three syncs
1635 * are required to fully flush the filesystem (freemap updates lag
1636 * by one flush, and one extra for safety).
1637 */
1638 if (mntflags & MNT_FORCE)
1639 flags = FORCECLOSE;
1640 else
1641 flags = 0;
1642 if (pmp->iroot) {
1643 error = vflush(mp, 0, flags);
1644 if (error)
1645 goto failed;
1646 hammer2_vfs_sync(mp, MNT_WAIT);
1647 hammer2_vfs_sync(mp, MNT_WAIT);
1648 hammer2_vfs_sync(mp, MNT_WAIT);
1649 }
1650
1651 /*
1652 * Cleanup the frontend support XOPS threads
1653 */
1654 hammer2_xop_helper_cleanup(pmp);
1655
1656 if (pmp->mp)
1657 hammer2_unmount_helper(mp, pmp, NULL);
1658
1659 error = 0;
1660 failed:
1661 lockmgr(&hammer2_mntlk, LK_RELEASE);
1662
1663 return (error);
1664 }
1665
1666 /*
1667 * Mount helper, hook the system mount into our PFS.
1668 * The mount lock is held.
1669 *
1670 * We must bump the mount_count on related devices for any
1671 * mounted PFSs.
1672 */
1673 static
1674 void
1675 hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
1676 {
1677 hammer2_cluster_t *cluster;
1678 hammer2_chain_t *rchain;
1679 int i;
1680
1681 mp->mnt_data = (qaddr_t)pmp;
1682 pmp->mp = mp;
1683
1684 /*
1685 * After pmp->mp is set we have to adjust hmp->mount_count.
1686 */
1687 cluster = &pmp->iroot->cluster;
1688 for (i = 0; i < cluster->nchains; ++i) {
1689 rchain = cluster->array[i].chain;
1690 if (rchain == NULL)
1691 continue;
1692 ++rchain->hmp->mount_count;
1693 }
1694
1695 /*
1696 * Create missing Xop threads
1697 */
1698 hammer2_xop_helper_create(pmp);
1699 }
1700
1701 /*
1702 * Unmount helper, unhook the system mount from our PFS.
1703 * The mount lock is held.
1704 *
1705 * If hmp is supplied a mount responsible for being the first to open
1706 * the block device failed and the block device and all PFSs using the
1707 * block device must be cleaned up.
1708 *
1709 * If pmp is supplied multiple devices might be backing the PFS and each
1710 * must be disconnected. This might not be the last PFS using some of the
1711 * underlying devices. Also, we have to adjust our hmp->mount_count
1712 * accounting for the devices backing the pmp which is now undergoing an
1713 * unmount.
1714 */
1715 static
1716 void
1717 hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
1718 {
1719 hammer2_cluster_t *cluster;
1720 hammer2_chain_t *rchain;
1721 int dumpcnt;
1722 int i;
1723
1724 /*
1725 * If no device supplied this is a high-level unmount and we have
1726 * to disconnect the mount, adjust mount_count, and locate devices
1727 * that might now have no mounts.
1728 */
1729 if (pmp) {
1730 KKASSERT(hmp == NULL);
1731 KKASSERT(MPTOPMP(mp) == pmp);
1732 pmp->mp = NULL;
1733 mp->mnt_data = NULL;
1734
1735 /*
1736 * After pmp->mp is cleared we have to account for
1737 * mount_count.
1738 */
1739 cluster = &pmp->iroot->cluster;
1740 for (i = 0; i < cluster->nchains; ++i) {
1741 rchain = cluster->array[i].chain;
1742 if (rchain == NULL)
1743 continue;
1744 --rchain->hmp->mount_count;
1745 /* scrapping hmp now may invalidate the pmp */
1746 }
1747 again:
1748 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
1749 if (hmp->mount_count == 0) {
1750 hammer2_unmount_helper(NULL, NULL, hmp);
1751 goto again;
1752 }
1753 }
1754 return;
1755 }
1756
1757 /*
1758 * Try to terminate the block device. We can't terminate it if
1759 * there are still PFSs referencing it.
1760 */
1761 if (hmp->mount_count)
1762 return;
1763
1764 /*
1765 * Decommission the network before we start messing with the
1766 * device and PFS.
1767 */
1768 hammer2_iocom_uninit(hmp);
1769
1770 hammer2_bulkfree_uninit(hmp);
1771 hammer2_pfsfree_scan(hmp, 0);
1772
1773 /*
1774 * Cycle the volume data lock as a safety (probably not needed any
1775 * more). To ensure everything is out we need to flush at least
1776 * three times: (1) the running of the sideq can dirty the
1777 * filesystem, (2) a normal flush can dirty the freemap, and
1778 * (3) a final flush ensures the freemap is fully synchronized.
1779 *
1780 * The next mount's recovery scan can clean everything up but we want
1781 * to leave the filesystem in a 100% clean state on a normal unmount.
1782 */
1783 #if 0
1784 hammer2_voldata_lock(hmp);
1785 hammer2_voldata_unlock(hmp);
1786 #endif
1787
1788 /*
1789 * Flush whatever is left. Unmounted but modified PFSs might still
1790 * have some dirty chains on them.
1791 */
1792 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1793 hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
1794
1795 if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1796 hammer2_voldata_modify(hmp);
1797 hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP |
1798 HAMMER2_FLUSH_ALL);
1799 }
1800 hammer2_chain_unlock(&hmp->fchain);
1801
1802 if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1803 hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP |
1804 HAMMER2_FLUSH_ALL);
1805 }
1806 hammer2_chain_unlock(&hmp->vchain);
1807
1808 if ((hmp->vchain.flags | hmp->fchain.flags) &
1809 HAMMER2_CHAIN_FLUSH_MASK) {
1810 kprintf("hammer2_unmount: chains left over after final sync\n");
1811 kprintf(" vchain %08x\n", hmp->vchain.flags);
1812 kprintf(" fchain %08x\n", hmp->fchain.flags);
1813
1814 if (hammer2_debug & 0x0010)
1815 Debugger("entered debugger");
1816 }
1817
1818 hammer2_pfsfree_scan(hmp, 1);
1819
1820 KKASSERT(hmp->spmp == NULL);
1821
1822 /*
1823 * Finish up with the device vnode
1824 */
1825 if (!TAILQ_EMPTY(&hmp->devvpl)) {
1826 hammer2_close_devvp(&hmp->devvpl, hmp->ronly);
1827 hammer2_cleanup_devvp(&hmp->devvpl);
1828 }
1829 KKASSERT(TAILQ_EMPTY(&hmp->devvpl));
1830
1831 /*
1832 * Clear vchain/fchain flags that might prevent final cleanup
1833 * of these chains.
1834 */
1835 if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
1836 atomic_add_long(&hammer2_count_modified_chains, -1);
1837 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
1838 hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
1839 }
1840 if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
1841 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
1842 }
1843
1844 if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
1845 atomic_add_long(&hammer2_count_modified_chains, -1);
1846 atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
1847 hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
1848 }
1849 if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
1850 atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
1851 }
1852
1853 dumpcnt = 50;
1854 hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1);
1855 dumpcnt = 50;
1856 hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1);
1857
1858 /*
1859 * Final drop of embedded freemap root chain to
1860 * clean up fchain.core (fchain structure is not
1861 * flagged ALLOCATED so it is cleaned out and then
1862 * left to rot).
1863 */
1864 hammer2_chain_drop(&hmp->fchain);
1865
1866 /*
1867 * Final drop of embedded volume root chain to clean
1868 * up vchain.core (vchain structure is not flagged
1869 * ALLOCATED so it is cleaned out and then left to
1870 * rot).
1871 */
1872 hammer2_chain_drop(&hmp->vchain);
1873
1874 hammer2_io_hash_cleanup_all(hmp);
1875 if (hmp->iofree_count) {
1876 kprintf("io_cleanup: %d I/O's left hanging\n",
1877 hmp->iofree_count);
1878 }
1879
1880 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
1881 kmalloc_destroy_obj(&hmp->mchain);
1882 kmalloc_destroy_obj(&hmp->mio);
1883 kmalloc_destroy(&hmp->mmsg);
1884 kfree(hmp, M_HAMMER2);
1885 }
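/*
 * Illustrative sketch (not part of the driver): the two calling modes of
 * hammer2_unmount_helper() above.  The unmount path passes either a pmp
 * (high-level disconnect) or an hmp (low-level device teardown), never
 * both, as the KKASSERTs at the top of the function enforce.  The calling
 * context shown here is hypothetical.
 */
#if 0
	hammer2_unmount_helper(mp, pmp, NULL);	 /* detach PFS from mount */
	hammer2_unmount_helper(NULL, NULL, hmp); /* drop idle device */
#endif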
1886
1887 int
1888 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1889 ino_t ino, struct vnode **vpp)
1890 {
1891 hammer2_xop_lookup_t *xop;
1892 hammer2_pfs_t *pmp;
1893 hammer2_inode_t *ip;
1894 hammer2_tid_t inum;
1895 int error;
1896
1897 inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;
1898
1899 error = 0;
1900 pmp = MPTOPMP(mp);
1901
1902 /*
1903 * Easy if we already have it cached
1904 */
1905 ip = hammer2_inode_lookup(pmp, inum);
1906 if (ip) {
1907 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
1908 *vpp = hammer2_igetv(ip, &error);
1909 hammer2_inode_unlock(ip);
1910 hammer2_inode_drop(ip); /* from lookup */
1911
1912 return error;
1913 }
1914
1915 /*
1916 * Otherwise we have to find the inode
1917 */
1918 xop = hammer2_xop_alloc(pmp->iroot, 0);
1919 xop->lhc = inum;
1920 hammer2_xop_start(&xop->head, &hammer2_lookup_desc);
1921 error = hammer2_xop_collect(&xop->head, 0);
1922
1923 if (error == 0)
1924 ip = hammer2_inode_get(pmp, &xop->head, -1, -1);
1925 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1926
1927 if (ip) {
1928 *vpp = hammer2_igetv(ip, &error);
1929 hammer2_inode_unlock(ip);
1930 } else {
1931 *vpp = NULL;
1932 error = ENOENT;
1933 }
1934 return (error);
1935 }
1936
1937 static
1938 int
1939 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
1940 {
1941 hammer2_pfs_t *pmp;
1942 struct vnode *vp;
1943 int error;
1944
1945 pmp = MPTOPMP(mp);
1946 if (pmp->iroot == NULL) {
1947 kprintf("hammer2 (%s): no root inode\n",
1948 mp->mnt_stat.f_mntfromname);
1949 *vpp = NULL;
1950 return EINVAL;
1951 }
1952
1953 error = 0;
1954 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
1955
1956 while (pmp->inode_tid == 0) {
1957 hammer2_xop_ipcluster_t *xop;
1958 const hammer2_inode_meta_t *meta;
1959
1960 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
1961 hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc);
1962 error = hammer2_xop_collect(&xop->head, 0);
1963
1964 if (error == 0) {
1965 meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta;
1966 pmp->iroot->meta = *meta;
1967 pmp->inode_tid = meta->pfs_inum + 1;
1968 hammer2_xop_pdata(&xop->head);
1969 /* meta invalid */
1970
1971 if (pmp->inode_tid < HAMMER2_INODE_START)
1972 pmp->inode_tid = HAMMER2_INODE_START;
1973 pmp->modify_tid =
1974 xop->head.cluster.focus->bref.modify_tid + 1;
1975 #if 0
1976 kprintf("PFS: Starting inode %jd\n",
1977 (intmax_t)pmp->inode_tid);
1978 kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
1979 pmp->inode_tid, pmp->modify_tid);
1980 #endif
1981 wakeup(&pmp->iroot);
1982
1983 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1984
1985 /*
1986 * Prime the mount info.
1987 */
1988 hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
1989 break;
1990 }
1991
1992 /*
1993 * Loop, try again
1994 */
1995 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1996 hammer2_inode_unlock(pmp->iroot);
1997 error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
1998 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
1999 if (error == EINTR)
2000 break;
2001 }
2002
2003 if (error) {
2004 hammer2_inode_unlock(pmp->iroot);
2005 *vpp = NULL;
2006 } else {
2007 vp = hammer2_igetv(pmp->iroot, &error);
2008 hammer2_inode_unlock(pmp->iroot);
2009 *vpp = vp;
2010 }
2011
2012 return (error);
2013 }
2014
2015 /*
2016 * Filesystem status
2017 *
2018 * XXX incorporate ipdata->meta.inode_quota and data_quota
2019 */
2020 static
2021 int
2022 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
2023 {
2024 hammer2_pfs_t *pmp;
2025 hammer2_dev_t *hmp;
2026 hammer2_blockref_t bref;
2027 struct statfs tmp;
2028 int i;
2029
2030 /*
2031 * NOTE: iroot might not have validated the cluster yet.
2032 */
2033 pmp = MPTOPMP(mp);
2034
2035 bzero(&tmp, sizeof(tmp));
2036
2037 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
2038 hmp = pmp->pfs_hmps[i];
2039 if (hmp == NULL)
2040 continue;
2041 if (pmp->iroot->cluster.array[i].chain)
2042 bref = pmp->iroot->cluster.array[i].chain->bref;
2043 else
2044 bzero(&bref, sizeof(bref));
2045
2046 tmp.f_files = bref.embed.stats.inode_count;
2047 tmp.f_ffree = 0;
2048 tmp.f_blocks = hmp->voldata.allocator_size /
2049 mp->mnt_vstat.f_bsize;
2050 tmp.f_bfree = hmp->voldata.allocator_free /
2051 mp->mnt_vstat.f_bsize;
2052 tmp.f_bavail = tmp.f_bfree;
2053
2054 if (cred && cred->cr_uid != 0) {
2055 uint64_t adj;
2056
2057 /* 5% */
2058 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
2059 tmp.f_blocks -= adj;
2060 tmp.f_bfree -= adj;
2061 tmp.f_bavail -= adj;
2062 }
2063
2064 mp->mnt_stat.f_blocks = tmp.f_blocks;
2065 mp->mnt_stat.f_bfree = tmp.f_bfree;
2066 mp->mnt_stat.f_bavail = tmp.f_bavail;
2067 mp->mnt_stat.f_files = tmp.f_files;
2068 mp->mnt_stat.f_ffree = tmp.f_ffree;
2069
2070 *sbp = mp->mnt_stat;
2071 }
2072 return (0);
2073 }
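/*
 * Worked example (illustrative only, hypothetical numbers): the non-root
 * reservation adjustment above.  Assume a volume with
 * allocator_size = 100GiB, free_reserved = 5GiB (5%), and f_bsize = 64KiB:
 *
 *	adj      = 5GiB / 64KiB         = 81920 blocks
 *	f_blocks = 100GiB / 64KiB - adj = 1556480 blocks
 *
 * The reserve is subtracted from the total, free, and available counts,
 * so non-root users see "disk full" roughly 5% early.
 */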
2074
2075 static
2076 int
2077 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
2078 {
2079 hammer2_pfs_t *pmp;
2080 hammer2_dev_t *hmp;
2081 hammer2_blockref_t bref;
2082 struct statvfs tmp;
2083 int i;
2084
2085 /*
2086 * NOTE: iroot might not have validated the cluster yet.
2087 */
2088 pmp = MPTOPMP(mp);
2089 bzero(&tmp, sizeof(tmp));
2090
2091 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
2092 hmp = pmp->pfs_hmps[i];
2093 if (hmp == NULL)
2094 continue;
2095 if (pmp->iroot->cluster.array[i].chain)
2096 bref = pmp->iroot->cluster.array[i].chain->bref;
2097 else
2098 bzero(&bref, sizeof(bref));
2099
2100 tmp.f_files = bref.embed.stats.inode_count;
2101 tmp.f_ffree = 0;
2102 tmp.f_blocks = hmp->voldata.allocator_size /
2103 mp->mnt_vstat.f_bsize;
2104 tmp.f_bfree = hmp->voldata.allocator_free /
2105 mp->mnt_vstat.f_bsize;
2106 tmp.f_bavail = tmp.f_bfree;
2107
2108 if (cred && cred->cr_uid != 0) {
2109 uint64_t adj;
2110
2111 /* 5% */
2112 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
2113 tmp.f_blocks -= adj;
2114 tmp.f_bfree -= adj;
2115 tmp.f_bavail -= adj;
2116 }
2117
2118 mp->mnt_vstat.f_blocks = tmp.f_blocks;
2119 mp->mnt_vstat.f_bfree = tmp.f_bfree;
2120 mp->mnt_vstat.f_bavail = tmp.f_bavail;
2121 mp->mnt_vstat.f_files = tmp.f_files;
2122 mp->mnt_vstat.f_ffree = tmp.f_ffree;
2123
2124 *sbp = mp->mnt_vstat;
2125 }
2126 return (0);
2127 }
2128
2129 /*
2130 * Mount-time recovery (RW mounts)
2131 *
2132 * Updates to the free block table are allowed to lag flushes by one
2133 * transaction. After a crash, a fresh mount must do an incremental
2134 * scan from the last committed transaction id and make sure that all
2135 * related blocks have been marked allocated.
2136 */
2137 struct hammer2_recovery_elm {
2138 TAILQ_ENTRY(hammer2_recovery_elm) entry;
2139 hammer2_chain_t *chain;
2140 hammer2_tid_t sync_tid;
2141 };
2142
2143 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);
2144
2145 struct hammer2_recovery_info {
2146 struct hammer2_recovery_list list;
2147 hammer2_tid_t mtid;
2148 int depth;
2149 };
2150
2151 static int hammer2_recovery_scan(hammer2_dev_t *hmp,
2152 hammer2_chain_t *parent,
2153 struct hammer2_recovery_info *info,
2154 hammer2_tid_t sync_tid);
2155
2156 #define HAMMER2_RECOVERY_MAXDEPTH 10
2157
2158 static
2159 int
2160 hammer2_recovery(hammer2_dev_t *hmp)
2161 {
2162 struct hammer2_recovery_info info;
2163 struct hammer2_recovery_elm *elm;
2164 hammer2_chain_t *parent;
2165 hammer2_tid_t sync_tid;
2166 hammer2_tid_t mirror_tid;
2167 int error;
2168
2169 hammer2_trans_init(hmp->spmp, 0);
2170
2171 sync_tid = hmp->voldata.freemap_tid;
2172 mirror_tid = hmp->voldata.mirror_tid;
2173
2174 kprintf("hammer2_mount: \"%s\": ", hmp->devrepname);
2175 if (sync_tid >= mirror_tid) {
2176 kprintf("no recovery needed\n");
2177 } else {
2178 kprintf("freemap recovery %016jx-%016jx\n",
2179 sync_tid + 1, mirror_tid);
2180 }
2181
2182 TAILQ_INIT(&info.list);
2183 info.depth = 0;
2184 parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
2185 error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
2186 hammer2_chain_lookup_done(parent);
2187
2188 while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
2189 TAILQ_REMOVE(&info.list, elm, entry);
2190 parent = elm->chain;
2191 sync_tid = elm->sync_tid;
2192 kfree(elm, M_HAMMER2);
2193
2194 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
2195 error |= hammer2_recovery_scan(hmp, parent, &info,
2196 hmp->voldata.freemap_tid);
2197 hammer2_chain_unlock(parent);
2198 hammer2_chain_drop(parent); /* drop elm->chain ref */
2199 }
2200
2201 hammer2_trans_done(hmp->spmp, 0);
2202
2203 return error;
2204 }
2205
2206 static
2207 int
2208 hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
2209 struct hammer2_recovery_info *info,
2210 hammer2_tid_t sync_tid)
2211 {
2212 const hammer2_inode_data_t *ripdata;
2213 hammer2_chain_t *chain;
2214 hammer2_blockref_t bref;
2215 int tmp_error;
2216 int rup_error;
2217 int error;
2218 int first;
2219
2220 /*
2221 * Adjust freemap to ensure that the block(s) are marked allocated.
2222 */
2223 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
2224 hammer2_freemap_adjust(hmp, &parent->bref,
2225 HAMMER2_FREEMAP_DORECOVER);
2226 }
2227
2228 /*
2229 * Check type for recursive scan
2230 */
2231 switch(parent->bref.type) {
2232 case HAMMER2_BREF_TYPE_VOLUME:
2233 /* data already instantiated */
2234 break;
2235 case HAMMER2_BREF_TYPE_INODE:
2236 /*
2237 * Must instantiate data for DIRECTDATA test and also
2238 * for recursion.
2239 */
2240 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
2241 ripdata = &parent->data->ipdata;
2242 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
2243 /* not applicable to recovery scan */
2244 hammer2_chain_unlock(parent);
2245 return 0;
2246 }
2247 hammer2_chain_unlock(parent);
2248 break;
2249 case HAMMER2_BREF_TYPE_INDIRECT:
2250 /*
2251 * Must instantiate data for recursion
2252 */
2253 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
2254 hammer2_chain_unlock(parent);
2255 break;
2256 case HAMMER2_BREF_TYPE_DIRENT:
2257 case HAMMER2_BREF_TYPE_DATA:
2258 case HAMMER2_BREF_TYPE_FREEMAP:
2259 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2260 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2261 /* not applicable to recovery scan */
2262 return 0;
2263 break;
2264 default:
2265 return HAMMER2_ERROR_BADBREF;
2266 }
2267
2268 /*
2269 * Defer operation if depth limit reached.
2270 */
2271 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
2272 struct hammer2_recovery_elm *elm;
2273
2274 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
2275 elm->chain = parent;
2276 elm->sync_tid = sync_tid;
2277 hammer2_chain_ref(parent);
2278 TAILQ_INSERT_TAIL(&info->list, elm, entry);
2279 /* unlocked by caller */
2280
2281 return(0);
2282 }
2283
2284
2285 /*
2286 * Recursive scan of the last flushed transaction only. We are
2287 * doing this without pmp assignments so don't leave the chains
2288 * hanging around after we are done with them.
2289 *
2290 * error Cumulative error this level only
2291 * rup_error Cumulative error for recursion
2292 * tmp_error Specific non-cumulative recursion error
2293 */
2294 chain = NULL;
2295 first = 1;
2296 rup_error = 0;
2297 error = 0;
2298
2299 for (;;) {
2300 error |= hammer2_chain_scan(parent, &chain, &bref,
2301 &first,
2302 HAMMER2_LOOKUP_NODATA);
2303
2304 /*
2305 * Problem during scan or EOF
2306 */
2307 if (error)
2308 break;
2309
2310 /*
2311 * If this is a leaf
2312 */
2313 if (chain == NULL) {
2314 if (bref.mirror_tid > sync_tid) {
2315 hammer2_freemap_adjust(hmp, &bref,
2316 HAMMER2_FREEMAP_DORECOVER);
2317 }
2318 continue;
2319 }
2320
2321 /*
2322 * This may or may not be a recursive node.
2323 */
2324 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
2325 if (bref.mirror_tid > sync_tid) {
2326 ++info->depth;
2327 tmp_error = hammer2_recovery_scan(hmp, chain,
2328 info, sync_tid);
2329 --info->depth;
2330 } else {
2331 tmp_error = 0;
2332 }
2333
2334 /*
2335 * Flush the recovery at the PFS boundary to stage it for
2336 * the final flush of the super-root topology.
2337 */
2338 if (tmp_error == 0 &&
2339 (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
2340 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
2341 hammer2_flush(chain, HAMMER2_FLUSH_TOP |
2342 HAMMER2_FLUSH_ALL);
2343 }
2344 rup_error |= tmp_error;
2345 }
2346 return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
2347 }
2348
2349 /*
2350 * This fixes up an error introduced in earlier H2 implementations where
2351 * moving a PFS inode into an indirect block wound up causing the
2352 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared.
2353 */
2354 static
2355 int
2356 hammer2_fixup_pfses(hammer2_dev_t *hmp)
2357 {
2358 const hammer2_inode_data_t *ripdata;
2359 hammer2_chain_t *parent;
2360 hammer2_chain_t *chain;
2361 hammer2_key_t key_next;
2362 hammer2_pfs_t *spmp;
2363 int error;
2364
2365 error = 0;
2366
2367 /*
2368 * Lookup mount point under the media-localized super-root.
2369 *
2370 * cluster->pmp will incorrectly point to spmp and must be fixed
2371 * up later on.
2372 */
2373 spmp = hmp->spmp;
2374 hammer2_inode_lock(spmp->iroot, 0);
2375 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
2376 chain = hammer2_chain_lookup(&parent, &key_next,
2377 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
2378 &error, 0);
2379 while (chain) {
2380 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE)
2381 continue;
2382 if (chain->error) {
2383 kprintf("I/O error scanning PFS labels\n");
2384 error |= chain->error;
2385 } else if ((chain->bref.flags &
2386 HAMMER2_BREF_FLAG_PFSROOT) == 0) {
2387 int error2;
2388
2389 ripdata = &chain->data->ipdata;
2390 hammer2_trans_init(hmp->spmp, 0);
2391 error2 = hammer2_chain_modify(chain,
2392 chain->bref.modify_tid,
2393 0, 0);
2394 if (error2 == 0) {
2395 kprintf("hammer2: Correct mis-flagged PFS %s\n",
2396 ripdata->filename);
2397 chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
2398 } else {
2399 error |= error2;
2400 }
2401 hammer2_flush(chain, HAMMER2_FLUSH_TOP |
2402 HAMMER2_FLUSH_ALL);
2403 hammer2_trans_done(hmp->spmp, 0);
2404 }
2405 chain = hammer2_chain_next(&parent, chain, &key_next,
2406 key_next, HAMMER2_KEY_MAX,
2407 &error, 0);
2408 }
2409 if (parent) {
2410 hammer2_chain_unlock(parent);
2411 hammer2_chain_drop(parent);
2412 }
2413 hammer2_inode_unlock(spmp->iroot);
2414
2415 return error;
2416 }
2417
2418 /*
2419 * Sync a mount point; this is called periodically on a per-mount basis from
2420 * the filesystem syncer, and whenever a user issues a sync.
2421 */
2422 int
2423 hammer2_vfs_sync(struct mount *mp, int waitfor)
2424 {
2425 int error;
2426
2427 error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor);
2428
2429 return error;
2430 }
2431
2432 /*
2433 * Because frontend operations lock vnodes before we get a chance to
2434 * lock the related inode, we can't just acquire a vnode lock without
2435 * risking a deadlock. The frontend may be holding a vnode lock while
2436 * also blocked on our SYNCQ flag while trying to get the inode lock.
2437 *
2438 * To deal with this we check the vnode lock state after locking the
2439 * inode and perform a work-around if needed.
2440 */
2441 int
2442 hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor)
2443 {
2444 hammer2_inode_t *ip;
2445 hammer2_depend_t *depend;
2446 hammer2_depend_t *depend_next;
2447 struct vnode *vp;
2448 uint32_t pass2;
2449 int error;
2450 int wakecount;
2451 int dorestart;
2452
2453 /*
2454 * Move all inodes on sideq to syncq. This will clear sideq.
2455 * This should represent all flushable inodes. These inodes
2456 * will already have refs due to being on syncq or sideq. We
2457 * must do this all at once with the spinlock held to ensure that
2458 * all inode dependencies are part of the same flush.
2459 *
2460 * We should be able to do this asynchronously from frontend
2461 * operations because we will be locking the inodes later on
2462 * to actually flush them, and that will partition any frontend
2463 * op using the same inode. Either it has already locked the
2464 * inode and we will block, or it has not yet locked the inode
2465 * and it will block until we are finished flushing that inode.
2466 *
2467 * When restarting, only move the inodes flagged as PASS2 from
2468 * SIDEQ to SYNCQ. PASS2 propagation by inode_lock4() and
2469 * inode_depend() are atomic with the spin-lock.
2470 */
2471 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
2472 #ifdef HAMMER2_DEBUG_SYNC
2473 kprintf("FILESYSTEM SYNC BOUNDARY\n");
2474 #endif
2475 dorestart = 0;
2476
2477 /*
2478 * Move inodes from depq to syncq, releasing the related
2479 * depend structures.
2480 */
2481 restart:
2482 #ifdef HAMMER2_DEBUG_SYNC
2483 kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart);
2484 #endif
2485 hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/);
2486 hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN);
2487
2488 /*
2489 * Move inodes from depq to syncq. When restarting, only depq's
2490 * marked pass2 are moved.
2491 */
2492 hammer2_spin_ex(&pmp->list_spin);
2493 depend_next = TAILQ_FIRST(&pmp->depq);
2494 wakecount = 0;
2495
2496 while ((depend = depend_next) != NULL) {
2497 depend_next = TAILQ_NEXT(depend, entry);
2498 if (dorestart && depend->pass2 == 0)
2499 continue;
2500 TAILQ_FOREACH(ip, &depend->sideq, entry) {
2501 KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
2502 atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
2503 atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
2504 ip->depend = NULL;
2505 }
2506
2507 /*
2508 * NOTE: pmp->sideq_count includes both sideq and syncq
2509 */
2510 TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry);
2511
2512 depend->count = 0;
2513 depend->pass2 = 0;
2514 TAILQ_REMOVE(&pmp->depq, depend, entry);
2515 }
2516
2517 hammer2_spin_unex(&pmp->list_spin);
2518 hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/
2519 HAMMER2_TRANS_WAITING);
2520 dorestart = 0;
2521
2522 /*
2523 * sideq_count may have dropped enough to allow us to unstall
2524 * the frontend.
2525 */
2526 hammer2_pfs_memory_wakeup(pmp, 0);
2527
2528 /*
2529 * Now run through all inodes on syncq.
2530 *
2531 * Flush transactions only interlock with other flush transactions.
2532 * Any conflicting frontend operations will block on the inode, but
2533 * may hold a vnode lock while doing so.
2534 */
2535 hammer2_spin_ex(&pmp->list_spin);
2536 while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) {
2537 /*
2538 * Remove the inode from the SYNCQ, transfer the syncq ref
2539 * to us. We must clear SYNCQ so that any potentially
2540 * deadlocking front-end operation can proceed. We must set PASS2 so
2541 * the dependency code knows what to do.
2542 */
2543 pass2 = ip->flags;
2544 cpu_ccfence();
2545 if (atomic_cmpset_int(&ip->flags,
2546 pass2,
2547 (pass2 & ~(HAMMER2_INODE_SYNCQ |
2548 HAMMER2_INODE_SYNCQ_WAKEUP)) |
2549 HAMMER2_INODE_SYNCQ_PASS2) == 0)
2550 {
2551 continue;
2552 }
2553 TAILQ_REMOVE(&pmp->syncq, ip, entry);
2554 --pmp->sideq_count;
2555 hammer2_spin_unex(&pmp->list_spin);
2556
2557 /*
2558 * Tickle anyone waiting on ip->flags or the hysteresis
2559 * on the dirty inode count.
2560 */
2561 if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP)
2562 wakeup(&ip->flags);
2563 if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) {
2564 wakecount = 0;
2565 hammer2_pfs_memory_wakeup(pmp, 0);
2566 }
2567
2568 /*
2569 * Relock the inode, and we inherit a ref from the above.
2570 * We will check for a race after we acquire the vnode.
2571 */
2572 hammer2_mtx_ex(&ip->lock);
2573
2574 /*
2575 * We need the vp in order to vfsync() dirty buffers, so if
2576 * one isn't attached we can skip it.
2577 *
2578 * Ordering the inode lock and then the vnode lock has the
2579 * potential to deadlock. If we had left SYNCQ set that could
2580 * also deadlock us against the frontend even if we don't hold
2581 * any locks, but the latter is not a problem now since we
2582 * cleared it. igetv will temporarily release the inode lock
2583 * in a safe manner to work-around the deadlock.
2584 *
2585 * Unfortunately it is still possible to deadlock when the
2586 * frontend obtains multiple inode locks, because all the
2587 * related vnodes are already locked (nor can the vnode locks
2588 * be released and reacquired without messing up RECLAIM and
2589 * INACTIVE sequencing).
2590 *
2591 * The solution for now is to move the vp back onto SIDEQ
2592 * and set dorestart, which will restart the flush after we
2593 * exhaust the current SYNCQ. Note that additional
2594 * dependencies may build up, so we definitely need to move
2595 * the whole SIDEQ back to SYNCQ when we restart.
2596 */
2597 vp = ip->vp;
2598 if (vp) {
2599 if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) {
2600 /*
2601 * Failed to get the vnode, requeue the inode
2602 * (PASS2 is already set so it will be found
2603 * again on the restart). Then unlock.
2604 */
2605 vp = NULL;
2606 dorestart |= 1;
2607 #ifdef HAMMER2_DEBUG_SYNC
2608 kprintf("inum %ld (sync delayed by vnode)\n",
2609 (long)ip->meta.inum);
2610 #endif
2611 hammer2_inode_delayed_sideq(ip);
2612
2613 hammer2_mtx_unlock(&ip->lock);
2614 hammer2_inode_drop(ip);
2615
2616 /*
2617 * If PASS2 was previously set we might
2618 * be looping too hard, ask for a delay
2619 * along with the restart.
2620 */
2621 if (pass2 & HAMMER2_INODE_SYNCQ_PASS2)
2622 dorestart |= 2;
2623 hammer2_spin_ex(&pmp->list_spin);
2624 continue;
2625 }
2626 } else {
2627 vp = NULL;
2628 }
2629
2630 /*
2631 * If the inode wound up on a SIDEQ again it will already be
2632 * prepped for another PASS2. In this situation if we flush
2633 * it now we will just wind up flushing it again in the same
2634 * syncer run, so we might as well not flush it now.
2635 */
2636 if (ip->flags & HAMMER2_INODE_SIDEQ) {
2637 hammer2_mtx_unlock(&ip->lock);
2638 hammer2_inode_drop(ip);
2639 if (vp)
2640 vput(vp);
2641 dorestart |= 1;
2642 hammer2_spin_ex(&pmp->list_spin);
2643 continue;
2644 }
2645
2646 /*
2647 * Ok we have the inode exclusively locked and if vp is
2648 * not NULL that will also be exclusively locked. Do the
2649 * meat of the flush.
2650 *
2651 * vp token needed for v_rbdirty_tree check / vclrisdirty
2652 * sequencing. However, since we hold the vnode exclusively,
2653 * we should not need to hold the token in this case.
2654 */
2655 if (vp) {
2656 vfsync(vp, MNT_WAIT, 1, NULL, NULL);
2657 bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */
2658 }
2659
2660 /*
2661 * If the inode has not yet been inserted into the tree
2662 * we must do so. Then sync and flush it. The flush should
2663 * update the parent.
2664 */
2665 if (ip->flags & HAMMER2_INODE_DELETING) {
2666 #ifdef HAMMER2_DEBUG_SYNC
2667 kprintf("inum %ld destroy\n", (long)ip->meta.inum);
2668 #endif
2669 hammer2_inode_chain_des(ip);
2670 atomic_add_long(&hammer2_iod_inode_deletes, 1);
2671 } else if (ip->flags & HAMMER2_INODE_CREATING) {
2672 #ifdef HAMMER2_DEBUG_SYNC
2673 kprintf("inum %ld insert\n", (long)ip->meta.inum);
2674 #endif
2675 hammer2_inode_chain_ins(ip);
2676 atomic_add_long(&hammer2_iod_inode_creates, 1);
2677 }
2678 #ifdef HAMMER2_DEBUG_SYNC
2679 kprintf("inum %ld chain-sync\n", (long)ip->meta.inum);
2680 #endif
2681
2682 /*
2683 * Because I kinda messed up the design and index the inodes
2684 * under the root inode, alongside the directory entries,
2685 * we can't flush the inode index under the iroot until the
2686 * end. If we do it now we might miss effects created by
2687 * other inodes on the SYNCQ.
2688 *
2689 * Do a normal (non-FSSYNC) flush instead, which allows the
2690 * vnode code to work the same. We don't want to force iroot
2691 * back onto the SIDEQ, and we also don't want the flush code
2692 * to update pfs_iroot_blocksets until the final flush later.
2693 *
2694 * XXX at the moment this will likely result in a double-flush
2695 * of the iroot chain.
2696 */
2697 hammer2_inode_chain_sync(ip);
2698 if (ip == pmp->iroot) {
2699 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
2700 } else {
2701 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
2702 HAMMER2_XOP_FSSYNC);
2703 }
2704 if (vp) {
2705 lwkt_gettoken(&vp->v_token);
2706 if ((ip->flags & (HAMMER2_INODE_MODIFIED |
2707 HAMMER2_INODE_RESIZED |
2708 HAMMER2_INODE_DIRTYDATA)) == 0 &&
2709 RB_EMPTY(&vp->v_rbdirty_tree) &&
2710 !bio_track_active(&vp->v_track_write)) {
2711 vclrisdirty(vp);
2712 } else {
2713 hammer2_inode_delayed_sideq(ip);
2714 }
2715 lwkt_reltoken(&vp->v_token);
2716 vput(vp);
2717 vp = NULL; /* safety */
2718 }
2719 atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2);
2720 hammer2_inode_unlock(ip); /* unlock+drop */
2721 /* ip pointer invalid */
2722
2723 /*
2724 * If the inode got dirtied after we dropped our locks,
2725 * it will have already been moved back to the SIDEQ.
2726 */
2727 hammer2_spin_ex(&pmp->list_spin);
2728 }
2729 hammer2_spin_unex(&pmp->list_spin);
2730 hammer2_pfs_memory_wakeup(pmp, 0);
2731
2732 if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) {
2733 /*
2734 * bit 2 is set if something above thinks we might be
2735 * looping too hard, try to unclog the frontend
2736 * dependency and wait a bit before restarting.
2737 *
2738 * NOTE: The frontend could be stuck in h2memw, though
2739 * it isn't supposed to be holding vnode locks
2740 * in that case.
2741 */
2742 if (dorestart & 2) {
2743 wakeup(&pmp->inmem_dirty_chains);
2744 tsleep(&dorestart, 0, "h2syndel", 2);
2745 }
2746 #ifdef HAMMER2_DEBUG_SYNC
2747 kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n");
2748 /*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/
2749 #endif
2750 dorestart = 1;
2751 goto restart;
2752 }
2753 #ifdef HAMMER2_DEBUG_SYNC
2754 kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n");
2755 /*tsleep(&dorestart, 0, "h2STG2", hz*20);*/
2756 #endif
2757
2758 /*
2759 * We have to flush the PFS root last, even if it does not appear to
2760 * be dirty, because all the inodes in the PFS are indexed under it.
2761 * The normal flushing of iroot above would only occur if directory
2762 * entries under the root were changed.
2763 *
2764 * Specifying VOLHDR will cause an additional flush of hmp->spmp
2765 * for the media making up the cluster.
2766 */
2767 if ((ip = pmp->iroot) != NULL) {
2768 hammer2_inode_ref(ip);
2769 hammer2_mtx_ex(&ip->lock);
2770 hammer2_inode_chain_sync(ip);
2771 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
2772 HAMMER2_XOP_FSSYNC |
2773 HAMMER2_XOP_VOLHDR);
2774 hammer2_inode_unlock(ip); /* unlock+drop */
2775 }
2776 #ifdef HAMMER2_DEBUG_SYNC
2777 kprintf("FILESYSTEM SYNC STAGE 2 DONE\n");
2778 #endif
2779
2780 /*
2781 * device bioq sync
2782 */
2783 hammer2_bioq_sync(pmp);
2784
2785 error = 0; /* XXX */
2786 hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH);
2787
2788 return (error);
2789 }
2790
2791 static
2792 int
2793 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
2794 {
2795 hammer2_inode_t *ip;
2796
2797 KKASSERT(MAXFIDSZ >= 16);
2798 ip = VTOI(vp);
2799 fhp->fid_len = offsetof(struct fid, fid_data[16]);
2800 fhp->fid_ext = 0;
2801 ((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
2802 ((hammer2_tid_t *)fhp->fid_data)[1] = 0;
2803
2804 return 0;
2805 }
2806
2807 static
2808 int
2809 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
2810 struct fid *fhp, struct vnode **vpp)
2811 {
2812 hammer2_tid_t inum;
2813 int error;
2814
2815 inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
2816 if (vpp) {
2817 if (inum == 1)
2818 error = hammer2_vfs_root(mp, vpp);
2819 else
2820 error = hammer2_vfs_vget(mp, NULL, inum, vpp);
2821 } else {
2822 error = 0;
2823 }
2824 return error;
2825 }
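/*
 * Illustrative sketch (not part of the driver): the NFS file-handle
 * round trip implied by the two functions above.  The fid payload is
 * 16 bytes: the inode number in the first 8 bytes, zero in the second 8.
 * The local variables here are hypothetical.
 */
#if 0
	struct fid fh;
	struct vnode *nvp;

	hammer2_vfs_vptofh(vp, &fh);		/* encode ip->meta.inum */
	error = hammer2_vfs_fhtovp(mp, NULL, &fh, &nvp);
						/* decode: root or vget */
#endif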
2826
2827 static
2828 int
2829 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
2830 int *exflagsp, struct ucred **credanonp)
2831 {
2832 hammer2_pfs_t *pmp;
2833 struct netcred *np;
2834 int error;
2835
2836 pmp = MPTOPMP(mp);
2837 np = vfs_export_lookup(mp, &pmp->export, nam);
2838 if (np) {
2839 *exflagsp = np->netc_exflags;
2840 *credanonp = &np->netc_anon;
2841 error = 0;
2842 } else {
2843 error = EACCES;
2844 }
2845 return error;
2846 }
2847
2848 /*
2849 * This handles hysteresis on regular file flushes. Because the BIOs are
2850 * routed to a thread it is possible for an excessive number to build up
2851 * and cause long front-end stalls long before the runningbuffspace limit
2852 * is hit, so we implement hammer2_flush_pipe to control the
2853 * hysteresis.
2854 *
2855 * This is a particular problem when compression is used.
2856 */
2857 void
2858 hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
2859 {
2860 atomic_add_int(&pmp->count_lwinprog, 1);
2861 }
2862
2863 void
2864 hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
2865 {
2866 int lwinprog;
2867
2868 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
2869 if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
2870 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
2871 atomic_clear_int(&pmp->count_lwinprog,
2872 HAMMER2_LWINPROG_WAITING);
2873 wakeup(&pmp->count_lwinprog);
2874 }
2875 if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
2876 (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
2877 atomic_clear_int(&pmp->count_lwinprog,
2878 HAMMER2_LWINPROG_WAITING0);
2879 wakeup(&pmp->count_lwinprog);
2880 }
2881 }
2882
2883 void
2884 hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
2885 {
2886 int lwinprog;
2887 int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
2888 HAMMER2_LWINPROG_WAITING0;
2889
2890 for (;;) {
2891 lwinprog = pmp->count_lwinprog;
2892 cpu_ccfence();
2893 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
2894 break;
2895 tsleep_interlock(&pmp->count_lwinprog, 0);
2896 atomic_set_int(&pmp->count_lwinprog, lwflag);
2897 lwinprog = pmp->count_lwinprog;
2898 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
2899 break;
2900 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
2901 }
2902 }
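/*
 * Illustrative sketch (not part of the driver): the intended pairing of
 * the hysteresis primitives above in a logical-write path.  The backend
 * hand-off shown here is hypothetical pseudo-code.
 */
#if 0
	/* frontend: one ref per logical-write BIO handed to the thread */
	hammer2_lwinprog_ref(pmp);
	queue_bio_to_backend(bio);			/* hypothetical */
	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);	/* throttle */

	/* backend thread: drop on completion, may wake a stalled waiter */
	hammer2_lwinprog_drop(pmp);
#endif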
2903
2904 /*
2905 * It is possible for an excessive number of dirty chains or dirty inodes
2906 * to build up. When this occurs we start an asynchronous filesystem sync.
2907 * If the level continues to build up, we stall, waiting for it to drop,
2908 * with some hysteresis.
2909 *
2910 * This relies on the kernel calling hammer2_vfs_modifying() prior to
2911 * obtaining any vnode locks before making a modifying VOP call.
2912 */
2913 static int
2914 hammer2_vfs_modifying(struct mount *mp)
2915 {
2916 if (mp->mnt_flag & MNT_RDONLY)
2917 return EROFS;
2918 hammer2_pfs_memory_wait(MPTOPMP(mp));
2919
2920 return 0;
2921 }
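/*
 * Illustrative sketch (not part of the driver): how the kernel is
 * expected to use this hook before a modifying VOP, per the comment
 * above.  The VFS_MODIFYING() dispatch shown here is an assumption
 * about the caller.
 */
#if 0
	error = VFS_MODIFYING(mp);	/* may stall on dirty hysteresis */
	if (error)			/* e.g. EROFS on read-only mounts */
		return (error);
	/* ...then acquire vnode locks and issue VOP_CREATE/VOP_WRITE... */
#endif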
2922
2923 /*
2924 * Initiate an asynchronous filesystem sync and, with hysteresis,
2925 * stall if the internal data structure count becomes too bloated.
2926 */
2927 void
2928 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
2929 {
2930 uint32_t waiting;
2931 int pcatch;
2932 int error;
2933 int started;
2934
2935 if (pmp == NULL || pmp->mp == NULL)
2936 return;
2937
2938 started = 0;
2939
2940 for (;;) {
2941 waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK;
2942 cpu_ccfence();
2943
2944 /*
2945 * Start the syncer running at 1/2 the limit to try
2946 * to avoid sleeping.
2947 */
2948 if (waiting > hammer2_limit_dirty_chains / 2 ||
2949 pmp->sideq_count > hammer2_limit_dirty_inodes / 2)
2950 {
2951 trigger_syncer(pmp->mp);
2952 }
2953
2954 /*
2955 * Stall at the limit waiting for the counts to drop.
2956 * This code will typically be woken up once the count
2957 * drops below 3/4 the limit, or in one second.
2958 */
2959 if (waiting < hammer2_limit_dirty_chains &&
2960 pmp->sideq_count < hammer2_limit_dirty_inodes)
2961 {
2962 break;
2963 }
2964
2965 if (started == 0) {
2966 trigger_syncer_start(pmp->mp);
2967 started = 1;
2968 }
2969
2970 /*
2971 * Interlocked re-test, sleep, and retry.
2972 */
2973 pcatch = curthread->td_proc ? PCATCH : 0;
2974 tsleep_interlock(&pmp->inmem_dirty_chains, pcatch);
2975
2976 atomic_set_int(&pmp->inmem_dirty_chains,
2977 HAMMER2_DIRTYCHAIN_WAITING);
2978
2979 if (waiting < hammer2_limit_dirty_chains &&
2980 pmp->sideq_count < hammer2_limit_dirty_inodes) {
2981 break;
2982 }
2983 error = tsleep(&pmp->inmem_dirty_chains,
2984 PINTERLOCKED | pcatch,
2985 "h2memw", hz);
2986 if (error == ERESTART)
2987 break;
2988 }
2989 if (started)
2990 trigger_syncer_stop(pmp->mp);
2991 }
2992
2993 /*
2994 * Wake up any stalled frontend ops waiting, with hysteresis, using
2995 * 2/3 of the limit.
2996 */
2997 void
2998 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count)
2999 {
3000 uint32_t waiting;
3001
3002 if (pmp) {
3003 waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count);
3004 /* don't need --waiting to test flag */
3005
3006 if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) &&
3007 (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <=
3008 hammer2_limit_dirty_chains * 2 / 3 &&
3009 pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) {
3010 atomic_clear_int(&pmp->inmem_dirty_chains,
3011 HAMMER2_DIRTYCHAIN_WAITING);
3012 wakeup(&pmp->inmem_dirty_chains);
3013 }
3014 }
3015 }
3016
3017 void
3018 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
3019 {
3020 if (pmp) {
3021 atomic_add_int(&pmp->inmem_dirty_chains, 1);
3022 }
3023 }
3024
3025 /*
3026 * Volume header data locks
3027 */
3028 void
3029 hammer2_voldata_lock(hammer2_dev_t *hmp)
3030 {
3031 lockmgr(&hmp->vollk, LK_EXCLUSIVE);
3032 }
3033
3034 void
3035 hammer2_voldata_unlock(hammer2_dev_t *hmp)
3036 {
3037 lockmgr(&hmp->vollk, LK_RELEASE);
3038 }
3039
3040 /*
3041 * Caller indicates that the volume header is being modified. Flag
3042 * the related chain and adjust its transaction id.
3043 *
3044 * The transaction id is set to voldata.mirror_tid + 1, similar to
3045 * what hammer2_chain_modify() does. Be very careful here, volume
3046 * data can be updated independently of the rest of the filesystem.
3047 */
3048 void
3049 hammer2_voldata_modify(hammer2_dev_t *hmp)
3050 {
3051 if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
3052 atomic_add_long(&hammer2_count_modified_chains, 1);
3053 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
3054 hammer2_pfs_memory_inc(hmp->vchain.pmp);
3055 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid + 1;
3056 }
3057 }
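/*
 * Illustrative sketch (not part of the driver): the expected pattern for
 * updating volume header fields using the helpers above.  The specific
 * field update shown is hypothetical.
 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_modify(hmp);	/* flag vchain, set mirror_tid + 1 */
	hmp->voldata.freemap_tid = new_tid;	/* hypothetical update */
	hammer2_voldata_unlock(hmp);
#endif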
3058
3059 /*
3060 * Returns 0 if the filesystem has tons of free space
3061 * Returns 1 if the filesystem has less than 10% remaining
3062 * Returns 2 if the filesystem has less than 5%/2.5% (user/root) remaining.
3063 */
3064 int
3065 hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
3066 {
3067 hammer2_pfs_t *pmp;
3068 hammer2_dev_t *hmp;
3069 hammer2_off_t free_reserved;
3070 hammer2_off_t free_nominal;
3071 int i;
3072
3073 pmp = ip->pmp;
3074
3075 if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
3076 free_reserved = HAMMER2_SEGSIZE;
3077 free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
3078 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
3079 hmp = pmp->pfs_hmps[i];
3080 if (hmp == NULL)
3081 continue;
3082 if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
3083 pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
3084 continue;
3085
3086 if (free_nominal > hmp->voldata.allocator_free)
3087 free_nominal = hmp->voldata.allocator_free;
3088 if (free_reserved < hmp->free_reserved)
3089 free_reserved = hmp->free_reserved;
3090 }
3091
3092 /*
3093 * SMP races ok
3094 */
3095 pmp->free_reserved = free_reserved;
3096 pmp->free_nominal = free_nominal;
3097 pmp->free_ticks = ticks;
3098 } else {
3099 free_reserved = pmp->free_reserved;
3100 free_nominal = pmp->free_nominal;
3101 }
3102 if (cred && cred->cr_uid != 0) {
3103 if ((int64_t)(free_nominal - bytes) <
3104 (int64_t)free_reserved) {
3105 return 2;
3106 }
3107 } else {
3108 if ((int64_t)(free_nominal - bytes) <
3109 (int64_t)free_reserved / 2) {
3110 return 2;
3111 }
3112 }
3113 if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
3114 return 1;
3115 return 0;
3116 }
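/*
 * Illustrative sketch (not part of the driver): mapping the return
 * levels above to caller behavior in a hypothetical write path.
 */
#if 0
	switch (hammer2_vfs_enospace(ip, bytes, cred)) {
	case 2:
		return (ENOSPC);	/* reserve breached, hard failure */
	case 1:
		/* low space: e.g. flush synchronously to reclaim */
		break;
	default:
		break;			/* plenty of space */
	}
#endif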
3117