/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2023 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/file.h>
*/

#include "hammer2.h"

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct hammer2_pfslist hammer2_spmplist;
struct lock hammer2_mntlk;
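
/*
 * hammer2_mntlist has one entry per opened media device (hmp), while
 * hammer2_pfslist tracks probed PFS clusters and hammer2_spmplist the
 * per-device super-root PFSs.  All three are protected by hammer2_mntlk
 * (see hammer2_vfs_mount() and hammer2_pfsalloc() below).
 */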

int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
int hammer2_debug;
int hammer2_aux_flags;
int hammer2_xop_nthreads;
int hammer2_xop_sgroups;
int hammer2_xop_xgroups;
int hammer2_xop_xbase;
int hammer2_xop_mod;
long hammer2_debug_inode;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_cluster_write = 0;		/* physical write clustering */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_flush_pipe = 100;
int hammer2_dio_count;
int hammer2_dio_limit = 256;
int hammer2_bulkfree_tps = 5000;
int hammer2_spread_workers;
int hammer2_limit_saved_depth;
long hammer2_chain_allocs;
long hammer2_limit_saved_chains;
long hammer2_limit_dirty_chains;
long hammer2_limit_dirty_inodes;
long hammer2_count_modified_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
static long hammer2_iod_inode_creates;
static long hammer2_iod_inode_deletes;

long hammer2_process_icrc32;
long hammer2_process_xxhash64;

int hz;
int ticks;
int64_t vnode_count;
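
/*
 * hz, ticks and vnode_count are kernel globals in DragonFly; in this
 * userland build they appear to be plain stand-ins, consistent with the
 * hardwired ncpus/nbuf/maxvnodes values in hammer2_vfs_init() below.
 */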

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer2_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, aux_flags, CTLFLAG_RW,
	   &hammer2_aux_flags, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer2_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, spread_workers, CTLFLAG_RW,
	   &hammer2_spread_workers, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
	   &hammer2_cluster_write, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RD,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_saved_chains, CTLFLAG_RW,
	   &hammer2_limit_saved_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, limit_saved_depth, CTLFLAG_RW,
	   &hammer2_limit_saved_depth, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW,
	   &hammer2_limit_dirty_inodes, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RD,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
	   &hammer2_dio_limit, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RD,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RD,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RD,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RD,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RD,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RD,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RD,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RD,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RD,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RD,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RD,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RD,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RD,
	   &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RD,
	   &hammer2_iod_inode_creates, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RD,
	   &hammer2_iod_inode_deletes, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RD,
	   &hammer2_process_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RD,
	   &hammer2_process_xxhash64, 0, "");

/*
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct ucred *);
*/
static int hammer2_recovery(hammer2_dev_t *hmp);
/*
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct m_vnode **vpp);
*/
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
/*
static int hammer2_vfs_fhtovp(struct mount *mp, struct m_vnode *rootvp,
				struct fid *fhp, struct m_vnode **vpp);
static int hammer2_vfs_vptofh(struct m_vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static int hammer2_vfs_modifying(struct mount *mp);
*/

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
static int hammer2_fixup_pfses(hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
/*
static struct vfsops hammer2_vfsops = {
	.vfs_flags = 0,
	.vfs_init = hammer2_vfs_init,
	.vfs_uninit = hammer2_vfs_uninit,
	.vfs_sync = hammer2_vfs_sync,
	.vfs_mount = hammer2_vfs_mount,
	.vfs_unmount = hammer2_vfs_unmount,
	.vfs_root = hammer2_vfs_root,
	.vfs_statfs = hammer2_vfs_statfs,
	.vfs_statvfs = hammer2_vfs_statvfs,
	.vfs_vget = hammer2_vfs_vget,
	.vfs_vptofh = hammer2_vfs_vptofh,
	.vfs_fhtovp = hammer2_vfs_fhtovp,
	.vfs_checkexp = hammer2_vfs_checkexp,
	.vfs_modifying = hammer2_vfs_modifying
};
*/

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);

int
hammer2_vfs_init(void)
{
	/*
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;
	*/

	int error;
	int mod;

	error = 0;
	kmalloc_raise_limit(M_HAMMER2, 0);	/* unlimited */

	/*
	 * hammer2_xop_nthreads must be a multiple of ncpus,
	 * minimum 2 * ncpus.
	 */
	const int ncpus = 1;
	mod = ncpus;
	hammer2_xop_mod = mod;
	hammer2_xop_nthreads = mod * 2;
	/*
	while (hammer2_xop_nthreads / mod < HAMMER2_XOPGROUPS_MIN ||
	    hammer2_xop_nthreads < HAMMER2_XOPTHREADS_MIN)
	{
		hammer2_xop_nthreads += mod;
	}
	hammer2_xop_sgroups = hammer2_xop_nthreads / mod / 2;
	hammer2_xop_xgroups = hammer2_xop_nthreads / mod - hammer2_xop_sgroups;
	hammer2_xop_xbase = hammer2_xop_sgroups * mod;
	*/
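	/*
	 * With ncpus hardwired to 1 above, this reduces to
	 * hammer2_xop_mod = 1 and hammer2_xop_nthreads = 2; the
	 * commented-out group computation only becomes meaningful in the
	 * kernel build where ncpus can be larger.
	 */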

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO cache large
	 *	 enough to avoid this problem, but also cap it.
	 */
	const long nbuf = 100000; /* XXX */
	hammer2_dio_limit = nbuf * 2;
	if (hammer2_dio_limit > 100000)
		hammer2_dio_limit = 100000;
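	/*
	 * With nbuf hardwired to 100000, nbuf * 2 = 200000 exceeds the
	 * cap above, so hammer2_dio_limit ends up clamped to 100000 here.
	 */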

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error) {
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
		return (error);
	}

#if 0
	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);
#endif

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);
	TAILQ_INIT(&hammer2_spmplist);

	const int maxvnodes = 100000; /* XXX */
	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;
	if (hammer2_limit_dirty_chains < 1000)
		hammer2_limit_dirty_chains = 1000;

	hammer2_limit_dirty_inodes = maxvnodes / 25;
	if (hammer2_limit_dirty_inodes < 100)
		hammer2_limit_dirty_inodes = 100;
	if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES)
		hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES;

	hammer2_limit_saved_chains = hammer2_limit_dirty_chains * 5;
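	/*
	 * With maxvnodes hardwired to 100000 this works out to 10000 dirty
	 * chains, 4000 dirty inodes and 50000 saved chains, assuming the
	 * HAMMER2_LIMIT_DIRTY_* ceilings do not clamp further.
	 */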

	return (error);
}

int
hammer2_vfs_uninit(void)
{
	/*
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	*/
	return 0;
}

/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 *
 * XXX check locking
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
				   strcmp(pmp->pfs_names[0],
					  (const char *)ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes",
				   sizeof(struct hammer2_inode));
		lockinit(&pmp->lock, "pfslk", 0, 0);
		hammer2_spin_init(&pmp->blockset_spin, "h2blkset");
		hammer2_inum_hash_init(pmp);
		hammer2_spin_init(&pmp->xop_spin, "h2xop");
		TAILQ_INIT(&pmp->syncq);
		TAILQ_INIT(&pmp->depq);
		TAILQ_INIT(&pmp->recq);
		hammer2_spin_init(&pmp->list_spin, "h2pfsalloc_list");

		/*
		 * Save the last media transaction id for the flusher.  Set
		 * initial
		 */
		if (ripdata) {
			pmp->pfs_clid = ripdata->meta.pfs_clid;
			TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
		} else {
			pmp->flags |= HAMMER2_PMPF_SPMP;
			TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry);
		}

		/*
		 * The synchronization thread may start too early; make
		 * sure it stays frozen until we are ready to let it go.
		 * XXX
		 */
		/*
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
		*/
	}

	/*
	 * Create the PFS's root inode and any missing XOP helper threads.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, 1, -1);
		if (ripdata)
			iroot->meta = ripdata->meta;
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot);
	}

	/*
	 * Stop here if no chain is passed in.
	 */
	if (chain == NULL)
		goto done;

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code. XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	if (j == HAMMER2_MAXCLUSTER) {
		kprintf("hammer2_pfsalloc: cluster full!\n");
		/* XXX fatal error? */
	} else {
		KKASSERT(chain->pmp == NULL);
		chain->pmp = pmp;
		hammer2_chain_ref(chain);
		iroot->cluster.array[j].chain = chain;
		if (force_local)
			pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
		else
			pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup((const char *)ripdata->filename,
					    M_HAMMER2);
		pmp->pfs_hmps[j] = chain->hmp;
		hammer2_spin_ex(&pmp->blockset_spin);
		pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset;
		hammer2_spin_unex(&pmp->blockset_spin);

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++chain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 *  discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		/*
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
		*/
	}

	/*
	 * Create missing Xop threads
	 *
	 * NOTE: We create helper threads for all mounted PFSs or any
	 *	 PFSs with 2+ nodes (so the sync thread can update them,
	 *	 even if not mounted).
	 */
	if (pmp->mp || iroot->cluster.nchains >= 2)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}

/*
 * Deallocate an element of a probed PFS.  If destroying and this is a
 * MASTER, adjust nmasters.
 *
 * This function does not physically destroy the PFS element in its device
 * under the super-root (see hammer2_ioctl_pfs_delete()).
 */
void
hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int j;

	/*
	 * Clean up our reference on iroot.  iroot is not (and should not
	 * be) needed by the flush code.
	 */
	iroot = pmp->iroot;
	if (iroot) {
		/*
		 * Stop synchronizing
		 *
		 * XXX flush after acquiring the iroot lock.
		 * XXX clean out the cluster index from all inode structures.
		 */
		hammer2_thr_delete(&pmp->sync_thrs[clindex]);

		/*
		 * Remove the cluster index from the group.  If destroying
		 * the PFS and this is a master, adjust pfs_nmasters.
		 */
		hammer2_mtx_ex(&iroot->lock);
		chain = iroot->cluster.array[clindex].chain;
		iroot->cluster.array[clindex].chain = NULL;

		switch(pmp->pfs_types[clindex]) {
		case HAMMER2_PFSTYPE_MASTER:
			if (destroying && pmp->pfs_nmasters > 0)
				--pmp->pfs_nmasters;
			/* XXX adjust ripdata->meta.pfs_nmasters */
			break;
		default:
			break;
		}
		pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;

		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Release the chain.
		 */
		if (chain) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
			hammer2_chain_drop(chain);
		}

		/*
		 * Terminate all XOP threads for the cluster index.
		 */
		if (pmp->xop_groups) {
			for (j = 0; j < hammer2_xop_nthreads; ++j) {
				hammer2_thr_delete(
					&pmp->xop_groups[j].thrs[clindex]);
			}
		}
	}
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot, *ip;
	hammer2_chain_t *chain;
	int chains_still_present = 0;
	int i;
	//int j;

	/*
	 * Clean up our reference on iroot.  iroot is not (and should not
	 * be) needed by the flush code.
	 */
	if (pmp->flags & HAMMER2_PMPF_SPMP)
		TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry);
	else
		TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	/*
	 * Clean up iroot
	 */
	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			/*
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j)
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
			}
			*/
			chain = iroot->cluster.array[i].chain;
			if (chain && !RB_EMPTY(&chain->core.rbtree)) {
				kprintf("hammer2: Warning pmp %p still "
					"has active chains\n", pmp);
				chains_still_present = 1;
			}
		}
		KASSERT(iroot->refs == 1,
			("PMP->IROOT %p REFS WRONG %d", iroot, iroot->refs));

		/* ref for iroot */
		hammer2_inode_drop(iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Free remaining pmp resources
	 */
	if (chains_still_present) {
		kprintf("hammer2: cannot free pmp %p, still in use\n", pmp);
	} else {
		/*
		 * Free inodes in the reclaim queue.
		 */
		while ((ip = TAILQ_FIRST(&pmp->recq)) != NULL) {
			TAILQ_REMOVE(&pmp->recq, ip, recq_entry);
			/*
			 * VOP_RECLAIM is currently unused,
			 * so directly free vnode before inode.
			 */
			if (ip->vp) {
				if (ip->vp->v_malloced)
					freevnode(ip->vp);
			} else {
				/* PFS inode ? */
			}
			kfree_obj(ip, pmp->minode);
			atomic_add_long(&pmp->inmem_inodes, -1);
		}
		assert(TAILQ_EMPTY(&pmp->recq));
		assert(pmp->inmem_inodes == 0);

		kmalloc_destroy_obj(&pmp->minode);
		kfree(pmp, M_HAMMER2);
	}
}

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 *
 * XXX inefficient.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int i;
	//int j;
	struct hammer2_pfslist *wlist;

	if (which == 0)
		wlist = &hammer2_pfslist;
	else
		wlist = &hammer2_spmplist;
again:
	TAILQ_FOREACH(pmp, wlist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i == HAMMER2_MAXCLUSTER)
			continue;

		hammer2_vfs_sync_pmp(pmp, MNT_WAIT);

		/*
		 * Make sure all synchronization threads are locked
		 * down.
		 */
		/*
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
		*/

		/*
		 * Lock the inode and clean out matching chains.
		 * Note that we cannot use hammer2_inode_lock_*()
		 * here because that would attempt to validate the
		 * cluster that we are in the middle of ripping
		 * apart.
		 *
		 * WARNING! We are working directly on the inode's
		 *	    embedded cluster.
		 */
		hammer2_mtx_ex(&iroot->lock);

		/*
		 * Remove the chain from matching elements of the PFS.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			/*
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			*/
			rchain = iroot->cluster.array[i].chain;
			iroot->cluster.array[i].chain = NULL;
			pmp->pfs_types[i] = HAMMER2_PFSTYPE_NONE;
			if (pmp->pfs_names[i]) {
				kfree(pmp->pfs_names[i], M_HAMMER2);
				pmp->pfs_names[i] = NULL;
			}
			if (rchain) {
				hammer2_chain_drop(rchain);
				/* focus hint */
				if (iroot->cluster.focus == rchain)
					iroot->cluster.focus = NULL;
			}
			pmp->pfs_hmps[i] = NULL;
		}
		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			/*
			 * If this was the hmp's spmp, we need to clean
			 * a little more stuff out.
			 */
			if (hmp->spmp == pmp) {
				hmp->spmp = NULL;
				hmp->vchain.pmp = NULL;
				hmp->fchain.pmp = NULL;
			}

			/*
			 * Free the pmp and restart the loop
			 */
			KKASSERT(TAILQ_EMPTY(&pmp->syncq));
			KKASSERT(TAILQ_EMPTY(&pmp->depq));
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			/*
			hammer2_thr_remaster(&pmp->sync_thrs[i]);
			hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			*/
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media.
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
int
hammer2_vfs_mount(struct m_vnode *makefs_devvp, struct mount *mp,
		  const char *label, const struct hammer2_mount_info *mi)
{
	struct hammer2_mount_info info = *mi;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp, *hmp_tmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	const hammer2_inode_data_t *ripdata;
	hammer2_devvp_list_t devvpl;
	hammer2_devvp_t *e, *e_tmp;
	char *devstr;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	int error;
	int i;

	hmp = NULL;
	pmp = NULL;
	devstr = NULL;

	kprintf("hammer2_mount: device=\"%s\" label=\"%s\" rdonly=%d\n",
		devstr, label, ronly);

	/*
	 * Initialize all device vnodes.
	 */
	TAILQ_INIT(&devvpl);
	error = hammer2_init_devvp(makefs_devvp, &devvpl);
	if (error) {
		kprintf("hammer2: failed to initialize devvp in %s\n", devstr);
		hammer2_cleanup_devvp(&devvpl);
		return error;
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (!TAILQ_EMPTY(&devvpl)) {
		/*
		 * Match the device.  Due to the way devfs works,
		 * we may not be able to directly match the vnode pointer,
		 * so also check to see if the underlying device matches.
		 */
		TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) {
			TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) {
				int devvp_found = 0;
				TAILQ_FOREACH(e, &devvpl, entry) {
					KKASSERT(e->devvp);
					if (e_tmp->devvp == e->devvp)
						devvp_found = 1;
					/*
					if (e_tmp->devvp->v_rdev &&
					    e_tmp->devvp->v_rdev == e->devvp->v_rdev)
						devvp_found = 1;
					*/
				}
				if (!devvp_found)
					goto next_hmp;
			}
			hmp = hmp_tmp;
			kprintf("hammer2_mount: hmp=%p matched\n", hmp);
			break;
next_hmp:
			continue;
		}

		/*
		 * If no match this may be a fresh H2 mount, make sure
		 * the device is not mounted on anything else.
		 */
		if (hmp == NULL) {
			TAILQ_FOREACH(e, &devvpl, entry) {
				struct m_vnode *devvp = e->devvp;
				KKASSERT(devvp);
				error = vfs_mountedon(devvp);
				if (error) {
					kprintf("hammer2_mount: %s mounted %d\n",
						e->path, error);
					hammer2_cleanup_devvp(&devvpl);
					lockmgr(&hammer2_mntlk, LK_RELEASE);
					return error;
				}
			}
		}
	} else {
		/*
		 * Match the label to a pmp already probed.
		 */
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_names[i] &&
				    strcmp(pmp->pfs_names[i], label) == 0) {
					hmp = pmp->pfs_hmps[i];
					break;
				}
			}
			if (hmp)
				break;
		}
		if (hmp == NULL) {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return ENOENT;
		}
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;
		hammer2_xop_head_t xop;

		/*
		 * Now open the device
		 */
		KKASSERT(!TAILQ_EMPTY(&devvpl));
		error = hammer2_open_devvp(&devvpl, ronly);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}

		/*
		 * Construct volumes and link with device vnodes.
		 */
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->devvp = NULL;
		error = hammer2_init_vfsvolumes(mp, &devvpl, hmp->volumes,
						&hmp->voldata, &hmp->volhdrno,
						&hmp->devvp);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			kfree(hmp, M_HAMMER2);
			return error;
		}
		if (!hmp->devvp) {
			kprintf("hammer2: failed to initialize root volume\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s",
			  devstr);
		hmp->ronly = ronly;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains",
				   sizeof(struct hammer2_chain));
		kmalloc_create_obj(&hmp->mio, "HAMMER2-dio",
				   sizeof(struct hammer2_io));
		kmalloc_create(&hmp->mmsg, "HAMMER2-msg");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		hammer2_io_hash_init(hmp);
		hammer2_spin_init(&hmp->list_spin, "h2mount_list");

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);
		lockinit(&hmp->bflock, "h2bflk", 0, 0);

		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hammer2_chain_init(&hmp->vchain);

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level radix tree
		 * so it does not interfere with the volume's topology
		 * radix tree.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hammer2_chain_init(&hmp->fchain);

		/*
		 * Initialize volume header related fields.
		 */
		KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO ||
			 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO);
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
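		/* i.e. roughly 5% of the allocator space is held in reserve */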
		/*
		 * Must use hmp instead of volume header for these two
		 * in order to handle volume versions transparently.
		 */
		if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) {
			hmp->nvolumes = hmp->voldata.nvolumes;
			hmp->total_size = hmp->voldata.total_size;
		} else {
			hmp->nvolumes = 1;
			hmp->total_size = hmp->voldata.volu_size;
		}
		KKASSERT(hmp->nvolumes > 0);

		/*
		 * Move devvpl entries to hmp.
		 */
		TAILQ_INIT(&hmp->devvpl);
		while ((e = TAILQ_FIRST(&devvpl)) != NULL) {
			TAILQ_REMOVE(&devvpl, e, entry);
			TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry);
		}
		KKASSERT(TAILQ_EMPTY(&devvpl));
		KKASSERT(!TAILQ_EMPTY(&hmp->devvpl));

		/*
		 * Really important to get these right or the flush and
		 * teardown code will get confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, NULL);
		spmp = hmp->spmp;
		spmp->pfs_hmps[0] = hmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory
		 * keyspace represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
					      HAMMER2_SROOT_KEY,
					      HAMMER2_SROOT_KEY,
					      &error, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &schain->data->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		hammer2_dummy_xop_from_chain(&xop, schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(&xop.cluster);
		hammer2_chain_drop(schain);
		/* do not call hammer2_cluster_drop() on an embedded cluster */
		schain = NULL;	/* now invalid */
		/* leave spmp->iroot with one ref */

		if (!hmp->ronly) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);
		hammer2_bulkfree_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		/*
		if (info.cluster_fd >= 0) {
			fp = holdfp(curthread, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
		*/
	} else {
		/* hmp->devvp_list is already constructed. */
		hammer2_cleanup_devvp(&devvpl);
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2_mount: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, (char *)chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		if (error) {
			kprintf("hammer2_mount: PFS label I/O error\n");
			return EINVAL;
		} else {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			return ENOENT;
		}
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps()).
	 */
	if (chain->error) {
		kprintf("hammer2_mount: PFS label I/O error\n");
	} else {
		ripdata = &chain->data->ipdata;
		pmp = hammer2_pfsalloc(NULL, ripdata, force_local);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	/*
	 * The PFS to mount must exist at this point.
	 */
	if (pmp == NULL) {
		kprintf("hammer2_mount: Failed to acquire PFS structure\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}

	/*
	 * Finish the mount
	 */
	kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp);

	/* Check if the pmp has already been mounted. */
	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * Required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);
	lockmgr(&hammer2_mntlk, LK_RELEASE);

#if 0
	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}
#endif

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
	hammer2_vfs_statvfs(mp, &mp->mnt_vstat, NULL);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_key_t key_next;
	int error;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
		} else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			kprintf("Non inode chain type %d under super-root\n",
				chain->bref.type);
		} else {
			ripdata = &chain->data->ipdata;
			hammer2_pfsalloc(chain, ripdata, force_local);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
}

#if 0
static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct ucred *cred)
{
	hammer2_volume_t *vol;
	struct m_vnode *devvp;
	int i, error, result = 0;

	if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)))
		return 0;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = &hmp->volumes[i];
		devvp = vol->dev->devvp;
		KKASSERT(devvp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
		vn_unlock(devvp);
		error = 0;
		if (vol->id == HAMMER2_ROOT_VOLUME) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			VOP_CLOSE(devvp, FREAD, NULL);
		} else {
			VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
		}
		vn_unlock(devvp);
		result |= error;
	}
	if (result == 0) {
		kprintf("hammer2: enable read/write\n");
		hmp->ronly = 0;
	}

	return result;
}
#endif

int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	if (pmp->mp)
		hammer2_unmount_helper(mp, pmp, NULL);

	error = 0;
failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Unmount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
		       hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int dumpcnt;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have
	 * to disconnect the mount, adjust mount_count, and locate devices
	 * that might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT(MPTOPMP(mp) == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	if (hmp->mount_count)
		return;

	/*
	 * Decommission the network before we start messing with the
	 * device and PFS.
	 */
	hammer2_iocom_uninit(hmp);

	hammer2_bulkfree_uninit(hmp);
	hammer2_pfsfree_scan(hmp, 0);

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we
	 * want to leave the filesystem in a 100% clean state on a normal
	 * unmount.
	 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);
#endif

	/*
	 * Flush whatever is left.  Unmounted but modified PFS's might still
	 * have some dirty chains on them.
	 */
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);

	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_voldata_modify(hmp);
		hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->fchain);

	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->vchain);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	hammer2_pfsfree_scan(hmp, 1);

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if (!TAILQ_EMPTY(&hmp->devvpl)) {
		hammer2_close_devvp(&hmp->devvpl, hmp->ronly);
		hammer2_cleanup_devvp(&hmp->devvpl);
	}
	KKASSERT(TAILQ_EMPTY(&hmp->devvpl));

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1);
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1);

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */
	hammer2_chain_drop(&hmp->vchain);

	hammer2_io_hash_cleanup_all(hmp);
	if (hmp->iofree_count) {
		kprintf("io_cleanup: %d I/O's left hanging\n",
			hmp->iofree_count);
	}

	TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
	kmalloc_destroy_obj(&hmp->mchain);
	kmalloc_destroy_obj(&hmp->mio);
	kmalloc_destroy(&hmp->mmsg);
	kfree(hmp, M_HAMMER2);
}

int
hammer2_vfs_vget(struct mount *mp, struct m_vnode *dvp,
		 ino_t ino, struct m_vnode **vpp)
{
	hammer2_xop_lookup_t *xop;
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	int error;

	inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;

	error = 0;
	pmp = MPTOPMP(mp);

	/*
	 * Easy if we already have it cached
	 */
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* from lookup */

		return error;
	}

	/*
	 * Otherwise we have to find the inode
	 */
	xop = hammer2_xop_alloc(pmp->iroot, 0);
	xop->lhc = inum;
	hammer2_xop_start(&xop->head, &hammer2_lookup_desc);
	error = hammer2_xop_collect(&xop->head, 0);

	if (error == 0)
		ip = hammer2_inode_get(pmp, &xop->head, -1, -1);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	if (ip) {
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
	} else {
		*vpp = NULL;
		error = ENOENT;
	}
	return (error);
}
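
/*
 * Usage sketch (illustrative only): a file-handle style consumer that
 * holds a raw inode number can resolve it to a vnode as below; compare
 * the hammer2_vfs_fhtovp() stub further down.  The function name and
 * error handling here are hypothetical.
 */
#if 0
static int
example_vget(struct mount *mp, ino_t ino)
{
	struct m_vnode *vp;
	int error;

	error = hammer2_vfs_vget(mp, NULL, ino, &vp);	/* dvp may be NULL */
	if (error == 0) {
		/* ... use vp ... */
		vput(vp);
	}
	return error;
}
#endif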

int
hammer2_vfs_root(struct mount *mp, struct m_vnode **vpp)
{
	hammer2_pfs_t *pmp;
	struct m_vnode *vp;
	int error;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		kprintf("hammer2 (%s): no root inode\n",
			mp->mnt_stat.f_mntfromname);
		*vpp = NULL;
		return EINVAL;
	}

	error = 0;
	hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);

	while (pmp->inode_tid == 0) {
		hammer2_xop_ipcluster_t *xop;
		const hammer2_inode_meta_t *meta;

		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error == 0) {
			meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta;
			pmp->iroot->meta = *meta;
			pmp->inode_tid = meta->pfs_inum + 1;
			hammer2_xop_pdata(&xop->head);
			/* meta invalid */

			if (pmp->inode_tid < HAMMER2_INODE_START)
				pmp->inode_tid = HAMMER2_INODE_START;
			pmp->modify_tid =
				xop->head.cluster.focus->bref.modify_tid + 1;
#if 0
			kprintf("PFS: Starting inode %jd\n",
				(intmax_t)pmp->inode_tid);
			kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
				pmp->inode_tid, pmp->modify_tid);
#endif
			//wakeup(&pmp->iroot); XXX

			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

			/*
			 * Prime the mount info.
			 */
			hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
			break;
		}

		/*
		 * Loop, try again
		 */
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlock(pmp->iroot);
		error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
		hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
		if (error == EINTR)
			break;
	}

	if (error) {
		hammer2_inode_unlock(pmp->iroot);
		*vpp = NULL;
	} else {
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_inode_unlock(pmp->iroot);
		*vpp = vp;
	}

	return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	struct statfs tmp;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	bzero(&tmp, sizeof(tmp));

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		tmp.f_files = bref.embed.stats.inode_count;
		tmp.f_ffree = 0;
		tmp.f_blocks = hmp->voldata.allocator_size /
			       mp->mnt_vstat.f_bsize;
		tmp.f_bfree = hmp->voldata.allocator_free /
			      mp->mnt_vstat.f_bsize;
		tmp.f_bavail = tmp.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			tmp.f_blocks -= adj;
			tmp.f_bfree -= adj;
			tmp.f_bavail -= adj;
		}

		mp->mnt_stat.f_blocks = tmp.f_blocks;
		mp->mnt_stat.f_bfree = tmp.f_bfree;
		mp->mnt_stat.f_bavail = tmp.f_bavail;
		mp->mnt_stat.f_files = tmp.f_files;
		mp->mnt_stat.f_ffree = tmp.f_ffree;

		*sbp = mp->mnt_stat;
	}
	return (0);
}
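
/*
 * Worked example for the non-root reservation above (numbers are
 * illustrative, not taken from any particular volume): with
 * f_bsize = 65536 and free_reserved at 5% of a 1 TiB volume
 * (~51.2 GiB), adj = 0.05 * 2^40 / 2^16 ~= 838,861 blocks, so a
 * non-root statfs(2) under-reports f_blocks/f_bfree/f_bavail by the
 * reserve rather than letting ordinary users run the volume to 0%.
 */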

static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	struct statvfs tmp;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);
	bzero(&tmp, sizeof(tmp));

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		tmp.f_files = bref.embed.stats.inode_count;
		tmp.f_ffree = 0;
		tmp.f_blocks = hmp->voldata.allocator_size /
			       mp->mnt_vstat.f_bsize;
		tmp.f_bfree = hmp->voldata.allocator_free /
			      mp->mnt_vstat.f_bsize;
		tmp.f_bavail = tmp.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			tmp.f_blocks -= adj;
			tmp.f_bfree -= adj;
			tmp.f_bavail -= adj;
		}

		mp->mnt_vstat.f_blocks = tmp.f_blocks;
		mp->mnt_vstat.f_bfree = tmp.f_bfree;
		mp->mnt_vstat.f_bavail = tmp.f_bavail;
		mp->mnt_vstat.f_files = tmp.f_files;
		mp->mnt_vstat.f_ffree = tmp.f_ffree;

		*sbp = mp->mnt_vstat;
	}
	return (0);
}

/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  In case of a crash, then on a fresh mount we must do an
 * incremental scan of the last committed transaction id and make sure that
 * all related blocks have been marked allocated.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
	hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
	struct hammer2_recovery_list list;
	hammer2_tid_t mtid;
	int depth;
};

static int hammer2_recovery_scan(hammer2_dev_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_info *info,
			hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH	10
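
/*
 * Illustrative timeline (transaction ids are hypothetical): suppose the
 * volume header was committed with freemap_tid = 100 but mirror_tid = 102,
 * i.e. the crash landed after two flushes whose freemap updates never made
 * it to media.  hammer2_recovery() below then scans the topology for
 * blockrefs with bref.mirror_tid > 100 and re-marks each one allocated via
 * hammer2_freemap_adjust(..., HAMMER2_FREEMAP_DORECOVER).  When
 * freemap_tid >= mirror_tid nothing needs to be scanned.
 */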

static
int
hammer2_recovery(hammer2_dev_t *hmp)
{
	struct hammer2_recovery_info info;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	hammer2_tid_t sync_tid;
	hammer2_tid_t mirror_tid;
	int error;

	hammer2_trans_init(hmp->spmp, 0);

	sync_tid = hmp->voldata.freemap_tid;
	mirror_tid = hmp->voldata.mirror_tid;

	kprintf("hammer2_mount: \"%s\": ", hmp->devrepname);
	if (sync_tid >= mirror_tid) {
		kprintf("no recovery needed\n");
	} else {
		kprintf("freemap recovery %016jx-%016jx\n",
			sync_tid + 1, mirror_tid);
	}

	TAILQ_INIT(&info.list);
	info.depth = 0;
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
		TAILQ_REMOVE(&info.list, elm, entry);
		parent = elm->chain;
		sync_tid = elm->sync_tid;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		error |= hammer2_recovery_scan(hmp, parent, &info,
					       hmp->voldata.freemap_tid);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);	/* drop elm->chain ref */
	}

	hammer2_trans_done(hmp->spmp, 0);

	return error;
}

static
int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	int tmp_error;
	int rup_error;
	int error;
	int first;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &parent->data->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
		break;
	default:
		return HAMMER2_ERROR_BADBREF;
	}

	/*
	 * Defer operation if depth limit reached.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */

		return(0);
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 *
	 * error	Cumulative error this level only
	 * rup_error	Cumulative error for recursion
	 * tmp_error	Specific non-cumulative recursion error
	 */
	chain = NULL;
	first = 1;
	rup_error = 0;
	error = 0;

	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref,
					    &first,
					    HAMMER2_LOOKUP_NODATA);

		/*
		 * Problem during scan or EOF
		 */
		if (error)
			break;

		/*
		 * If this is a leaf
		 */
		if (chain == NULL) {
			if (bref.mirror_tid > sync_tid) {
				hammer2_freemap_adjust(hmp, &bref,
						     HAMMER2_FREEMAP_DORECOVER);
			}
			continue;
		}

		/*
		 * This may or may not be a recursive node.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (bref.mirror_tid > sync_tid) {
			++info->depth;
			tmp_error = hammer2_recovery_scan(hmp, chain,
							  info, sync_tid);
			--info->depth;
		} else {
			tmp_error = 0;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
		 */
		if (tmp_error == 0 &&
		    (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
		}
		rup_error |= tmp_error;
	}
	return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
}

/*
 * This fixes up an error introduced in earlier H2 implementations where
 * moving a PFS inode into an indirect block wound up causing the
 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared.
 */
static
int
hammer2_fixup_pfses(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_pfs_t *spmp;
	int error;

	error = 0;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->bref.type != HAMMER2_BREF_TYPE_INODE)
			goto next;
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
			error |= chain->error;
		} else if ((chain->bref.flags &
			    HAMMER2_BREF_FLAG_PFSROOT) == 0) {
			int error2;

			ripdata = &chain->data->ipdata;
			hammer2_trans_init(hmp->spmp, 0);
			error2 = hammer2_chain_modify(chain,
						      chain->bref.modify_tid,
						      0, 0);
			if (error2 == 0) {
				kprintf("hammer2: Correct mis-flagged PFS %s\n",
					ripdata->filename);
				chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
			} else {
				error |= error2;
			}
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
			hammer2_trans_done(hmp->spmp, 0);
		}
next:
		/*
		 * Always advance to the next chain; a bare "continue" on
		 * the non-inode test above would spin forever on the same
		 * chain.
		 */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	return error;
}

/*
 * Sync a mount point; this is called periodically on a per-mount basis from
 * the filesystem syncer, and whenever a user issues a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	int error;

	error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor);

	return error;
}

/*
 * Because frontend operations lock vnodes before we get a chance to
 * lock the related inode, we can't just acquire a vnode lock without
 * risking a deadlock.  The frontend may be holding a vnode lock while
 * also blocked on our SYNCQ flag while trying to get the inode lock.
 *
 * To deal with this situation we can check the vnode lock situation
 * after locking the inode and perform a work-around.
 */
int
hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor)
{
	hammer2_inode_t *ip;
	hammer2_depend_t *depend;
	hammer2_depend_t *depend_next;
	struct m_vnode *vp;
	uint32_t pass2;
	int error;
	int wakecount;
	int dorestart;

	/*
	 * Move all inodes on sideq to syncq.  This will clear sideq.
	 * This should represent all flushable inodes.  These inodes
	 * will already have refs due to being on syncq or sideq.  We
	 * must do this all at once with the spinlock held to ensure that
	 * all inode dependencies are part of the same flush.
	 *
	 * We should be able to do this asynchronously from frontend
	 * operations because we will be locking the inodes later on
	 * to actually flush them, and that will partition any frontend
	 * op using the same inode.  Either it has already locked the
	 * inode and we will block, or it has not yet locked the inode
	 * and it will block until we are finished flushing that inode.
	 *
	 * When restarting, only move the inodes flagged as PASS2 from
	 * SIDEQ to SYNCQ.  PASS2 propagation by inode_lock4() and
	 * inode_depend() is atomic with the spin-lock.
	 */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC BOUNDARY\n");
#endif
	dorestart = 0;

	/*
	 * Move inodes from depq to syncq, releasing the related
	 * depend structures.
	 */
restart:
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart);
#endif
	hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/);
	hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN);

	/*
	 * Move inodes from depq to syncq.  When restarting, only depq's
	 * marked pass2 are moved.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend_next = TAILQ_FIRST(&pmp->depq);
	wakecount = 0;

	while ((depend = depend_next) != NULL) {
		depend_next = TAILQ_NEXT(depend, entry);
		if (dorestart && depend->pass2 == 0)
			continue;
		TAILQ_FOREACH(ip, &depend->sideq, entry) {
			KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
			atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
			ip->depend = NULL;
		}

		/*
		 * NOTE: pmp->sideq_count includes both sideq and syncq
		 */
		TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry);

		depend->count = 0;
		depend->pass2 = 0;
		TAILQ_REMOVE(&pmp->depq, depend, entry);
	}

	hammer2_spin_unex(&pmp->list_spin);
	hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/
				      HAMMER2_TRANS_WAITING);
	dorestart = 0;

	/*
	 * sideq_count may have dropped enough to allow us to unstall
	 * the frontend.
	 */
	hammer2_pfs_memory_wakeup(pmp, 0);

	/*
	 * Now run through all inodes on syncq.
	 *
	 * Flush transactions only interlock with other flush transactions.
	 * Any conflicting frontend operations will block on the inode, but
	 * may hold a vnode lock while doing so.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) {
		/*
		 * Remove the inode from the SYNCQ, transfer the syncq ref
		 * to us.  We must clear SYNCQ to allow any potential
		 * front-end deadlock to proceed.  We must set PASS2 so
		 * the dependency code knows what to do.
		 */
		pass2 = ip->flags;
		cpu_ccfence();
		if (atomic_cmpset_int(&ip->flags,
			      pass2,
			      (pass2 & ~(HAMMER2_INODE_SYNCQ |
					 HAMMER2_INODE_SYNCQ_WAKEUP)) |
			      HAMMER2_INODE_SYNCQ_PASS2) == 0)
		{
			continue;
		}
		TAILQ_REMOVE(&pmp->syncq, ip, entry);
		--pmp->sideq_count;
		hammer2_spin_unex(&pmp->list_spin);

		/*
		 * Tickle anyone waiting on ip->flags or the hysteresis
		 * on the dirty inode count.
		 */
		if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP)
			wakeup(&ip->flags);
		if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) {
			wakecount = 0;
			hammer2_pfs_memory_wakeup(pmp, 0);
		}

		/*
		 * Relock the inode, and we inherit a ref from the above.
		 * We will check for a race after we acquire the vnode.
		 */
		hammer2_mtx_ex(&ip->lock);

		/*
		 * We need the vp in order to vfsync() dirty buffers, so if
		 * one isn't attached we can skip it.
		 *
		 * Ordering the inode lock and then the vnode lock has the
		 * potential to deadlock.  If we had left SYNCQ set that could
		 * also deadlock us against the frontend even if we don't hold
		 * any locks, but the latter is not a problem now since we
		 * cleared it.  igetv will temporarily release the inode lock
		 * in a safe manner to work-around the deadlock.
		 *
		 * Unfortunately it is still possible to deadlock when the
		 * frontend obtains multiple inode locks, because all the
		 * related vnodes are already locked (nor can the vnode locks
		 * be released and reacquired without messing up RECLAIM and
		 * INACTIVE sequencing).
		 *
		 * The solution for now is to move the vp back onto SIDEQ
		 * and set dorestart, which will restart the flush after we
		 * exhaust the current SYNCQ.  Note that additional
		 * dependencies may build up, so we definitely need to move
		 * the whole SIDEQ back to SYNCQ when we restart.
		 */
		vp = ip->vp;
		if (vp) {
			if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) {
				/*
				 * Failed to get the vnode, requeue the inode
				 * (PASS2 is already set so it will be found
				 * again on the restart).  Then unlock.
				 */
				vp = NULL;
				dorestart |= 1;
#ifdef HAMMER2_DEBUG_SYNC
				kprintf("inum %ld (sync delayed by vnode)\n",
					(long)ip->meta.inum);
#endif
				hammer2_inode_delayed_sideq(ip);

				hammer2_mtx_unlock(&ip->lock);
				hammer2_inode_drop(ip);

				/*
				 * If PASS2 was previously set we might
				 * be looping too hard, ask for a delay
				 * along with the restart.
				 */
				if (pass2 & HAMMER2_INODE_SYNCQ_PASS2)
					dorestart |= 2;
				hammer2_spin_ex(&pmp->list_spin);
				continue;
			}
		} else {
			vp = NULL;
		}

		/*
		 * If the inode wound up on a SIDEQ again it will already be
		 * prepped for another PASS2.  In this situation if we flush
		 * it now we will just wind up flushing it again in the same
		 * syncer run, so we might as well not flush it now.
		 */
		if (ip->flags & HAMMER2_INODE_SIDEQ) {
			hammer2_mtx_unlock(&ip->lock);
			hammer2_inode_drop(ip);
			if (vp)
				vput(vp);
			dorestart |= 1;
			hammer2_spin_ex(&pmp->list_spin);
			continue;
		}

		/*
		 * Ok we have the inode exclusively locked and if vp is
		 * not NULL that will also be exclusively locked.  Do the
		 * meat of the flush.
		 *
		 * vp token needed for v_rbdirty_tree check / vclrisdirty
		 * sequencing.  Though we hold the vnode exclusively so
		 * we shouldn't need to hold the token also in this case.
		 */
		if (vp) {
			vfsync(vp, MNT_WAIT, 1, NULL, NULL);
			bio_track_wait(NULL, 0, 0); /* XXX */
		}

		/*
		 * If the inode has not yet been inserted into the tree
		 * we must do so.  Then sync and flush it.  The flush should
		 * update the parent.
		 */
		if (ip->flags & HAMMER2_INODE_DELETING) {
#ifdef HAMMER2_DEBUG_SYNC
			kprintf("inum %ld destroy\n", (long)ip->meta.inum);
#endif
			hammer2_inode_chain_des(ip);
			atomic_add_long(&hammer2_iod_inode_deletes, 1);
		} else if (ip->flags & HAMMER2_INODE_CREATING) {
#ifdef HAMMER2_DEBUG_SYNC
			kprintf("inum %ld insert\n", (long)ip->meta.inum);
#endif
			hammer2_inode_chain_ins(ip);
			atomic_add_long(&hammer2_iod_inode_creates, 1);
		}
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("inum %ld chain-sync\n", (long)ip->meta.inum);
#endif

		/*
		 * Because I kinda messed up the design and index the inodes
		 * under the root inode, alongside the directory entries,
		 * we can't flush the inode index under the iroot until the
		 * end.  If we do it now we might miss effects created by
		 * other inodes on the SYNCQ.
		 *
		 * Do a normal (non-FSSYNC) flush instead, which allows the
		 * vnode code to work the same.  We don't want to force iroot
		 * back onto the SIDEQ, and we also don't want the flush code
		 * to update pfs_iroot_blocksets until the final flush later.
		 *
		 * XXX at the moment this will likely result in a double-flush
		 * of the iroot chain.
		 */
		hammer2_inode_chain_sync(ip);
		if (ip == pmp->iroot) {
			hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
		} else {
			hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
						      HAMMER2_XOP_FSSYNC);
		}
		if (vp) {
			lwkt_gettoken(NULL);
			if ((ip->flags & (HAMMER2_INODE_MODIFIED |
					  HAMMER2_INODE_RESIZED |
					  HAMMER2_INODE_DIRTYDATA)) == 0) {
			    //RB_EMPTY(&vp->v_rbdirty_tree) &&
			    //!bio_track_active(&vp->v_track_write)) {
				vclrisdirty(vp);
			} else {
				hammer2_inode_delayed_sideq(ip);
			}
			lwkt_reltoken(NULL);
			vput(vp);
			vp = NULL;	/* safety */
		}
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2);
		hammer2_inode_unlock(ip);	/* unlock+drop */
		/* ip pointer invalid */

		/*
		 * If the inode got dirtied after we dropped our locks,
		 * it will have already been moved back to the SIDEQ.
		 */
		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
	hammer2_pfs_memory_wakeup(pmp, 0);

	if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) {
		/*
		 * bit 2 is set if something above thinks we might be
		 * looping too hard, try to unclog the frontend
		 * dependency and wait a bit before restarting.
		 *
		 * NOTE: The frontend could be stuck in h2memw, though
		 *	 it isn't supposed to be holding vnode locks
		 *	 in that case.
		 */
		if (dorestart & 2) {
			wakeup(&pmp->inmem_dirty_chains);
			tsleep(&dorestart, 0, "h2syndel", 2);
		}
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n");
		/*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/
#endif
		dorestart = 1;
		goto restart;
	}
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n");
	/*tsleep(&dorestart, 0, "h2STG2", hz*20);*/
#endif

	/*
	 * We have to flush the PFS root last, even if it does not appear to
	 * be dirty, because all the inodes in the PFS are indexed under it.
	 * The normal flushing of iroot above would only occur if directory
	 * entries under the root were changed.
	 *
	 * Specifying VOLHDR will cause an additional flush of hmp->spmp
	 * for the media making up the cluster.
	 */
	if ((ip = pmp->iroot) != NULL) {
		hammer2_inode_ref(ip);
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_chain_sync(ip);
		hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
					      HAMMER2_XOP_FSSYNC |
					      HAMMER2_XOP_VOLHDR);
		hammer2_inode_unlock(ip);	/* unlock+drop */
	}
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC STAGE 2 DONE\n");
#endif

	/*
	 * device bioq sync
	 */
	hammer2_bioq_sync(pmp);

	error = 0;	/* XXX */
	hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH);

	return (error);
}

#if 0
static
int
hammer2_vfs_vptofh(struct m_vnode *vp, struct fid *fhp)
{
	hammer2_inode_t *ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = 0;
	((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
	((hammer2_tid_t *)fhp->fid_data)[1] = 0;

	return 0;
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct m_vnode *rootvp,
		   struct fid *fhp, struct m_vnode **vpp)
{
	hammer2_tid_t inum;
	int error;

	inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
	if (vpp) {
		if (inum == 1)
			error = hammer2_vfs_root(mp, vpp);
		else
			error = hammer2_vfs_vget(mp, NULL, inum, vpp);
	} else {
		error = 0;
	}
	return error;
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	hammer2_pfs_t *pmp;
	struct netcred *np;
	int error;

	pmp = MPTOPMP(mp);
	np = vfs_export_lookup(mp, &pmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return error;
}
#endif

/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
#if 0
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
	if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING0);
		wakeup(&pmp->count_lwinprog);
	}
#endif
}

void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
#if 0
	int lwinprog;
	int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
				    HAMMER2_LWINPROG_WAITING0;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, lwflag);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
#endif
}
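
/*
 * Illustrative producer/consumer pattern (sketch only; the actual call
 * sites live in the write path elsewhere and are not reproduced here):
 * the frontend refs the counter per queued logical write, the backend
 * drops it on completion, and the frontend throttles against
 * hammer2_flush_pipe so a compression backlog cannot grow without bound.
 */
#if 0
	/* frontend, before queueing a logical-write BIO to the thread */
	hammer2_lwinprog_ref(pmp);
	/* ... hand the BIO to the backend thread ... */

	/* backend thread, when the logical write completes */
	hammer2_lwinprog_drop(pmp);

	/* frontend hysteresis: sleep while more than flush_pipe are queued */
	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
#endif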

#if 0
/*
 * It is possible for an excessive number of dirty chains or dirty inodes
 * to build up.  When this occurs we start an asynchronous filesystem sync.
 * If the level continues to build up, we stall, waiting for it to drop,
 * with some hysteresis.
 *
 * This relies on the kernel calling hammer2_vfs_modifying() prior to
 * obtaining any vnode locks before making a modifying VOP call.
 */
static int
hammer2_vfs_modifying(struct mount *mp)
{
	if (mp->mnt_flag & MNT_RDONLY)
		return EROFS;
	hammer2_pfs_memory_wait(MPTOPMP(mp));

	return 0;
}
#endif

/*
 * Initiate an asynchronous filesystem sync and, with hysteresis,
 * stall if the internal data structure count becomes too bloated.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	int pcatch;
	int error;
	int started;

	if (pmp == NULL || pmp->mp == NULL)
		return;

	started = 0;

	for (;;) {
		waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK;
		cpu_ccfence();

		/*
		 * Start the syncer running at 1/2 the limit to try
		 * to avoid sleeping.
		 */
		if (waiting > hammer2_limit_dirty_chains / 2 ||
		    pmp->sideq_count > hammer2_limit_dirty_inodes / 2)
		{
			trigger_syncer(pmp->mp);
		}

		/*
		 * Stall at the limit waiting for the counts to drop.
		 * This code will typically be woken up once the count
		 * drops below 2/3 of the limit, or in one second.
		 */
		if (waiting < hammer2_limit_dirty_chains &&
		    pmp->sideq_count < hammer2_limit_dirty_inodes)
		{
			break;
		}

		if (started == 0) {
			trigger_syncer_start(pmp->mp);
			started = 1;
		}

		/*
		 * Interlocked re-test, sleep, and retry.
		 */
		pcatch = curthread->td_proc ? PCATCH : 0;
		tsleep_interlock(&pmp->inmem_dirty_chains, pcatch);

		atomic_set_int(&pmp->inmem_dirty_chains,
			       HAMMER2_DIRTYCHAIN_WAITING);

		if (waiting < hammer2_limit_dirty_chains &&
		    pmp->sideq_count < hammer2_limit_dirty_inodes) {
			break;
		}
		error = tsleep(&pmp->inmem_dirty_chains,
			       PINTERLOCKED | pcatch,
			       "h2memw", hz);
		if (error == ERESTART)
			break;
	}
	if (started)
		trigger_syncer_stop(pmp->mp);
}
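
/*
 * Example walkthrough (hypothetical numbers): with
 * hammer2_limit_dirty_chains = 100000, a modifying frontend op passing
 * through hammer2_pfs_memory_wait() kicks the syncer once more than
 * 50000 chains are dirty, stalls at 100000, and is woken by
 * hammer2_pfs_memory_wakeup() below once the count decays to 66666
 * (2/3 of the limit), giving the flusher room to work before the
 * frontend resumes dirtying chains.
 */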

/*
 * Wake up any stalled frontend ops waiting, with hysteresis, using
 * 2/3 of the limit.
 */
void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count)
{
	uint32_t waiting;

	if (pmp) {
		waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count);
		/* don't need --waiting to test flag */

		if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) &&
		    (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <=
		    hammer2_limit_dirty_chains * 2 / 3 &&
		    pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) {
			atomic_clear_int(&pmp->inmem_dirty_chains,
					 HAMMER2_DIRTYCHAIN_WAITING);
			wakeup(&pmp->inmem_dirty_chains);
		}
	}
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	if (pmp) {
		atomic_add_int(&pmp->inmem_dirty_chains, 1);
	}
}

/*
 * Volume header data locks
 */
void
hammer2_voldata_lock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_EXCLUSIVE);
}

void
hammer2_voldata_unlock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_RELEASE);
}

/*
 * Caller indicates that the volume header is being modified.  Flag
 * the related chain and adjust its transaction id.
 *
 * The transaction id is set to voldata.mirror_tid + 1, similar to
 * what hammer2_chain_modify() does.  Be very careful here, volume
 * data can be updated independently of the rest of the filesystem.
 */
void
hammer2_voldata_modify(hammer2_dev_t *hmp)
{
	if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_add_long(&hammer2_count_modified_chains, 1);
		atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_inc(hmp->vchain.pmp);
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid + 1;
	}
}
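
/*
 * Canonical usage sketch (illustrative only; the field being updated
 * is hypothetical): callers serialize direct voldata updates with the
 * volume lock and flag the modification so vchain is picked up by the
 * next flush.
 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_modify(hmp);
	hmp->voldata.some_field = new_value;	/* hypothetical update */
	hammer2_voldata_unlock(hmp);
#endif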

/*
 * Returns 0 if the filesystem has tons of free space
 * Returns 1 if the filesystem has less than 10% remaining
 * Returns 2 if the filesystem has less than 5%/2% (user/root) remaining.
 */
int
hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_off_t free_reserved;
	hammer2_off_t free_nominal;
	int i;

	pmp = ip->pmp;

	if (/*XXX*/ 1 || pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
		free_reserved = HAMMER2_SEGSIZE;
		free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
		for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
			hmp = pmp->pfs_hmps[i];
			if (hmp == NULL)
				continue;
			if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
			    pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
				continue;

			if (free_nominal > hmp->voldata.allocator_free)
				free_nominal = hmp->voldata.allocator_free;
			if (free_reserved < hmp->free_reserved)
				free_reserved = hmp->free_reserved;
		}

		/*
		 * SMP races ok
		 */
		pmp->free_reserved = free_reserved;
		pmp->free_nominal = free_nominal;
		pmp->free_ticks = ticks;
	} else {
		free_reserved = pmp->free_reserved;
		free_nominal = pmp->free_nominal;
	}
	if (cred && cred->cr_uid != 0) {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved) {
			return 2;
		}
	} else {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved / 2) {
			return 2;
		}
	}
	if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
		return 1;
	return 0;
}

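/*
 * Consumption sketch (illustrative; the surrounding write path is not
 * shown here): a modifying operation can translate the tri-state result
 * into an errno before dirtying anything.
 */
#if 0
	switch (hammer2_vfs_enospace(ip, bytes, cred)) {
	case 2:
		return ENOSPC;		/* below the hard reserve */
	case 1:
		/* below ~10%: proceed, but callers may wish to throttle */
		break;
	default:
		break;			/* plenty of space */
	}
#endif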