/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/file.h>

#include "hammer2.h"

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct hammer2_pfslist hammer2_spmplist;
struct lock hammer2_mntlk;

int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
int hammer2_debug;
int hammer2_aux_flags;
int hammer2_xop_nthreads;
int hammer2_xop_sgroups;
int hammer2_xop_xgroups;
int hammer2_xop_xbase;
int hammer2_xop_mod;
long hammer2_debug_inode;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_cluster_write = 0;		/* physical write clustering */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_flush_pipe = 100;
int hammer2_dio_count;
int hammer2_dio_limit = 256;
int hammer2_bulkfree_tps = 5000;
int hammer2_spread_workers;
long hammer2_chain_allocs;
long hammer2_limit_dirty_chains;
long hammer2_limit_dirty_inodes;
long hammer2_count_modified_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
static long hammer2_iod_inode_creates;
static long hammer2_iod_inode_deletes;

long hammer2_process_icrc32;
long hammer2_process_xxhash64;

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer2_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, aux_flags, CTLFLAG_RW,
	   &hammer2_aux_flags, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer2_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, spread_workers, CTLFLAG_RW,
	   &hammer2_spread_workers, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
	   &hammer2_cluster_write, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
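/*
 * (The sysctls below export bulkfree pacing, the dirty-chain/dirty-inode
 * limits, DIO accounting, and the per-class I/O statistics counters
 * declared above.)
 */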
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW,
	   &hammer2_limit_dirty_inodes, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
	   &hammer2_dio_limit, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RW,
	   &hammer2_iod_inode_creates, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RW,
	   &hammer2_iod_inode_deletes, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RW,
	   &hammer2_process_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RW,
	   &hammer2_process_xxhash64, 0, "");

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static int hammer2_vfs_modifying(struct mount *mp);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
static int hammer2_fixup_pfses(hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_flags	= 0,
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp,
	.vfs_modifying	= hammer2_vfs_modifying
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;

	int error;
	int mod;

	error = 0;
	kmalloc_raise_limit(M_HAMMER2, 0);	/* unlimited */

	/*
	 * hammer2_xop_nthreads must be a multiple of ncpus,
	 * minimum 2 * ncpus.
	 */
	mod = ncpus;
	hammer2_xop_mod = mod;
	hammer2_xop_nthreads = mod * 2;
	while (hammer2_xop_nthreads / mod < HAMMER2_XOPGROUPS_MIN ||
	       hammer2_xop_nthreads < HAMMER2_XOPTHREADS_MIN)
	{
		hammer2_xop_nthreads += mod;
	}
	hammer2_xop_sgroups = hammer2_xop_nthreads / mod / 2;
	hammer2_xop_xgroups = hammer2_xop_nthreads / mod - hammer2_xop_sgroups;
	hammer2_xop_xbase = hammer2_xop_sgroups * mod;
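
	/*
	 * (Illustration, not from the original source: with ncpus == 8,
	 * and assuming the HAMMER2_XOPGROUPS_MIN / HAMMER2_XOPTHREADS_MIN
	 * minimums are already satisfied by 2 * ncpus, the loop leaves
	 * hammer2_xop_nthreads at 16, giving sgroups = 16 / 8 / 2 = 1,
	 * xgroups = 16 / 8 - 1 = 1, and xbase = 1 * 8 = 8.)
	 */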

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO cache large
	 *	 enough to avoid this problem, but also cap it.
	 */
	hammer2_dio_limit = nbuf * 2;
	if (hammer2_dio_limit > 100000)
		hammer2_dio_limit = 100000;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);
	TAILQ_INIT(&hammer2_spmplist);

	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;
	if (hammer2_limit_dirty_chains < 1000)
		hammer2_limit_dirty_chains = 1000;

	hammer2_limit_dirty_inodes = maxvnodes / 25;
	if (hammer2_limit_dirty_inodes < 100)
		hammer2_limit_dirty_inodes = 100;
	if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES)
		hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES;
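
	/*
	 * (Illustration, not from the original source: with maxvnodes of,
	 * say, 250000, the initial limits come out to 25000 dirty chains
	 * and 10000 dirty inodes, each then subject to the compile-time
	 * HAMMER2_LIMIT_DIRTY_* ceilings and the 1000/100 floors above.)
	 */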

	return (error);
}

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}

/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 *
 * XXX check locking
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_tid_t modify_tid, hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
			    strcmp(pmp->pfs_names[0], ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes",
				   sizeof(struct hammer2_inode));
		lockinit(&pmp->lock, "pfslk", 0, 0);
		spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		spin_init(&pmp->xop_spin, "h2xop");
		spin_init(&pmp->lru_spin, "h2lru");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->syncq);
		TAILQ_INIT(&pmp->depq);
		TAILQ_INIT(&pmp->lru_list);
		spin_init(&pmp->list_spin, "h2pfsalloc_list");

		/*
		 * Save the last media transaction id for the flusher.  Set
		 * initial state and insert onto the appropriate list.
		 */
		if (ripdata) {
			pmp->pfs_clid = ripdata->meta.pfs_clid;
			TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
		} else {
			pmp->flags |= HAMMER2_PMPF_SPMP;
			TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry);
		}

		/*
		 * The synchronization thread may start too early, make
		 * sure it stays frozen until we are ready to let it go.
		 * XXX
		 */
		/*
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
		*/
	}

	/*
	 * Create the PFS's root inode and any missing XOP helper threads.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, 1, -1);
		if (ripdata)
			iroot->meta = ripdata->meta;
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot);
	}

	/*
	 * Stop here if no chain is passed in.
	 */
	if (chain == NULL)
		goto done;

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	if (j == HAMMER2_MAXCLUSTER) {
		kprintf("hammer2_pfsalloc: cluster full!\n");
		/* XXX fatal error? */
	} else {
		KKASSERT(chain->pmp == NULL);
		chain->pmp = pmp;
		hammer2_chain_ref(chain);
		iroot->cluster.array[j].chain = chain;
		if (force_local)
			pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
		else
			pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);
		pmp->pfs_hmps[j] = chain->hmp;
		hammer2_spin_ex(&pmp->inum_spin);
		pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset;
		hammer2_spin_unex(&pmp->inum_spin);

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++chain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 *  discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 *
	 * NOTE: We create helper threads for all mounted PFSs or any
	 *	 PFSs with 2+ nodes (so the sync thread can update them,
	 *	 even if not mounted).
	 */
	if (pmp->mp || iroot->cluster.nchains >= 2)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}
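
/*
 * NOTE: Within this file hammer2_pfsalloc() is called with a NULL chain
 *	 from hammer2_vfs_mount() (for the spmp and for the mounted PFS)
 *	 and with an actual PFS inode chain from hammer2_update_pmps()
 *	 while scanning the super-root.
 */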

/*
 * Deallocate an element of a probed PFS.  If destroying and this is a
 * MASTER, adjust nmasters.
 *
 * This function does not physically destroy the PFS element in its device
 * under the super-root (see hammer2_ioctl_pfs_delete()).
 */
void
hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	iroot = pmp->iroot;
	if (iroot) {
		/*
		 * Stop synchronizing
		 *
		 * XXX flush after acquiring the iroot lock.
		 * XXX clean out the cluster index from all inode structures.
		 */
		hammer2_thr_delete(&pmp->sync_thrs[clindex]);

		/*
		 * Remove the cluster index from the group.  If destroying
		 * the PFS and this is a master, adjust pfs_nmasters.
		 */
		hammer2_mtx_ex(&iroot->lock);
		chain = iroot->cluster.array[clindex].chain;
		iroot->cluster.array[clindex].chain = NULL;

		switch(pmp->pfs_types[clindex]) {
		case HAMMER2_PFSTYPE_MASTER:
			if (destroying && pmp->pfs_nmasters > 0)
				--pmp->pfs_nmasters;
			/* XXX adjust ripdata->meta.pfs_nmasters */
			break;
		default:
			break;
		}
		pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;

		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Release the chain.
		 */
		if (chain) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
			hammer2_chain_drop(chain);
		}

		/*
		 * Terminate all XOP threads for the cluster index.
		 */
		if (pmp->xop_groups) {
			for (j = 0; j < hammer2_xop_nthreads; ++j) {
				hammer2_thr_delete(
					&pmp->xop_groups[j].thrs[clindex]);
			}
		}
	}
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int chains_still_present = 0;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	if (pmp->flags & HAMMER2_PMPF_SPMP)
		TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry);
	else
		TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	/*
	 * Cleanup chains remaining on LRU list.
	 */
	hammer2_spin_ex(&pmp->lru_spin);
	while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONLRU);
		atomic_add_int(&pmp->lru_count, -1);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONLRU);
		TAILQ_REMOVE(&pmp->lru_list, chain, lru_node);
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&pmp->lru_spin);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		hammer2_chain_drop(chain);
		hammer2_spin_ex(&pmp->lru_spin);
	}
	hammer2_spin_unex(&pmp->lru_spin);

	/*
	 * Clean up iroot
	 */
	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j)
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
			}
			chain = iroot->cluster.array[i].chain;
			if (chain && !RB_EMPTY(&chain->core.rbtree)) {
				kprintf("hammer2: Warning pmp %p still "
					"has active chains\n", pmp);
				chains_still_present = 1;
			}
		}
		KASSERT(iroot->refs == 1,
			("PMP->IROOT %p REFS WRONG %d", iroot, iroot->refs));

		/* ref for iroot */
		hammer2_inode_drop(iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Free remaining pmp resources
	 */
	if (chains_still_present) {
		kprintf("hammer2: cannot free pmp %p, still in use\n", pmp);
	} else {
		kmalloc_destroy_obj(&pmp->minode);
		kfree(pmp, M_HAMMER2);
	}
}
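
/*
 * NOTE: When chains are still present the pmp above is intentionally
 *	 leaked rather than freed; a leak is preferable to freeing memory
 *	 that live chains still reference.
 */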

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 *
 * XXX inefficient.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int i;
	int j;
	struct hammer2_pfslist *wlist;

	if (which == 0)
		wlist = &hammer2_pfslist;
	else
		wlist = &hammer2_spmplist;
again:
	TAILQ_FOREACH(pmp, wlist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i == HAMMER2_MAXCLUSTER)
			continue;

		hammer2_vfs_sync_pmp(pmp, MNT_WAIT);

		/*
		 * Make sure all synchronization threads are locked
		 * down.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
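
		/*
		 * (Two passes: the async pass above starts all threads
		 * freezing in parallel, the second pass then waits for
		 * each freeze to complete.)
		 */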

		/*
		 * Lock the inode and clean out matching chains.
		 * Note that we cannot use hammer2_inode_lock_*()
		 * here because that would attempt to validate the
		 * cluster that we are in the middle of ripping
		 * apart.
		 *
		 * WARNING! We are working directly on the inode's
		 *	    embedded cluster.
		 */
		hammer2_mtx_ex(&iroot->lock);

		/*
		 * Remove the chain from matching elements of the PFS.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			rchain = iroot->cluster.array[i].chain;
			iroot->cluster.array[i].chain = NULL;
			pmp->pfs_types[i] = 0;
			if (pmp->pfs_names[i]) {
				kfree(pmp->pfs_names[i], M_HAMMER2);
				pmp->pfs_names[i] = NULL;
			}
			if (rchain) {
				hammer2_chain_drop(rchain);
				/* focus hint */
				if (iroot->cluster.focus == rchain)
					iroot->cluster.focus = NULL;
			}
			pmp->pfs_hmps[i] = NULL;
		}
		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			/*
			 * If this was the hmp's spmp, we need to clean
			 * a little more stuff out.
			 */
			if (hmp->spmp == pmp) {
				hmp->spmp = NULL;
				hmp->vchain.pmp = NULL;
				hmp->fchain.pmp = NULL;
			}

			/*
			 * Free the pmp and restart the loop
			 */
			KKASSERT(TAILQ_EMPTY(&pmp->syncq));
			KKASSERT(TAILQ_EMPTY(&pmp->depq));
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_remaster(&pmp->sync_thrs[i]);
			hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp, *hmp_tmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	const hammer2_inode_data_t *ripdata;
	hammer2_blockref_t bref;
	hammer2_devvp_list_t devvpl;
	hammer2_devvp_t *e, *e_tmp;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	int error;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	bzero(&info, sizeof(info));

	if (path) {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		hammer2_cluster_t *cluster;

		pmp = MPTOPMP(mp);
		pmp->hflags = info.hflags;
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			error = hammer2_remount(hmp, mp, path, cred);
			if (error)
				break;
		}

		return error;
	}

	if (path == NULL) {
		/*
		 * Root mount
		 */
		info.cluster_fd = -1;
		ksnprintf(devstr, sizeof(devstr), "%s",
			  mp->mnt_stat.f_mntfromname);
		done = strlen(devstr) + 1;
		kprintf("hammer2_mount: root devstr=\"%s\"\n", devstr);
	} else {
		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);
		kprintf("hammer2_mount: devstr=\"%s\"\n", devstr);
	}

	/*
	 * Extract device and label, automatically mount @BOOT, @ROOT, or
	 * @DATA if no label is specified, based on the partition id.  Error
	 * out if no label or device (with partition id) is specified.  This
	 * is strictly a convenience to match the default label created by
	 * newfs_hammer2; our preference is that a label always be specified.
	 *
	 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
	 *	 that does not specify a device, as long as some H2 label
	 *	 has already been mounted from that device.  This makes
	 *	 mounting snapshots a lot easier.
	 */
	dev = devstr;
	label = strchr(devstr, '@');
	if (label && ((label + 1) - dev) > done) {
		kprintf("hammer2_mount: bad label %s/%zd\n", devstr, done);
		return (EINVAL);
	}
	if (label == NULL || label[1] == 0) {
		char slice;

		if (label == NULL)
			label = devstr + strlen(devstr);
		else
			*label = '\0';		/* clean up trailing @ */

		slice = label[-1];
		switch(slice) {
		case 'a':
			label = "BOOT";
			break;
		case 'd':
			label = "ROOT";
			break;
		default:
			label = "DATA";
			break;
		}
	} else {
		*label = '\0';
		label++;
	}

	kprintf("hammer2_mount: dev=\"%s\" label=\"%s\" rdonly=%d\n",
		dev, label, ronly);
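
	/*
	 * (Illustrative examples, device names hypothetical: mounting
	 * "vbd0s1d@ROOT" yields dev="vbd0s1d" and label="ROOT", while a
	 * bare "vbd0s1a" has no '@' so the slice letter 'a' selects the
	 * default label "BOOT".)
	 */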

	/*
	 * Initialize all device vnodes.
	 */
	TAILQ_INIT(&devvpl);
	error = hammer2_init_devvp(dev, path == NULL, &devvpl);
	if (error) {
		kprintf("hammer2: failed to initialize devvp in %s\n", dev);
		hammer2_cleanup_devvp(&devvpl);
		return error;
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (!TAILQ_EMPTY(&devvpl)) {
		/*
		 * Match the device.  Due to the way devfs works,
		 * we may not be able to directly match the vnode pointer,
		 * so also check to see if the underlying device matches.
		 */
		TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) {
			TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) {
				int devvp_found = 0;
				TAILQ_FOREACH(e, &devvpl, entry) {
					KKASSERT(e->devvp);
					if (e_tmp->devvp == e->devvp)
						devvp_found = 1;
					if (e_tmp->devvp->v_rdev &&
					    e_tmp->devvp->v_rdev == e->devvp->v_rdev)
						devvp_found = 1;
				}
				if (!devvp_found)
					goto next_hmp;
			}
			hmp = hmp_tmp;
			kprintf("hammer2_mount: hmp=%p matched\n", hmp);
			break;
next_hmp:
			continue;
		}

		/*
		 * If no match this may be a fresh H2 mount, make sure
		 * the device is not mounted on anything else.
		 */
		if (hmp == NULL) {
			TAILQ_FOREACH(e, &devvpl, entry) {
				struct vnode *devvp = e->devvp;
				KKASSERT(devvp);
				error = vfs_mountedon(devvp);
				if (error) {
					kprintf("hammer2_mount: %s mounted %d\n",
						e->path, error);
					hammer2_cleanup_devvp(&devvpl);
					lockmgr(&hammer2_mntlk, LK_RELEASE);
					return error;
				}
			}
		}
	} else {
		/*
		 * Match the label to a pmp already probed.
		 */
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_names[i] &&
				    strcmp(pmp->pfs_names[i], label) == 0) {
					hmp = pmp->pfs_hmps[i];
					break;
				}
			}
			if (hmp)
				break;
		}
		if (hmp == NULL) {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return ENOENT;
		}
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;
		hammer2_xop_head_t xop;

		/*
		 * Now open the device
		 */
		KKASSERT(!TAILQ_EMPTY(&devvpl));
		if (error == 0) {
			error = hammer2_open_devvp(&devvpl, ronly);
			if (error) {
				hammer2_close_devvp(&devvpl, ronly);
				hammer2_cleanup_devvp(&devvpl);
				lockmgr(&hammer2_mntlk, LK_RELEASE);
				return error;
			}
		}

		/*
		 * Construct volumes and link with device vnodes.
		 */
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->devvp = NULL;
		error = hammer2_init_volumes(mp, &devvpl, hmp->volumes,
					     &hmp->voldata, &hmp->volhdrno,
					     &hmp->devvp);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			kfree(hmp, M_HAMMER2);
			return error;
		}
		if (!hmp->devvp) {
			kprintf("hammer2: failed to initialize root volume\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
		hmp->ronly = ronly;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains",
				   sizeof(struct hammer2_chain));
		kmalloc_create_obj(&hmp->mio, "HAMMER2-dio",
				   sizeof(struct hammer2_io));
		kmalloc_create(&hmp->mmsg, "HAMMER2-msg");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		spin_init(&hmp->io_spin, "h2mount_io");
		spin_init(&hmp->list_spin, "h2mount_list");

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);
		lockinit(&hmp->bflock, "h2bflk", 0, 0);

		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hammer2_chain_core_init(&hmp->vchain);

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level radix tree
		 * so it does not interfere with the volume's topology
		 * radix tree.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hammer2_chain_core_init(&hmp->fchain);

		/*
		 * Initialize volume header related fields.
		 */
		KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO ||
			 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO);
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
		/*
		 * Must use hmp instead of volume header for these two
		 * in order to handle volume versions transparently.
		 */
		if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) {
			hmp->nvolumes = hmp->voldata.nvolumes;
			hmp->total_size = hmp->voldata.total_size;
		} else {
			hmp->nvolumes = 1;
			hmp->total_size = hmp->voldata.volu_size;
		}
		KKASSERT(hmp->nvolumes > 0);

		/*
		 * Move devvpl entries to hmp.
		 */
		TAILQ_INIT(&hmp->devvpl);
		while ((e = TAILQ_FIRST(&devvpl)) != NULL) {
			TAILQ_REMOVE(&devvpl, e, entry);
			TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry);
		}
		KKASSERT(TAILQ_EMPTY(&devvpl));
		KKASSERT(!TAILQ_EMPTY(&hmp->devvpl));

		/*
		 * Really important to get these right or the flush and
		 * teardown code will get confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0, NULL);
		spmp = hmp->spmp;
		spmp->pfs_hmps[0] = hmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory
		 * keyspace represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
					      HAMMER2_SROOT_KEY,
					      HAMMER2_SROOT_KEY,
					      &error, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &schain->data->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		hammer2_dummy_xop_from_chain(&xop, schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = NULL;
		spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(&xop.cluster);
		hammer2_chain_drop(schain);
		/* do not call hammer2_cluster_drop() on an embedded cluster */
		schain = NULL;	/* now invalid */
		/* leave spmp->iroot with one ref */

		if (!hmp->ronly) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);
		hammer2_bulkfree_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		if (info.cluster_fd >= 0) {
			fp = holdfp(curthread, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
	} else {
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2_mount: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
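
	/*
	 * (Several directory entries can hash into the same
	 * HAMMER2_DIRHASH_LOMASK range, so the scan above iterates the
	 * keyspace and compares filenames rather than trusting the hash
	 * alone.)
	 */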

	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		if (error) {
			kprintf("hammer2_mount: PFS label I/O error\n");
			return EINVAL;
		} else {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			return ENOENT;
		}
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps() so do not pass cluster in to add to
	 * available chains).
	 *
	 * Check if the cluster has already been mounted.  A cluster can
	 * only be mounted once, use null mounts to mount additional copies.
	 */
	if (chain->error) {
		kprintf("hammer2_mount: PFS label I/O error\n");
	} else {
		ripdata = &chain->data->ipdata;
		bref = chain->bref;
		pmp = hammer2_pfsalloc(NULL, ripdata,
				       bref.modify_tid, force_local);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	/*
	 * Finish the mount
	 */
	kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp);

	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EBUSY;
	}

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_next;
	int error;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
		} else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			kprintf("Non inode chain type %d under super-root\n",
				chain->bref.type);
		} else {
			ripdata = &chain->data->ipdata;
			bref = chain->bref;
			pmp = hammer2_pfsalloc(chain, ripdata,
					       bref.modify_tid, force_local);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
}

static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct ucred *cred)
{
	hammer2_volume_t *vol;
	struct vnode *devvp;
	int i, error, result = 0;

	if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)))
		return 0;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = &hmp->volumes[i];
		devvp = vol->dev->devvp;
		KKASSERT(devvp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
		vn_unlock(devvp);
		error = 0;
		if (vol->id == HAMMER2_ROOT_VOLUME) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			VOP_CLOSE(devvp, FREAD, NULL);
		} else {
			VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
		}
		vn_unlock(devvp);
		result |= error;
	}
	if (result == 0) {
		kprintf("hammer2: enable read/write\n");
		hmp->ronly = 0;
	}

	return result;
}
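
/*
 * NOTE: The read-write upgrade above is all-or-nothing: hmp->ronly is
 *	 cleared only when every volume reopened successfully, so a
 *	 partial failure leaves the whole device read-only.
 */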

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	if (pmp->mp)
		hammer2_unmount_helper(mp, pmp, NULL);

	error = 0;
failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Mount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int dumpcnt;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have to
	 * disconnect the mount, adjust mount_count, and locate devices that
	 * might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT(MPTOPMP(mp) == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	if (hmp->mount_count)
		return;

	/*
	 * Decommission the network before we start messing with the
	 * device and PFS.
	 */
	hammer2_iocom_uninit(hmp);

	hammer2_bulkfree_uninit(hmp);
	hammer2_pfsfree_scan(hmp, 0);

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);
#endif

	/*
	 * Flush whatever is left.  Unmounted but modified PFSs might still
	 * have some dirty chains on them.
	 */
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);

	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_voldata_modify(hmp);
		hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->fchain);

	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->vchain);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	hammer2_pfsfree_scan(hmp, 1);

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if (!TAILQ_EMPTY(&hmp->devvpl)) {
		hammer2_close_devvp(&hmp->devvpl, hmp->ronly);
		hammer2_cleanup_devvp(&hmp->devvpl);
	}
	KKASSERT(TAILQ_EMPTY(&hmp->devvpl));

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1);
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1);

	hammer2_chain_drop(&hmp->vchain);

	hammer2_io_cleanup(hmp, &hmp->iotree);
	if (hmp->iofree_count) {
		kprintf("io_cleanup: %d I/O's left hanging\n",
			hmp->iofree_count);
	}

	TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
	kmalloc_destroy_obj(&hmp->mchain);
	kmalloc_destroy_obj(&hmp->mio);
	kmalloc_destroy(&hmp->mmsg);
	kfree(hmp, M_HAMMER2);
}

int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	hammer2_xop_lookup_t *xop;
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	int error;

	inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;

	error = 0;
	pmp = MPTOPMP(mp);

	/*
	 * Easy if we already have it cached
	 */
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* from lookup */

		return error;
	}

	/*
	 * Otherwise we have to find the inode
	 */
	xop = hammer2_xop_alloc(pmp->iroot, 0);
	xop->lhc = inum;
	hammer2_xop_start(&xop->head, &hammer2_lookup_desc);
	error = hammer2_xop_collect(&xop->head, 0);

	if (error == 0)
		ip = hammer2_inode_get(pmp, &xop->head, -1, -1);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	if (ip) {
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
	} else {
		*vpp = NULL;
		error = ENOENT;
	}
	return (error);
}
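
/*
 * NOTE: hammer2_vfs_vget() is also the backend for NFS file-handle
 *	 resolution; hammer2_vfs_fhtovp() below calls it with the inode
 *	 number extracted from the fid.
 */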

static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int error;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		kprintf("hammer2 (%s): no root inode\n",
			mp->mnt_stat.f_mntfromname);
		*vpp = NULL;
		return EINVAL;
	}

	error = 0;
	hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);

	while (pmp->inode_tid == 0) {
		hammer2_xop_ipcluster_t *xop;
		const hammer2_inode_meta_t *meta;

		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error == 0) {
			meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta;
			pmp->iroot->meta = *meta;
			pmp->inode_tid = meta->pfs_inum + 1;
			hammer2_xop_pdata(&xop->head);
			/* meta invalid */

			if (pmp->inode_tid < HAMMER2_INODE_START)
				pmp->inode_tid = HAMMER2_INODE_START;
			pmp->modify_tid =
				xop->head.cluster.focus->bref.modify_tid + 1;
#if 0
			kprintf("PFS: Starting inode %jd\n",
				(intmax_t)pmp->inode_tid);
			kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
				pmp->inode_tid, pmp->modify_tid);
#endif
			wakeup(&pmp->iroot);

			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

			/*
			 * Prime the mount info.
			 */
			hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
			break;
		}

		/*
		 * Loop, try again
		 */
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlock(pmp->iroot);
		error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
		hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
		if (error == EINTR)
			break;
	}

	if (error) {
		hammer2_inode_unlock(pmp->iroot);
		*vpp = NULL;
	} else {
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_inode_unlock(pmp->iroot);
		*vpp = vp;
	}

	return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	struct statfs tmp;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	bzero(&tmp, sizeof(tmp));

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		tmp.f_files = bref.embed.stats.inode_count;
		tmp.f_ffree = 0;
		tmp.f_blocks = hmp->voldata.allocator_size /
			       mp->mnt_vstat.f_bsize;
		tmp.f_bfree = hmp->voldata.allocator_free /
			      mp->mnt_vstat.f_bsize;
		tmp.f_bavail = tmp.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			tmp.f_blocks -= adj;
			tmp.f_bfree -= adj;
			tmp.f_bavail -= adj;
		}

		mp->mnt_stat.f_blocks = tmp.f_blocks;
		mp->mnt_stat.f_bfree = tmp.f_bfree;
		mp->mnt_stat.f_bavail = tmp.f_bavail;
		mp->mnt_stat.f_files = tmp.f_files;
		mp->mnt_stat.f_ffree = tmp.f_ffree;

		*sbp = mp->mnt_stat;
	}
	return (0);
}
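
/*
 * Illustrative arithmetic for the reserve adjustment above (the numbers
 * here are made up, not taken from this file): with f_bsize = 65536 and
 * free_reserved at ~5% of the volume,
 *
 *	adj      = free_reserved / f_bsize	(the reserve, in blocks)
 *	f_blocks = allocator_size / f_bsize - adj
 *	f_bfree  = allocator_free / f_bsize - adj
 *
 * so the root-reserved area is simply hidden from non-root statfs()
 * output rather than reported as used.
 */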

static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	struct statvfs tmp;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);
	bzero(&tmp, sizeof(tmp));

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		tmp.f_files = bref.embed.stats.inode_count;
		tmp.f_ffree = 0;
		tmp.f_blocks = hmp->voldata.allocator_size /
			       mp->mnt_vstat.f_bsize;
		tmp.f_bfree = hmp->voldata.allocator_free /
			      mp->mnt_vstat.f_bsize;
		tmp.f_bavail = tmp.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			tmp.f_blocks -= adj;
			tmp.f_bfree -= adj;
			tmp.f_bavail -= adj;
		}

		mp->mnt_vstat.f_blocks = tmp.f_blocks;
		mp->mnt_vstat.f_bfree = tmp.f_bfree;
		mp->mnt_vstat.f_bavail = tmp.f_bavail;
		mp->mnt_vstat.f_files = tmp.f_files;
		mp->mnt_vstat.f_ffree = tmp.f_ffree;

		*sbp = mp->mnt_vstat;
	}
	return (0);
}

/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  If a crash occurs, then on the next fresh mount we must
 * do an incremental scan of the last committed transaction id and make
 * sure that all related blocks have been marked allocated.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
	hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
	struct hammer2_recovery_list list;
	hammer2_tid_t	mtid;
	int	depth;
};

static int hammer2_recovery_scan(hammer2_dev_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_info *info,
			hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH	10

static
int
hammer2_recovery(hammer2_dev_t *hmp)
{
	struct hammer2_recovery_info info;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	hammer2_tid_t sync_tid;
	hammer2_tid_t mirror_tid;
	int error;

	hammer2_trans_init(hmp->spmp, 0);

	sync_tid = hmp->voldata.freemap_tid;
	mirror_tid = hmp->voldata.mirror_tid;

	kprintf("hammer2_mount: \"%s\": ", hmp->devrepname);
	if (sync_tid >= mirror_tid) {
		kprintf("no recovery needed\n");
	} else {
		kprintf("freemap recovery %016jx-%016jx\n",
			sync_tid + 1, mirror_tid);
	}

	TAILQ_INIT(&info.list);
	info.depth = 0;
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
		TAILQ_REMOVE(&info.list, elm, entry);
		parent = elm->chain;
		sync_tid = elm->sync_tid;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		error |= hammer2_recovery_scan(hmp, parent, &info,
					       hmp->voldata.freemap_tid);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);	/* drop elm->chain ref */
	}

	hammer2_trans_done(hmp->spmp, 0);

	return error;
}
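
/*
 * NOTE: The deferral list above bounds the recursion.  When
 *	 hammer2_recovery_scan() hits HAMMER2_RECOVERY_MAXDEPTH it queues
 *	 the sub-tree on info.list instead of recursing further, and the
 *	 while loop drains the list iteratively, keeping kernel stack use
 *	 bounded.
 */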

static
int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	int tmp_error;
	int rup_error;
	int error;
	int first;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &parent->data->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
		break;
	default:
		return HAMMER2_ERROR_BADBREF;
	}

	/*
	 * Defer operation if depth limit reached.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */

		return(0);
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 *
	 * error	Cumulative error this level only
	 * rup_error	Cumulative error for recursion
	 * tmp_error	Specific non-cumulative recursion error
	 */
	chain = NULL;
	first = 1;
	rup_error = 0;
	error = 0;

	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref,
					    &first,
					    HAMMER2_LOOKUP_NODATA);

		/*
		 * Problem during scan or EOF
		 */
		if (error)
			break;

		/*
		 * If this is a leaf
		 */
		if (chain == NULL) {
			if (bref.mirror_tid > sync_tid) {
				hammer2_freemap_adjust(hmp, &bref,
						     HAMMER2_FREEMAP_DORECOVER);
			}
			continue;
		}

		/*
		 * This may or may not be a recursive node.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (bref.mirror_tid > sync_tid) {
			++info->depth;
			tmp_error = hammer2_recovery_scan(hmp, chain,
							  info, sync_tid);
			--info->depth;
		} else {
			tmp_error = 0;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
		 */
		if (tmp_error == 0 &&
		    (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
		}
		rup_error |= tmp_error;
	}
	return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
}
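
/*
 * NOTE: hammer2_chain_scan() returns HAMMER2_ERROR_EOF as its normal
 *	 end-of-scan indication, which is why it is masked out of the
 *	 combined return value above.
 */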

/*
 * This fixes up an error introduced in earlier H2 implementations where
 * moving a PFS inode into an indirect block wound up causing the
 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared.
 */
static
int
hammer2_fixup_pfses(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_pfs_t *spmp;
	int error;

	error = 0;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		/*
		 * Skip non-inode chains.  NOTE: A bare 'continue' here
		 * would loop forever; we must always advance via
		 * hammer2_chain_next() at the bottom of the loop.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			if (chain->error) {
				kprintf("I/O error scanning PFS labels\n");
				error |= chain->error;
			} else if ((chain->bref.flags &
				    HAMMER2_BREF_FLAG_PFSROOT) == 0) {
				int error2;

				ripdata = &chain->data->ipdata;
				hammer2_trans_init(hmp->spmp, 0);
				error2 = hammer2_chain_modify(chain,
						      chain->bref.modify_tid,
						      0, 0);
				if (error2 == 0) {
					kprintf("hammer2: Correct mis-flagged PFS %s\n",
						ripdata->filename);
					chain->bref.flags |=
						HAMMER2_BREF_FLAG_PFSROOT;
				} else {
					error |= error2;
				}
				hammer2_flush(chain, HAMMER2_FLUSH_TOP |
						     HAMMER2_FLUSH_ALL);
				hammer2_trans_done(hmp->spmp, 0);
			}
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	return error;
}

/*
 * Sync a mount point; this is called periodically on a per-mount basis from
 * the filesystem syncer, and whenever a user issues a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	int error;

	error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor);

	return error;
}
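
/*
 * NOTE: hammer2_vfs_sync() is deliberately a thin wrapper.  All of the
 *	 staging, restart, and final iroot flush logic lives in
 *	 hammer2_vfs_sync_pmp() below.
 */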

/*
 * Because frontend operations lock vnodes before we get a chance to
 * lock the related inode, we can't just acquire a vnode lock without
 * risking a deadlock.  The frontend may be holding a vnode lock while
 * also blocked on our SYNCQ flag while trying to get the inode lock.
 *
 * To deal with this situation we check the vnode lock state after
 * locking the inode and perform a work-around.
 */
int
hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor)
{
	struct mount *mp;
	/*hammer2_xop_flush_t *xop;*/
	hammer2_inode_t *ip;
	hammer2_depend_t *depend;
	hammer2_depend_t *depend_next;
	struct vnode *vp;
	uint32_t pass2;
	int error;
	int wakecount;
	int dorestart;

	mp = pmp->mp;

	/*
	 * Move all inodes on sideq to syncq.  This will clear sideq.
	 * This should represent all flushable inodes.  These inodes
	 * will already have refs due to being on syncq or sideq.  We
	 * must do this all at once with the spinlock held to ensure that
	 * all inode dependencies are part of the same flush.
	 *
	 * We should be able to do this asynchronously from frontend
	 * operations because we will be locking the inodes later on
	 * to actually flush them, and that will partition any frontend
	 * op using the same inode.  Either it has already locked the
	 * inode and we will block, or it has not yet locked the inode
	 * and it will block until we are finished flushing that inode.
	 *
	 * When restarting, only move the inodes flagged as PASS2 from
	 * SIDEQ to SYNCQ.  PASS2 propagation by inode_lock4() and
	 * inode_depend() is atomic with the spin-lock.
	 */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC BOUNDARY\n");
#endif
	dorestart = 0;

	/*
	 * Move inodes from depq to syncq, releasing the related
	 * depend structures.
	 */
restart:
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart);
#endif
	hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/);
	hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN);

	/*
	 * Move inodes from depq to syncq.  When restarting, only depq
	 * entries marked pass2 are moved.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend_next = TAILQ_FIRST(&pmp->depq);
	wakecount = 0;

	while ((depend = depend_next) != NULL) {
		depend_next = TAILQ_NEXT(depend, entry);
		if (dorestart && depend->pass2 == 0)
			continue;
		TAILQ_FOREACH(ip, &depend->sideq, entry) {
			KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
			atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
			ip->depend = NULL;
		}

		/*
		 * NOTE: pmp->sideq_count includes both sideq and syncq
		 */
		TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry);

		depend->count = 0;
		depend->pass2 = 0;
		TAILQ_REMOVE(&pmp->depq, depend, entry);
	}

	hammer2_spin_unex(&pmp->list_spin);
	hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/
				      HAMMER2_TRANS_WAITING);
	dorestart = 0;

	/*
	 * sideq_count may have dropped enough to allow us to unstall
	 * the frontend.
	 */
	hammer2_pfs_memory_wakeup(pmp, 0);
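
	/*
	 * Informal sketch of the inode queue transitions driven by the
	 * code above and below:
	 *
	 *	SIDEQ --(depq move above)--> SYNCQ --(flush loop)--> clean
	 *	  ^                                      |
	 *	  +----(vnode busy or redirtied, PASS2)--+
	 *
	 * An inode bounced back to the SIDEQ is marked PASS2 and is
	 * picked up again by the restart path.
	 */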

	/*
	 * Now run through all inodes on syncq.
	 *
	 * Flush transactions only interlock with other flush transactions.
	 * Any conflicting frontend operations will block on the inode, but
	 * may hold a vnode lock while doing so.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) {
		/*
		 * Remove the inode from the SYNCQ, transfer the syncq ref
		 * to us.  We must clear SYNCQ to allow any potential
		 * front-end deadlock to proceed.  We must set PASS2 so
		 * the dependency code knows what to do.
		 */
		pass2 = ip->flags;
		cpu_ccfence();
		if (atomic_cmpset_int(&ip->flags,
			      pass2,
			      (pass2 & ~(HAMMER2_INODE_SYNCQ |
					 HAMMER2_INODE_SYNCQ_WAKEUP)) |
			      HAMMER2_INODE_SYNCQ_PASS2) == 0) {
			continue;
		}
		TAILQ_REMOVE(&pmp->syncq, ip, entry);
		--pmp->sideq_count;
		hammer2_spin_unex(&pmp->list_spin);

		/*
		 * Tickle anyone waiting on ip->flags or the hysteresis
		 * on the dirty inode count.
		 */
		if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP)
			wakeup(&ip->flags);
		if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) {
			wakecount = 0;
			hammer2_pfs_memory_wakeup(pmp, 0);
		}

		/*
		 * Relock the inode; we inherit a ref from the above.
		 * We will check for a race after we acquire the vnode.
		 */
		hammer2_mtx_ex(&ip->lock);

		/*
		 * We need the vp in order to vfsync() dirty buffers, so if
		 * one isn't attached we can skip it.
		 *
		 * Ordering the inode lock and then the vnode lock has the
		 * potential to deadlock.  If we had left SYNCQ set that could
		 * also deadlock us against the frontend even if we don't hold
		 * any locks, but the latter is not a problem now since we
		 * cleared it.  igetv will temporarily release the inode lock
		 * in a safe manner to work around the deadlock.
		 *
		 * Unfortunately it is still possible to deadlock when the
		 * frontend obtains multiple inode locks, because all the
		 * related vnodes are already locked (nor can the vnode locks
		 * be released and reacquired without messing up RECLAIM and
		 * INACTIVE sequencing).
		 *
		 * The solution for now is to move the inode back onto the
		 * SIDEQ and set dorestart, which will restart the flush after
		 * we exhaust the current SYNCQ.  Note that additional
		 * dependencies may build up, so we definitely need to move
		 * the whole SIDEQ back to SYNCQ when we restart.
		 */
		vp = ip->vp;
		if (vp) {
			if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) {
				/*
				 * Failed to get the vnode, requeue the inode
				 * (PASS2 is already set so it will be found
				 * again on the restart).
				 *
				 * Then unlock, possibly sleep, and retry
				 * later.  We sleep if PASS2 was *previously*
				 * set, before we set it again above.
				 */
				vp = NULL;
				dorestart = 1;
#ifdef HAMMER2_DEBUG_SYNC
				kprintf("inum %ld (sync delayed by vnode)\n",
					(long)ip->meta.inum);
#endif
				hammer2_inode_delayed_sideq(ip);

				hammer2_mtx_unlock(&ip->lock);
				hammer2_inode_drop(ip);

				if (pass2 & HAMMER2_INODE_SYNCQ_PASS2) {
					tsleep(&dorestart, 0, "h2syndel", 2);
				}
				hammer2_spin_ex(&pmp->list_spin);
				continue;
			}
		} else {
			vp = NULL;
		}

		/*
		 * If the inode wound up on a SIDEQ again it will already be
		 * prepped for another PASS2.  In this situation if we flush
		 * it now we will just wind up flushing it again in the same
		 * syncer run, so we might as well not flush it now.
		 */
		if (ip->flags & HAMMER2_INODE_SIDEQ) {
			hammer2_mtx_unlock(&ip->lock);
			hammer2_inode_drop(ip);
			if (vp)
				vput(vp);
			dorestart = 1;
			hammer2_spin_ex(&pmp->list_spin);
			continue;
		}

		/*
		 * Ok we have the inode exclusively locked and if vp is
		 * not NULL that will also be exclusively locked.  Do the
		 * meat of the flush.
		 *
		 * vp token needed for v_rbdirty_tree check / vclrisdirty
		 * sequencing.  Though we hold the vnode exclusively so
		 * we shouldn't need to hold the token also in this case.
		 */
		if (vp) {
			vfsync(vp, MNT_WAIT, 1, NULL, NULL);
			bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */
		}
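
		/*
		 * NOTE: The vfsync() above pushes the vnode's dirty buffers
		 *	 through the strategy code before the inode's chain
		 *	 topology is synchronized below, so the file data is
		 *	 staged ahead of the meta-data that references it.
		 */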

		/*
		 * If the inode has not yet been inserted into the tree
		 * we must do so.  Then sync and flush it.  The flush should
		 * update the parent.
		 */
		if (ip->flags & HAMMER2_INODE_DELETING) {
#ifdef HAMMER2_DEBUG_SYNC
			kprintf("inum %ld destroy\n", (long)ip->meta.inum);
#endif
			hammer2_inode_chain_des(ip);
			atomic_add_long(&hammer2_iod_inode_deletes, 1);
		} else if (ip->flags & HAMMER2_INODE_CREATING) {
#ifdef HAMMER2_DEBUG_SYNC
			kprintf("inum %ld insert\n", (long)ip->meta.inum);
#endif
			hammer2_inode_chain_ins(ip);
			atomic_add_long(&hammer2_iod_inode_creates, 1);
		}
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("inum %ld chain-sync\n", (long)ip->meta.inum);
#endif

		/*
		 * Because I kinda messed up the design and index the inodes
		 * under the root inode, alongside the directory entries,
		 * we can't flush the inode index under the iroot until the
		 * end.  If we do it now we might miss effects created by
		 * other inodes on the SYNCQ.
		 *
		 * Do a normal (non-FSSYNC) flush instead, which allows the
		 * vnode code to work the same.  We don't want to force iroot
		 * back onto the SIDEQ, and we also don't want the flush code
		 * to update pfs_iroot_blocksets until the final flush later.
		 *
		 * XXX at the moment this will likely result in a double-flush
		 * of the iroot chain.
		 */
		hammer2_inode_chain_sync(ip);
		if (ip == pmp->iroot) {
			hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
		} else {
			hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
						      HAMMER2_XOP_FSSYNC);
		}
		if (vp) {
			lwkt_gettoken(&vp->v_token);
			if ((ip->flags & (HAMMER2_INODE_MODIFIED |
					  HAMMER2_INODE_RESIZED |
					  HAMMER2_INODE_DIRTYDATA)) == 0 &&
			    RB_EMPTY(&vp->v_rbdirty_tree) &&
			    !bio_track_active(&vp->v_track_write)) {
				vclrisdirty(vp);
			} else {
				hammer2_inode_delayed_sideq(ip);
			}
			lwkt_reltoken(&vp->v_token);
			vput(vp);
			vp = NULL;	/* safety */
		}
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2);
		hammer2_inode_unlock(ip);	/* unlock+drop */
		/* ip pointer invalid */

		/*
		 * If the inode got dirtied after we dropped our locks,
		 * it will have already been moved back to the SIDEQ.
		 */
		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
	hammer2_pfs_memory_wakeup(pmp, 0);

	if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) {
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n");
		/*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/
#endif
		dorestart = 1;
		goto restart;
	}
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n");
	/*tsleep(&dorestart, 0, "h2STG2", hz*20);*/
#endif

	/*
	 * We have to flush the PFS root last, even if it does not appear to
	 * be dirty, because all the inodes in the PFS are indexed under it.
	 * The normal flushing of iroot above would only occur if directory
	 * entries under the root were changed.
	 *
	 * Specifying VOLHDR will cause an additional flush of hmp->spmp
	 * for the media making up the cluster.
	 */
	if ((ip = pmp->iroot) != NULL) {
		hammer2_inode_ref(ip);
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_chain_sync(ip);
		hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
					      HAMMER2_XOP_FSSYNC |
					      HAMMER2_XOP_VOLHDR);
		hammer2_inode_unlock(ip);	/* unlock+drop */
	}
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC STAGE 2 DONE\n");
#endif

	/*
	 * device bioq sync
	 */
	hammer2_bioq_sync(pmp);

#if 0
	/*
	 * Generally speaking we now want to flush the media topology from
	 * the iroot through to the inodes.  The flush stops at any inode
	 * boundary, which allows the frontend to continue running concurrent
	 * modifying operations on inodes (including kernel flushes of
	 * buffers) without interfering with the main sync.
	 *
	 * Use the XOP interface to concurrently flush all nodes to
	 * synchronize the PFSROOT subtopology to the media.  A standard
	 * end-of-scan ENOENT error indicates cluster sufficiency.
	 *
	 * Note that this flush will not be visible on crash recovery until
	 * we flush the super-root topology in the next loop.
	 *
	 * XXX For now wait for all flushes to complete.
	 */
	if (mp && (ip = pmp->iroot) != NULL) {
		/*
		 * If unmounting try to flush everything including any
		 * sub-trees under inodes, just in case there is dangling
		 * modified data, as a safety.  Otherwise just flush up to
		 * the inodes in this stage.
		 */
		kprintf("MP & IROOT\n");
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("FILESYSTEM SYNC STAGE 3 IROOT BEGIN\n");
#endif
		if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
						    HAMMER2_XOP_VOLHDR |
						    HAMMER2_XOP_FSSYNC |
						    HAMMER2_XOP_INODE_STOP);
		} else {
			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
						    HAMMER2_XOP_VOLHDR |
						    HAMMER2_XOP_FSSYNC |
						    HAMMER2_XOP_INODE_STOP);
		}
		hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
		error = hammer2_xop_collect(&xop->head,
					    HAMMER2_XOP_COLLECT_WAITALL);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("FILESYSTEM SYNC STAGE 3 IROOT END\n");
#endif
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		else
			error = hammer2_error_to_errno(error);
	} else {
		error = 0;
	}
#endif
	error = 0;	/* XXX */
	hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH);

	return (error);
}

static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer2_inode_t *ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = 0;
	((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
	((hammer2_tid_t *)fhp->fid_data)[1] = 0;

	return 0;
}
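
/*
 * Resulting file handle layout (16 data bytes, see above):
 *
 *	fid_data[0..7]	inode number (hammer2_tid_t)
 *	fid_data[8..15]	reserved, zero
 *
 * hammer2_vfs_fhtovp() below consumes only the inode number.
 */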

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	hammer2_tid_t inum;
	int error;

	pmp = MPTOPMP(mp);
	inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
	if (vpp) {
		if (inum == 1)
			error = hammer2_vfs_root(mp, vpp);
		else
			error = hammer2_vfs_vget(mp, NULL, inum, vpp);
	} else {
		error = 0;
	}
	return error;
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	hammer2_pfs_t *pmp;
	struct netcred *np;
	int error;

	pmp = MPTOPMP(mp);
	np = vfs_export_lookup(mp, &pmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return error;
}
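
/*
 * Illustrative use of the lwinprog interface below (the caller shown is
 * an assumption, not taken from this file).  A strategy-write path might
 * do roughly:
 *
 *	hammer2_lwinprog_ref(pmp);
 *	... queue BIO to the backend thread ...
 *	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
 *
 * with the backend calling hammer2_lwinprog_drop(pmp) as each BIO
 * completes, keeping roughly hammer2_flush_pipe writes in flight.
 */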

/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
	if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING0);
		wakeup(&pmp->count_lwinprog);
	}
}

void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
	int lwinprog;
	int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
				    HAMMER2_LWINPROG_WAITING0;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, lwflag);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}

/*
 * It is possible for an excessive number of dirty chains or dirty inodes
 * to build up.  When this occurs we start an asynchronous filesystem sync.
 * If the level continues to build up, we stall, waiting for it to drop,
 * with some hysteresis.
 *
 * This relies on the kernel calling hammer2_vfs_modifying() prior to
 * obtaining any vnode locks before making a modifying VOP call.
 */
static int
hammer2_vfs_modifying(struct mount *mp)
{
	if (mp->mnt_flag & MNT_RDONLY)
		return EROFS;
	hammer2_pfs_memory_wait(MPTOPMP(mp));

	return 0;
}

/*
 * Initiate an asynchronous filesystem sync and, with hysteresis,
 * stall if the internal data structure count becomes too bloated.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	int pcatch;
	int error;

	if (pmp == NULL || pmp->mp == NULL)
		return;

	for (;;) {
		waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK;
		cpu_ccfence();

		/*
		 * Start the syncer running at 1/2 the limit
		 */
		if (waiting > hammer2_limit_dirty_chains / 2 ||
		    pmp->sideq_count > hammer2_limit_dirty_inodes / 2) {
			trigger_syncer(pmp->mp);
		}

		/*
		 * Stall at the limit waiting for the counts to drop.
		 * This code will typically be woken up once the count
		 * drops below 2/3 the limit, or in one second.
		 */
		if (waiting < hammer2_limit_dirty_chains &&
		    pmp->sideq_count < hammer2_limit_dirty_inodes) {
			break;
		}

		pcatch = curthread->td_proc ? PCATCH : 0;

		tsleep_interlock(&pmp->inmem_dirty_chains, pcatch);
		atomic_set_int(&pmp->inmem_dirty_chains,
			       HAMMER2_DIRTYCHAIN_WAITING);
		if (waiting < hammer2_limit_dirty_chains &&
		    pmp->sideq_count < hammer2_limit_dirty_inodes) {
			break;
		}
		trigger_syncer(pmp->mp);
		error = tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED | pcatch,
			       "h2memw", hz);
		if (error == ERESTART)
			break;
	}
}
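
/*
 * Hysteresis summary for the dirty-chain / dirty-inode limits used by
 * hammer2_pfs_memory_wait() above and hammer2_pfs_memory_wakeup()
 * below:
 *
 *	count > 1/2 limit	poke the filesystem syncer
 *	count >= limit		stall the frontend (tsleep above)
 *	count <= 2/3 limit	wake stalled frontends back up
 */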

/*
 * Wake up any stalled frontend ops waiting, with hysteresis, using
 * 2/3 of the limit.
 */
void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count)
{
	uint32_t waiting;

	if (pmp) {
		waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count);
		/* don't need --waiting to test flag */

		if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) &&
		    (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <=
		    hammer2_limit_dirty_chains * 2 / 3 &&
		    pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) {
			atomic_clear_int(&pmp->inmem_dirty_chains,
					 HAMMER2_DIRTYCHAIN_WAITING);
			wakeup(&pmp->inmem_dirty_chains);
		}
	}
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	if (pmp) {
		atomic_add_int(&pmp->inmem_dirty_chains, 1);
	}
}

/*
 * Volume header data locks
 */
void
hammer2_voldata_lock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_EXCLUSIVE);
}

void
hammer2_voldata_unlock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_RELEASE);
}

/*
 * Caller indicates that the volume header is being modified.  Flag
 * the related chain and adjust its transaction id.
 *
 * The transaction id is set to voldata.mirror_tid + 1, similar to
 * what hammer2_chain_modify() does.  Be very careful here, volume
 * data can be updated independently of the rest of the filesystem.
 */
void
hammer2_voldata_modify(hammer2_dev_t *hmp)
{
	if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_add_long(&hammer2_count_modified_chains, 1);
		atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_inc(hmp->vchain.pmp);
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid + 1;
	}
}
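
/*
 * Illustrative mapping of the return codes below in a hypothetical
 * write-path caller (this is a sketch, not code from this file):
 *
 *	switch (hammer2_vfs_enospace(ip, bytes, cred)) {
 *	case 2:
 *		error = ENOSPC;		(hard: reserve would be violated)
 *		break;
 *	case 1:				(soft: under ~10% free)
 *	default:
 *		error = 0;
 *		break;
 *	}
 */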

/*
 * Returns 0 if the filesystem has tons of free space.
 * Returns 1 if the filesystem has less than 10% remaining.
 * Returns 2 if the filesystem has less than 5%/2.5% (user/root) remaining.
 */
int
hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_off_t free_reserved;
	hammer2_off_t free_nominal;
	int i;

	pmp = ip->pmp;

	if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
		free_reserved = HAMMER2_SEGSIZE;
		free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
		for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
			hmp = pmp->pfs_hmps[i];
			if (hmp == NULL)
				continue;
			if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
			    pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
				continue;

			if (free_nominal > hmp->voldata.allocator_free)
				free_nominal = hmp->voldata.allocator_free;
			if (free_reserved < hmp->free_reserved)
				free_reserved = hmp->free_reserved;
		}

		/*
		 * SMP races ok
		 */
		pmp->free_reserved = free_reserved;
		pmp->free_nominal = free_nominal;
		pmp->free_ticks = ticks;
	} else {
		free_reserved = pmp->free_reserved;
		free_nominal = pmp->free_nominal;
	}
	if (cred && cred->cr_uid != 0) {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved) {
			return 2;
		}
	} else {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved / 2) {
			return 2;
		}
	}
	if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
		return 1;
	return 0;
}

/*
 * Debugging
 */
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int bi, int *countp,
		   char pfx, u_int flags)
{
	hammer2_chain_t *scan;
	hammer2_chain_t *parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	kprintf("%*.*s%c-chain %p %s.%-3d %016jx %016jx/%-2d mir=%016jx\n",
		tab, tab, "", pfx, chain,
		hammer2_bref_type_str(chain->bref.type), bi,
		chain->bref.data_off, chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s [%08x] (%s) refs=%d",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		  chain->data) ? (char *)chain->data->ipdata.filename : "?"),
		chain->refs);

	parent = chain->parent;
	if (parent)
		kprintf("\n%*.*s p=%p [pflags %08x prefs %d]",
			tab, tab, "",
			parent, parent->flags, parent->refs);
	if (RB_EMPTY(&chain->core.rbtree)) {
		kprintf("\n");
	} else {
		int bi = 0;

		kprintf(" {\n");
		RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) {
			if ((scan->flags & flags) || flags == (u_int)-1) {
				hammer2_dump_chain(scan, tab + 4, bi, countp,
						   'a', flags);
			}
			bi++;
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}