1 /* 2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/nlookup.h> 39 #include <sys/vnode.h> 40 #include <sys/mount.h> 41 #include <sys/fcntl.h> 42 #include <sys/buf.h> 43 #include <sys/uuid.h> 44 #include <sys/vfsops.h> 45 #include <sys/sysctl.h> 46 #include <sys/socket.h> 47 #include <sys/objcache.h> 48 49 #include <sys/proc.h> 50 #include <sys/namei.h> 51 #include <sys/mountctl.h> 52 #include <sys/dirent.h> 53 #include <sys/uio.h> 54 55 #include <sys/mutex.h> 56 #include <sys/mutex2.h> 57 58 #include "hammer2.h" 59 #include "hammer2_disk.h" 60 #include "hammer2_mount.h" 61 #include "hammer2_lz4.h" 62 63 #include "zlib/hammer2_zlib.h" 64 65 #define REPORT_REFS_ERRORS 1 /* XXX remove me */ 66 67 MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache"); 68 69 struct hammer2_sync_info { 70 int error; 71 int waitfor; 72 int pass; 73 }; 74 75 TAILQ_HEAD(hammer2_mntlist, hammer2_dev); 76 static struct hammer2_mntlist hammer2_mntlist; 77 78 struct hammer2_pfslist hammer2_pfslist; 79 struct hammer2_pfslist hammer2_spmplist; 80 struct lock hammer2_mntlk; 81 82 int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT; 83 int hammer2_debug; 84 long hammer2_debug_inode; 85 int hammer2_cluster_meta_read = 1; /* physical read-ahead */ 86 int hammer2_cluster_data_read = 4; /* physical read-ahead */ 87 int hammer2_cluster_write = 0; /* physical write clustering */ 88 int hammer2_dedup_enable = 1; 89 int hammer2_always_compress = 0; /* always try to compress */ 90 int hammer2_inval_enable = 0; 91 int hammer2_flush_pipe = 100; 92 int hammer2_dio_count; 93 int hammer2_dio_limit = 256; 94 int hammer2_bulkfree_tps = 5000; 95 long hammer2_chain_allocs; 96 long hammer2_chain_frees; 97 long hammer2_limit_dirty_chains; 98 long hammer2_limit_dirty_inodes; 99 long hammer2_count_modified_chains; 100 long hammer2_iod_invals; 101 long hammer2_iod_file_read; 102 long hammer2_iod_meta_read; 103 long hammer2_iod_indr_read; 104 long hammer2_iod_fmap_read; 105 long hammer2_iod_volu_read; 106 long hammer2_iod_file_write; 107 long hammer2_iod_file_wembed; 108 long hammer2_iod_file_wzero; 109 long hammer2_iod_file_wdedup; 110 long hammer2_iod_meta_write; 111 long hammer2_iod_indr_write; 112 long hammer2_iod_fmap_write; 113 long hammer2_iod_volu_write; 114 long hammer2_iod_inode_creates; 115 long hammer2_iod_inode_deletes; 116 117 MALLOC_DECLARE(M_HAMMER2_CBUFFER); 118 MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer", 119 "Buffer used for compression."); 120 121 MALLOC_DECLARE(M_HAMMER2_DEBUFFER); 122 MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer", 123 "Buffer used for decompression."); 124 125 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem"); 126 127 SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD, 128 &hammer2_supported_version, 0, ""); 129 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW, 130 &hammer2_debug, 0, ""); 131 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW, 132 &hammer2_debug_inode, 0, ""); 133 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW, 134 &hammer2_cluster_meta_read, 0, ""); 135 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW, 136 &hammer2_cluster_data_read, 0, ""); 137 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW, 138 &hammer2_cluster_write, 0, ""); 139 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW, 140 &hammer2_dedup_enable, 0, ""); 141 SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW, 142 &hammer2_always_compress, 0, ""); 143 SYSCTL_INT(_vfs_hammer2, OID_AUTO, inval_enable, CTLFLAG_RW, 144 &hammer2_inval_enable, 0, ""); 145 SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW, 146 &hammer2_flush_pipe, 0, ""); 147 SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW, 148 &hammer2_bulkfree_tps, 0, ""); 149 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW, 150 &hammer2_chain_allocs, 0, ""); 151 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_frees, CTLFLAG_RW, 152 &hammer2_chain_frees, 0, ""); 153 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW, 154 &hammer2_limit_dirty_chains, 0, ""); 155 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW, 156 &hammer2_limit_dirty_inodes, 0, ""); 157 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW, 158 &hammer2_count_modified_chains, 0, ""); 159 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD, 160 &hammer2_dio_count, 0, ""); 161 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW, 162 &hammer2_dio_limit, 0, ""); 163 164 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_invals, CTLFLAG_RW, 165 &hammer2_iod_invals, 0, ""); 166 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW, 167 &hammer2_iod_file_read, 0, ""); 168 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW, 169 &hammer2_iod_meta_read, 0, ""); 170 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW, 171 &hammer2_iod_indr_read, 0, ""); 172 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW, 173 &hammer2_iod_fmap_read, 0, ""); 174 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW, 175 &hammer2_iod_volu_read, 0, ""); 176 177 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW, 178 &hammer2_iod_file_write, 0, ""); 179 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW, 180 &hammer2_iod_file_wembed, 0, ""); 181 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW, 182 &hammer2_iod_file_wzero, 0, ""); 183 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW, 184 &hammer2_iod_file_wdedup, 0, ""); 185 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW, 186 &hammer2_iod_meta_write, 0, ""); 187 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW, 188 &hammer2_iod_indr_write, 0, ""); 189 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW, 190 &hammer2_iod_fmap_write, 0, ""); 191 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW, 192 &hammer2_iod_volu_write, 0, ""); 193 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RW, 194 &hammer2_iod_inode_creates, 0, ""); 195 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RW, 196 &hammer2_iod_inode_deletes, 0, ""); 197 198 long hammer2_process_icrc32; 199 long hammer2_process_xxhash64; 200 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RW, 201 &hammer2_process_icrc32, 0, ""); 202 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RW, 203 &hammer2_process_xxhash64, 0, ""); 204 205 static int hammer2_vfs_init(struct vfsconf *conf); 206 static int hammer2_vfs_uninit(struct vfsconf *vfsp); 207 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 208 struct ucred *cred); 209 static int hammer2_remount(hammer2_dev_t *, struct mount *, char *, 210 struct vnode *, struct ucred *); 211 static int hammer2_recovery(hammer2_dev_t *hmp); 212 static int hammer2_vfs_unmount(struct mount *mp, int mntflags); 213 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp); 214 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, 215 struct ucred *cred); 216 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, 217 struct ucred *cred); 218 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 219 struct fid *fhp, struct vnode **vpp); 220 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp); 221 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 222 int *exflagsp, struct ucred **credanonp); 223 static void hammer2_vfs_modifying(struct mount *mp); 224 225 static int hammer2_install_volume_header(hammer2_dev_t *hmp); 226 #if 0 227 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data); 228 #endif 229 230 static void hammer2_update_pmps(hammer2_dev_t *hmp); 231 232 static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp); 233 static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, 234 hammer2_dev_t *hmp); 235 static int hammer2_fixup_pfses(hammer2_dev_t *hmp); 236 237 /* 238 * HAMMER2 vfs operations. 239 */ 240 static struct vfsops hammer2_vfsops = { 241 .vfs_init = hammer2_vfs_init, 242 .vfs_uninit = hammer2_vfs_uninit, 243 .vfs_sync = hammer2_vfs_sync, 244 .vfs_mount = hammer2_vfs_mount, 245 .vfs_unmount = hammer2_vfs_unmount, 246 .vfs_root = hammer2_vfs_root, 247 .vfs_statfs = hammer2_vfs_statfs, 248 .vfs_statvfs = hammer2_vfs_statvfs, 249 .vfs_vget = hammer2_vfs_vget, 250 .vfs_vptofh = hammer2_vfs_vptofh, 251 .vfs_fhtovp = hammer2_vfs_fhtovp, 252 .vfs_checkexp = hammer2_vfs_checkexp, 253 .vfs_modifying = hammer2_vfs_modifying 254 }; 255 256 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", ""); 257 258 VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE); 259 MODULE_VERSION(hammer2, 1); 260 261 static 262 int 263 hammer2_vfs_init(struct vfsconf *conf) 264 { 265 static struct objcache_malloc_args margs_read; 266 static struct objcache_malloc_args margs_write; 267 static struct objcache_malloc_args margs_vop; 268 269 int error; 270 271 error = 0; 272 kmalloc_raise_limit(M_HAMMER2, 0); /* unlimited */ 273 274 /* 275 * A large DIO cache is needed to retain dedup enablement masks. 276 * The bulkfree code clears related masks as part of the disk block 277 * recycling algorithm, preventing it from being used for a later 278 * dedup. 279 * 280 * NOTE: A large buffer cache can actually interfere with dedup 281 * operation because we dedup based on media physical buffers 282 * and not logical buffers. Try to make the DIO case large 283 * enough to avoid this problem, but also cap it. 284 */ 285 hammer2_dio_limit = nbuf * 2; 286 if (hammer2_dio_limit > 100000) 287 hammer2_dio_limit = 100000; 288 289 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref)) 290 error = EINVAL; 291 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data)) 292 error = EINVAL; 293 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data)) 294 error = EINVAL; 295 296 if (error) 297 kprintf("HAMMER2 structure size mismatch; cannot continue.\n"); 298 299 margs_read.objsize = 65536; 300 margs_read.mtype = M_HAMMER2_DEBUFFER; 301 302 margs_write.objsize = 32768; 303 margs_write.mtype = M_HAMMER2_CBUFFER; 304 305 margs_vop.objsize = sizeof(hammer2_xop_t); 306 margs_vop.mtype = M_HAMMER2; 307 308 /* 309 * Note thaht for the XOPS cache we want backing store allocations 310 * to use M_ZERO. This is not allowed in objcache_get() (to avoid 311 * confusion), so use the backing store function that does it. This 312 * means that initial XOPS objects are zerod but REUSED objects are 313 * not. So we are responsible for cleaning the object up sufficiently 314 * for our needs before objcache_put()ing it back (typically just the 315 * FIFO indices). 316 */ 317 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc, 318 0, 1, NULL, NULL, NULL, 319 objcache_malloc_alloc, 320 objcache_malloc_free, 321 &margs_read); 322 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc, 323 0, 1, NULL, NULL, NULL, 324 objcache_malloc_alloc, 325 objcache_malloc_free, 326 &margs_write); 327 cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc, 328 0, 1, NULL, NULL, NULL, 329 objcache_malloc_alloc_zero, 330 objcache_malloc_free, 331 &margs_vop); 332 333 334 lockinit(&hammer2_mntlk, "mntlk", 0, 0); 335 TAILQ_INIT(&hammer2_mntlist); 336 TAILQ_INIT(&hammer2_pfslist); 337 TAILQ_INIT(&hammer2_spmplist); 338 339 hammer2_limit_dirty_chains = maxvnodes / 10; 340 if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS) 341 hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS; 342 if (hammer2_limit_dirty_chains < 1000) 343 hammer2_limit_dirty_chains = 1000; 344 345 hammer2_limit_dirty_inodes = maxvnodes / 25; 346 if (hammer2_limit_dirty_inodes < 100) 347 hammer2_limit_dirty_inodes = 100; 348 if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES) 349 hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES; 350 351 return (error); 352 } 353 354 static 355 int 356 hammer2_vfs_uninit(struct vfsconf *vfsp __unused) 357 { 358 objcache_destroy(cache_buffer_read); 359 objcache_destroy(cache_buffer_write); 360 objcache_destroy(cache_xops); 361 return 0; 362 } 363 364 /* 365 * Core PFS allocator. Used to allocate or reference the pmp structure 366 * for PFS cluster mounts and the spmp structure for media (hmp) structures. 367 * The pmp can be passed in or loaded by this function using the chain and 368 * inode data. 369 * 370 * pmp->modify_tid tracks new modify_tid transaction ids for front-end 371 * transactions. Note that synchronization does not use this field. 372 * (typically frontend operations and synchronization cannot run on the 373 * same PFS node at the same time). 374 * 375 * XXX check locking 376 */ 377 hammer2_pfs_t * 378 hammer2_pfsalloc(hammer2_chain_t *chain, 379 const hammer2_inode_data_t *ripdata, 380 hammer2_tid_t modify_tid, hammer2_dev_t *force_local) 381 { 382 hammer2_pfs_t *pmp; 383 hammer2_inode_t *iroot; 384 int count; 385 int i; 386 int j; 387 388 pmp = NULL; 389 390 /* 391 * Locate or create the PFS based on the cluster id. If ripdata 392 * is NULL this is a spmp which is unique and is always allocated. 393 * 394 * If the device is mounted in local mode all PFSs are considered 395 * independent and not part of any cluster (for debugging only). 396 */ 397 if (ripdata) { 398 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) { 399 if (force_local != pmp->force_local) 400 continue; 401 if (force_local == NULL && 402 bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid, 403 sizeof(pmp->pfs_clid)) == 0) { 404 break; 405 } else if (force_local && pmp->pfs_names[0] && 406 strcmp(pmp->pfs_names[0], ripdata->filename) == 0) { 407 break; 408 } 409 } 410 } 411 412 if (pmp == NULL) { 413 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO); 414 pmp->force_local = force_local; 415 hammer2_trans_manage_init(pmp); 416 kmalloc_create(&pmp->minode, "HAMMER2-inodes"); 417 kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg"); 418 lockinit(&pmp->lock, "pfslk", 0, 0); 419 lockinit(&pmp->lock_nlink, "h2nlink", 0, 0); 420 spin_init(&pmp->inum_spin, "hm2pfsalloc_inum"); 421 spin_init(&pmp->xop_spin, "h2xop"); 422 spin_init(&pmp->lru_spin, "h2lru"); 423 RB_INIT(&pmp->inum_tree); 424 TAILQ_INIT(&pmp->syncq); 425 TAILQ_INIT(&pmp->depq); 426 TAILQ_INIT(&pmp->lru_list); 427 spin_init(&pmp->list_spin, "h2pfsalloc_list"); 428 429 /* 430 * Distribute backend operations to threads 431 */ 432 for (i = 0; i < HAMMER2_XOPGROUPS; ++i) 433 hammer2_xop_group_init(pmp, &pmp->xop_groups[i]); 434 435 /* 436 * Save the last media transaction id for the flusher. Set 437 * initial 438 */ 439 if (ripdata) { 440 pmp->pfs_clid = ripdata->meta.pfs_clid; 441 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry); 442 } else { 443 pmp->flags |= HAMMER2_PMPF_SPMP; 444 TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry); 445 } 446 447 /* 448 * The synchronization thread may start too early, make 449 * sure it stays frozen until we are ready to let it go. 450 * XXX 451 */ 452 /* 453 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN | 454 HAMMER2_THREAD_REMASTER; 455 */ 456 } 457 458 /* 459 * Create the PFS's root inode and any missing XOP helper threads. 460 */ 461 if ((iroot = pmp->iroot) == NULL) { 462 iroot = hammer2_inode_get(pmp, NULL, 1, -1); 463 if (ripdata) 464 iroot->meta = ripdata->meta; 465 pmp->iroot = iroot; 466 hammer2_inode_ref(iroot); 467 hammer2_inode_unlock(iroot); 468 } 469 470 /* 471 * Stop here if no chain is passed in. 472 */ 473 if (chain == NULL) 474 goto done; 475 476 /* 477 * When a chain is passed in we must add it to the PFS's root 478 * inode, update pmp->pfs_types[], and update the syncronization 479 * threads. 480 * 481 * When forcing local mode, mark the PFS as a MASTER regardless. 482 * 483 * At the moment empty spots can develop due to removals or failures. 484 * Ultimately we want to re-fill these spots but doing so might 485 * confused running code. XXX 486 */ 487 hammer2_inode_ref(iroot); 488 hammer2_mtx_ex(&iroot->lock); 489 j = iroot->cluster.nchains; 490 491 if (j == HAMMER2_MAXCLUSTER) { 492 kprintf("hammer2_mount: cluster full!\n"); 493 /* XXX fatal error? */ 494 } else { 495 KKASSERT(chain->pmp == NULL); 496 chain->pmp = pmp; 497 hammer2_chain_ref(chain); 498 iroot->cluster.array[j].chain = chain; 499 if (force_local) 500 pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER; 501 else 502 pmp->pfs_types[j] = ripdata->meta.pfs_type; 503 pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2); 504 pmp->pfs_hmps[j] = chain->hmp; 505 hammer2_spin_ex(&pmp->inum_spin); 506 pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset; 507 hammer2_spin_unex(&pmp->inum_spin); 508 509 /* 510 * If the PFS is already mounted we must account 511 * for the mount_count here. 512 */ 513 if (pmp->mp) 514 ++chain->hmp->mount_count; 515 516 /* 517 * May have to fixup dirty chain tracking. Previous 518 * pmp was NULL so nothing to undo. 519 */ 520 if (chain->flags & HAMMER2_CHAIN_MODIFIED) 521 hammer2_pfs_memory_inc(pmp); 522 ++j; 523 } 524 iroot->cluster.nchains = j; 525 526 /* 527 * Update nmasters from any PFS inode which is part of the cluster. 528 * It is possible that this will result in a value which is too 529 * high. MASTER PFSs are authoritative for pfs_nmasters and will 530 * override this value later on. 531 * 532 * (This informs us of masters that might not currently be 533 * discoverable by this mount). 534 */ 535 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) { 536 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters; 537 } 538 539 /* 540 * Count visible masters. Masters are usually added with 541 * ripdata->meta.pfs_nmasters set to 1. This detects when there 542 * are more (XXX and must update the master inodes). 543 */ 544 count = 0; 545 for (i = 0; i < iroot->cluster.nchains; ++i) { 546 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) 547 ++count; 548 } 549 if (pmp->pfs_nmasters < count) 550 pmp->pfs_nmasters = count; 551 552 /* 553 * Create missing synchronization and support threads. 554 * 555 * Single-node masters (including snapshots) have nothing to 556 * synchronize and do not require this thread. 557 * 558 * Multi-node masters or any number of soft masters, slaves, copy, 559 * or other PFS types need the thread. 560 * 561 * Each thread is responsible for its particular cluster index. 562 * We use independent threads so stalls or mismatches related to 563 * any given target do not affect other targets. 564 */ 565 for (i = 0; i < iroot->cluster.nchains; ++i) { 566 /* 567 * Single-node masters (including snapshots) have nothing 568 * to synchronize and will make direct xops support calls, 569 * thus they do not require this thread. 570 * 571 * Note that there can be thousands of snapshots. We do not 572 * want to create thousands of threads. 573 */ 574 if (pmp->pfs_nmasters <= 1 && 575 pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) { 576 continue; 577 } 578 579 /* 580 * Sync support thread 581 */ 582 if (pmp->sync_thrs[i].td == NULL) { 583 hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL, 584 "h2nod", i, -1, 585 hammer2_primary_sync_thread); 586 } 587 } 588 589 /* 590 * Create missing Xop threads 591 * 592 * NOTE: We create helper threads for all mounted PFSs or any 593 * PFSs with 2+ nodes (so the sync thread can update them, 594 * even if not mounted). 595 */ 596 if (pmp->mp || iroot->cluster.nchains >= 2) 597 hammer2_xop_helper_create(pmp); 598 599 hammer2_mtx_unlock(&iroot->lock); 600 hammer2_inode_drop(iroot); 601 done: 602 return pmp; 603 } 604 605 /* 606 * Deallocate an element of a probed PFS. If destroying and this is a 607 * MASTER, adjust nmasters. 608 * 609 * This function does not physically destroy the PFS element in its device 610 * under the super-root (see hammer2_ioctl_pfs_delete()). 611 */ 612 void 613 hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying) 614 { 615 hammer2_inode_t *iroot; 616 hammer2_chain_t *chain; 617 int j; 618 619 /* 620 * Cleanup our reference on iroot. iroot is (should) not be needed 621 * by the flush code. 622 */ 623 iroot = pmp->iroot; 624 if (iroot) { 625 /* 626 * Stop synchronizing 627 * 628 * XXX flush after acquiring the iroot lock. 629 * XXX clean out the cluster index from all inode structures. 630 */ 631 hammer2_thr_delete(&pmp->sync_thrs[clindex]); 632 633 /* 634 * Remove the cluster index from the group. If destroying 635 * the PFS and this is a master, adjust pfs_nmasters. 636 */ 637 hammer2_mtx_ex(&iroot->lock); 638 chain = iroot->cluster.array[clindex].chain; 639 iroot->cluster.array[clindex].chain = NULL; 640 641 switch(pmp->pfs_types[clindex]) { 642 case HAMMER2_PFSTYPE_MASTER: 643 if (destroying && pmp->pfs_nmasters > 0) 644 --pmp->pfs_nmasters; 645 /* XXX adjust ripdata->meta.pfs_nmasters */ 646 break; 647 default: 648 break; 649 } 650 pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE; 651 652 hammer2_mtx_unlock(&iroot->lock); 653 654 /* 655 * Release the chain. 656 */ 657 if (chain) { 658 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 659 hammer2_chain_drop(chain); 660 } 661 662 /* 663 * Terminate all XOP threads for the cluster index. 664 */ 665 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) 666 hammer2_thr_delete(&pmp->xop_groups[j].thrs[clindex]); 667 } 668 } 669 670 /* 671 * Destroy a PFS, typically only occurs after the last mount on a device 672 * has gone away. 673 */ 674 static void 675 hammer2_pfsfree(hammer2_pfs_t *pmp) 676 { 677 hammer2_inode_t *iroot; 678 hammer2_chain_t *chain; 679 int chains_still_present = 0; 680 int i; 681 int j; 682 683 /* 684 * Cleanup our reference on iroot. iroot is (should) not be needed 685 * by the flush code. 686 */ 687 if (pmp->flags & HAMMER2_PMPF_SPMP) 688 TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry); 689 else 690 TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry); 691 692 /* 693 * Cleanup chains remaining on LRU list. 694 */ 695 hammer2_spin_ex(&pmp->lru_spin); 696 while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) { 697 KKASSERT(chain->flags & HAMMER2_CHAIN_ONLRU); 698 atomic_add_int(&pmp->lru_count, -1); 699 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONLRU); 700 TAILQ_REMOVE(&pmp->lru_list, chain, lru_node); 701 hammer2_chain_ref(chain); 702 hammer2_spin_unex(&pmp->lru_spin); 703 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 704 hammer2_chain_drop(chain); 705 hammer2_spin_ex(&pmp->lru_spin); 706 } 707 hammer2_spin_unex(&pmp->lru_spin); 708 709 /* 710 * Clean up iroot 711 */ 712 iroot = pmp->iroot; 713 if (iroot) { 714 for (i = 0; i < iroot->cluster.nchains; ++i) { 715 hammer2_thr_delete(&pmp->sync_thrs[i]); 716 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) 717 hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]); 718 chain = iroot->cluster.array[i].chain; 719 if (chain && !RB_EMPTY(&chain->core.rbtree)) { 720 kprintf("hammer2: Warning pmp %p still " 721 "has active chains\n", pmp); 722 chains_still_present = 1; 723 } 724 } 725 #if REPORT_REFS_ERRORS 726 if (iroot->refs != 1) 727 kprintf("PMP->IROOT %p REFS WRONG %d\n", 728 iroot, iroot->refs); 729 #else 730 KKASSERT(iroot->refs == 1); 731 #endif 732 /* ref for iroot */ 733 hammer2_inode_drop(iroot); 734 pmp->iroot = NULL; 735 } 736 737 /* 738 * Free remaining pmp resources 739 */ 740 if (chains_still_present) { 741 kprintf("hammer2: cannot free pmp %p, still in use\n", pmp); 742 } else { 743 kmalloc_destroy(&pmp->mmsg); 744 kmalloc_destroy(&pmp->minode); 745 kfree(pmp, M_HAMMER2); 746 } 747 } 748 749 /* 750 * Remove all references to hmp from the pfs list. Any PFS which becomes 751 * empty is terminated and freed. 752 * 753 * XXX inefficient. 754 */ 755 static void 756 hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which) 757 { 758 hammer2_pfs_t *pmp; 759 hammer2_inode_t *iroot; 760 hammer2_chain_t *rchain; 761 int i; 762 int j; 763 struct hammer2_pfslist *wlist; 764 765 if (which == 0) 766 wlist = &hammer2_pfslist; 767 else 768 wlist = &hammer2_spmplist; 769 again: 770 TAILQ_FOREACH(pmp, wlist, mntentry) { 771 if ((iroot = pmp->iroot) == NULL) 772 continue; 773 774 /* 775 * Determine if this PFS is affected. If it is we must 776 * freeze all management threads and lock its iroot. 777 * 778 * Freezing a management thread forces it idle, operations 779 * in-progress will be aborted and it will have to start 780 * over again when unfrozen, or exit if told to exit. 781 */ 782 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 783 if (pmp->pfs_hmps[i] == hmp) 784 break; 785 } 786 if (i == HAMMER2_MAXCLUSTER) 787 continue; 788 789 hammer2_vfs_sync_pmp(pmp, MNT_WAIT); 790 791 /* 792 * Make sure all synchronization threads are locked 793 * down. 794 */ 795 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 796 if (pmp->pfs_hmps[i] == NULL) 797 continue; 798 hammer2_thr_freeze_async(&pmp->sync_thrs[i]); 799 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 800 hammer2_thr_freeze_async( 801 &pmp->xop_groups[j].thrs[i]); 802 } 803 } 804 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 805 if (pmp->pfs_hmps[i] == NULL) 806 continue; 807 hammer2_thr_freeze(&pmp->sync_thrs[i]); 808 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 809 hammer2_thr_freeze( 810 &pmp->xop_groups[j].thrs[i]); 811 } 812 } 813 814 /* 815 * Lock the inode and clean out matching chains. 816 * Note that we cannot use hammer2_inode_lock_*() 817 * here because that would attempt to validate the 818 * cluster that we are in the middle of ripping 819 * apart. 820 * 821 * WARNING! We are working directly on the inodes 822 * embedded cluster. 823 */ 824 hammer2_mtx_ex(&iroot->lock); 825 826 /* 827 * Remove the chain from matching elements of the PFS. 828 */ 829 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 830 if (pmp->pfs_hmps[i] != hmp) 831 continue; 832 hammer2_thr_delete(&pmp->sync_thrs[i]); 833 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 834 hammer2_thr_delete( 835 &pmp->xop_groups[j].thrs[i]); 836 } 837 rchain = iroot->cluster.array[i].chain; 838 iroot->cluster.array[i].chain = NULL; 839 pmp->pfs_types[i] = 0; 840 if (pmp->pfs_names[i]) { 841 kfree(pmp->pfs_names[i], M_HAMMER2); 842 pmp->pfs_names[i] = NULL; 843 } 844 if (rchain) { 845 hammer2_chain_drop(rchain); 846 /* focus hint */ 847 if (iroot->cluster.focus == rchain) 848 iroot->cluster.focus = NULL; 849 } 850 pmp->pfs_hmps[i] = NULL; 851 } 852 hammer2_mtx_unlock(&iroot->lock); 853 854 /* 855 * Cleanup trailing chains. Gaps may remain. 856 */ 857 for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) { 858 if (pmp->pfs_hmps[i]) 859 break; 860 } 861 iroot->cluster.nchains = i + 1; 862 863 /* 864 * If the PMP has no elements remaining we can destroy it. 865 * (this will transition management threads from frozen->exit). 866 */ 867 if (iroot->cluster.nchains == 0) { 868 /* 869 * If this was the hmp's spmp, we need to clean 870 * a little more stuff out. 871 */ 872 if (hmp->spmp == pmp) { 873 hmp->spmp = NULL; 874 hmp->vchain.pmp = NULL; 875 hmp->fchain.pmp = NULL; 876 } 877 878 /* 879 * Free the pmp and restart the loop 880 */ 881 KKASSERT(TAILQ_EMPTY(&pmp->syncq)); 882 KKASSERT(TAILQ_EMPTY(&pmp->depq)); 883 hammer2_pfsfree(pmp); 884 goto again; 885 } 886 887 /* 888 * If elements still remain we need to set the REMASTER 889 * flag and unfreeze it. 890 */ 891 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 892 if (pmp->pfs_hmps[i] == NULL) 893 continue; 894 hammer2_thr_remaster(&pmp->sync_thrs[i]); 895 hammer2_thr_unfreeze(&pmp->sync_thrs[i]); 896 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) { 897 hammer2_thr_remaster( 898 &pmp->xop_groups[j].thrs[i]); 899 hammer2_thr_unfreeze( 900 &pmp->xop_groups[j].thrs[i]); 901 } 902 } 903 } 904 } 905 906 /* 907 * Mount or remount HAMMER2 fileystem from physical media 908 * 909 * mountroot 910 * mp mount point structure 911 * path NULL 912 * data <unused> 913 * cred <unused> 914 * 915 * mount 916 * mp mount point structure 917 * path path to mount point 918 * data pointer to argument structure in user space 919 * volume volume path (device@LABEL form) 920 * hflags user mount flags 921 * cred user credentials 922 * 923 * RETURNS: 0 Success 924 * !0 error number 925 */ 926 static 927 int 928 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 929 struct ucred *cred) 930 { 931 struct hammer2_mount_info info; 932 hammer2_pfs_t *pmp; 933 hammer2_pfs_t *spmp; 934 hammer2_dev_t *hmp; 935 hammer2_dev_t *force_local; 936 hammer2_key_t key_next; 937 hammer2_key_t key_dummy; 938 hammer2_key_t lhc; 939 struct vnode *devvp; 940 struct nlookupdata nd; 941 hammer2_chain_t *parent; 942 hammer2_chain_t *chain; 943 const hammer2_inode_data_t *ripdata; 944 hammer2_blockref_t bref; 945 struct file *fp; 946 char devstr[MNAMELEN]; 947 size_t size; 948 size_t done; 949 char *dev; 950 char *label; 951 int ronly = 1; 952 int error; 953 int i; 954 955 hmp = NULL; 956 pmp = NULL; 957 dev = NULL; 958 label = NULL; 959 devvp = NULL; 960 961 if (path == NULL) { 962 /* 963 * Root mount 964 */ 965 bzero(&info, sizeof(info)); 966 info.cluster_fd = -1; 967 ksnprintf(devstr, sizeof(devstr), "%s", 968 mp->mnt_stat.f_mntfromname); 969 kprintf("hammer2_mount: root '%s'\n", devstr); 970 done = strlen(devstr) + 1; 971 } else { 972 /* 973 * Non-root mount or updating a mount 974 */ 975 error = copyin(data, &info, sizeof(info)); 976 if (error) 977 return (error); 978 979 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done); 980 if (error) 981 return (error); 982 kprintf("hammer2_mount: '%s'\n", devstr); 983 } 984 985 /* 986 * Extract device and label, automatically mount @BOOT, @ROOT, or @DATA 987 * if no label specified, based on the partition id. Error out if no 988 * label or device (with partition id) is specified. This is strictly 989 * a convenience to match the default label created by newfs_hammer2, 990 * our preference is that a label always be specified. 991 * 992 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command 993 * that does not specify a device, as long as some H2 label 994 * has already been mounted from that device. This makes 995 * mounting snapshots a lot easier. 996 */ 997 dev = devstr; 998 label = strchr(devstr, '@'); 999 if (label && ((label + 1) - dev) > done) { 1000 kprintf("hammer2: mount: bad label %s/%zd\n", 1001 devstr, done); 1002 return (EINVAL); 1003 } 1004 if (label == NULL || label[1] == 0) { 1005 char slice; 1006 1007 if (label == NULL) 1008 label = devstr + strlen(devstr); 1009 else 1010 *label = '\0'; /* clean up trailing @ */ 1011 1012 slice = label[-1]; 1013 switch(slice) { 1014 case 'a': 1015 label = "BOOT"; 1016 break; 1017 case 'd': 1018 label = "ROOT"; 1019 break; 1020 default: 1021 label = "DATA"; 1022 break; 1023 } 1024 } else { 1025 *label = '\0'; 1026 label++; 1027 } 1028 1029 kprintf("hammer2_mount: dev=\"%s\" label=\"%s\" rdonly=%d\n", 1030 dev, label, (mp->mnt_flag & MNT_RDONLY)); 1031 1032 if (mp->mnt_flag & MNT_UPDATE) { 1033 /* 1034 * Update mount. Note that pmp->iroot->cluster is 1035 * an inode-embedded cluster and thus cannot be 1036 * directly locked. 1037 * 1038 * XXX HAMMER2 needs to implement NFS export via 1039 * mountctl. 1040 */ 1041 hammer2_cluster_t *cluster; 1042 1043 pmp = MPTOPMP(mp); 1044 pmp->hflags = info.hflags; 1045 cluster = &pmp->iroot->cluster; 1046 for (i = 0; i < cluster->nchains; ++i) { 1047 if (cluster->array[i].chain == NULL) 1048 continue; 1049 hmp = cluster->array[i].chain->hmp; 1050 devvp = hmp->devvp; 1051 error = hammer2_remount(hmp, mp, path, 1052 devvp, cred); 1053 if (error) 1054 break; 1055 } 1056 1057 return error; 1058 } 1059 1060 /* 1061 * HMP device mount 1062 * 1063 * If a path is specified and dev is not an empty string, lookup the 1064 * name and verify that it referes to a block device. 1065 * 1066 * If a path is specified and dev is an empty string we fall through 1067 * and locate the label in the hmp search. 1068 */ 1069 if (path && *dev != 0) { 1070 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW); 1071 if (error == 0) 1072 error = nlookup(&nd); 1073 if (error == 0) 1074 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp); 1075 nlookup_done(&nd); 1076 } else if (path == NULL) { 1077 /* root mount */ 1078 cdev_t cdev = kgetdiskbyname(dev); 1079 error = bdevvp(cdev, &devvp); 1080 if (error) 1081 kprintf("hammer2: cannot find '%s'\n", dev); 1082 } else { 1083 /* 1084 * We will locate the hmp using the label in the hmp loop. 1085 */ 1086 error = 0; 1087 } 1088 1089 /* 1090 * Make sure its a block device. Do not check to see if it is 1091 * already mounted until we determine that its a fresh H2 device. 1092 */ 1093 if (error == 0 && devvp) { 1094 vn_isdisk(devvp, &error); 1095 } 1096 1097 /* 1098 * Determine if the device has already been mounted. After this 1099 * check hmp will be non-NULL if we are doing the second or more 1100 * hammer2 mounts from the same device. 1101 */ 1102 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 1103 if (devvp) { 1104 /* 1105 * Match the device. Due to the way devfs works, 1106 * we may not be able to directly match the vnode pointer, 1107 * so also check to see if the underlying device matches. 1108 */ 1109 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) { 1110 if (hmp->devvp == devvp) 1111 break; 1112 if (devvp->v_rdev && 1113 hmp->devvp->v_rdev == devvp->v_rdev) { 1114 break; 1115 } 1116 } 1117 1118 /* 1119 * If no match this may be a fresh H2 mount, make sure 1120 * the device is not mounted on anything else. 1121 */ 1122 if (hmp == NULL) 1123 error = vfs_mountedon(devvp); 1124 } else if (error == 0) { 1125 /* 1126 * Match the label to a pmp already probed. 1127 */ 1128 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) { 1129 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) { 1130 if (pmp->pfs_names[i] && 1131 strcmp(pmp->pfs_names[i], label) == 0) { 1132 hmp = pmp->pfs_hmps[i]; 1133 break; 1134 } 1135 } 1136 if (hmp) 1137 break; 1138 } 1139 if (hmp == NULL) 1140 error = ENOENT; 1141 } 1142 1143 /* 1144 * Open the device if this isn't a secondary mount and construct 1145 * the H2 device mount (hmp). 1146 */ 1147 if (hmp == NULL) { 1148 hammer2_chain_t *schain; 1149 hammer2_xid_t xid; 1150 hammer2_xop_head_t xop; 1151 1152 if (error == 0 && vcount(devvp) > 0) { 1153 kprintf("Primary device already has references\n"); 1154 error = EBUSY; 1155 } 1156 1157 /* 1158 * Now open the device 1159 */ 1160 if (error == 0) { 1161 ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); 1162 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1163 error = vinvalbuf(devvp, V_SAVE, 0, 0); 1164 if (error == 0) { 1165 error = VOP_OPEN(devvp, 1166 (ronly ? FREAD : FREAD | FWRITE), 1167 FSCRED, NULL); 1168 } 1169 vn_unlock(devvp); 1170 } 1171 if (error && devvp) { 1172 vrele(devvp); 1173 devvp = NULL; 1174 } 1175 if (error) { 1176 lockmgr(&hammer2_mntlk, LK_RELEASE); 1177 return error; 1178 } 1179 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO); 1180 ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev); 1181 hmp->ronly = ronly; 1182 hmp->devvp = devvp; 1183 hmp->hflags = info.hflags & HMNT2_DEVFLAGS; 1184 kmalloc_create(&hmp->mchain, "HAMMER2-chains"); 1185 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry); 1186 RB_INIT(&hmp->iotree); 1187 spin_init(&hmp->io_spin, "h2mount_io"); 1188 spin_init(&hmp->list_spin, "h2mount_list"); 1189 1190 lockinit(&hmp->vollk, "h2vol", 0, 0); 1191 lockinit(&hmp->bulklk, "h2bulk", 0, 0); 1192 lockinit(&hmp->bflock, "h2bflk", 0, 0); 1193 1194 /* 1195 * vchain setup. vchain.data is embedded. 1196 * vchain.refs is initialized and will never drop to 0. 1197 * 1198 * NOTE! voldata is not yet loaded. 1199 */ 1200 hmp->vchain.hmp = hmp; 1201 hmp->vchain.refs = 1; 1202 hmp->vchain.data = (void *)&hmp->voldata; 1203 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME; 1204 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 1205 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid; 1206 1207 hammer2_chain_core_init(&hmp->vchain); 1208 /* hmp->vchain.u.xxx is left NULL */ 1209 1210 /* 1211 * fchain setup. fchain.data is embedded. 1212 * fchain.refs is initialized and will never drop to 0. 1213 * 1214 * The data is not used but needs to be initialized to 1215 * pass assertion muster. We use this chain primarily 1216 * as a placeholder for the freemap's top-level RBTREE 1217 * so it does not interfere with the volume's topology 1218 * RBTREE. 1219 */ 1220 hmp->fchain.hmp = hmp; 1221 hmp->fchain.refs = 1; 1222 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset; 1223 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP; 1224 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 1225 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; 1226 hmp->fchain.bref.methods = 1227 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) | 1228 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE); 1229 1230 hammer2_chain_core_init(&hmp->fchain); 1231 /* hmp->fchain.u.xxx is left NULL */ 1232 1233 /* 1234 * Install the volume header and initialize fields from 1235 * voldata. 1236 */ 1237 error = hammer2_install_volume_header(hmp); 1238 if (error) { 1239 hammer2_unmount_helper(mp, NULL, hmp); 1240 lockmgr(&hammer2_mntlk, LK_RELEASE); 1241 hammer2_vfs_unmount(mp, MNT_FORCE); 1242 return error; 1243 } 1244 1245 /* 1246 * Really important to get these right or flush will get 1247 * confused. 1248 */ 1249 hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0, NULL); 1250 spmp = hmp->spmp; 1251 1252 /* 1253 * Dummy-up vchain and fchain's modify_tid. mirror_tid 1254 * is inherited from the volume header. 1255 */ 1256 xid = 0; 1257 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid; 1258 hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid; 1259 hmp->vchain.pmp = spmp; 1260 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; 1261 hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid; 1262 hmp->fchain.pmp = spmp; 1263 1264 /* 1265 * First locate the super-root inode, which is key 0 1266 * relative to the volume header's blockset. 1267 * 1268 * Then locate the root inode by scanning the directory keyspace 1269 * represented by the label. 1270 */ 1271 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 1272 schain = hammer2_chain_lookup(&parent, &key_dummy, 1273 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 1274 &error, 0); 1275 hammer2_chain_lookup_done(parent); 1276 if (schain == NULL) { 1277 kprintf("hammer2_mount: invalid super-root\n"); 1278 hammer2_unmount_helper(mp, NULL, hmp); 1279 lockmgr(&hammer2_mntlk, LK_RELEASE); 1280 hammer2_vfs_unmount(mp, MNT_FORCE); 1281 return EINVAL; 1282 } 1283 if (schain->error) { 1284 kprintf("hammer2_mount: error %s reading super-root\n", 1285 hammer2_error_str(schain->error)); 1286 hammer2_chain_unlock(schain); 1287 hammer2_chain_drop(schain); 1288 schain = NULL; 1289 hammer2_unmount_helper(mp, NULL, hmp); 1290 lockmgr(&hammer2_mntlk, LK_RELEASE); 1291 hammer2_vfs_unmount(mp, MNT_FORCE); 1292 return EINVAL; 1293 } 1294 1295 /* 1296 * The super-root always uses an inode_tid of 1 when 1297 * creating PFSs. 1298 */ 1299 spmp->inode_tid = 1; 1300 spmp->modify_tid = schain->bref.modify_tid + 1; 1301 1302 /* 1303 * Sanity-check schain's pmp and finish initialization. 1304 * Any chain belonging to the super-root topology should 1305 * have a NULL pmp (not even set to spmp). 1306 */ 1307 ripdata = &hammer2_chain_rdata(schain)->ipdata; 1308 KKASSERT(schain->pmp == NULL); 1309 spmp->pfs_clid = ripdata->meta.pfs_clid; 1310 1311 /* 1312 * Replace the dummy spmp->iroot with a real one. It's 1313 * easier to just do a wholesale replacement than to try 1314 * to update the chain and fixup the iroot fields. 1315 * 1316 * The returned inode is locked with the supplied cluster. 1317 */ 1318 hammer2_dummy_xop_from_chain(&xop, schain); 1319 hammer2_inode_drop(spmp->iroot); 1320 spmp->iroot = NULL; 1321 spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1); 1322 spmp->spmp_hmp = hmp; 1323 spmp->pfs_types[0] = ripdata->meta.pfs_type; 1324 spmp->pfs_hmps[0] = hmp; 1325 hammer2_inode_ref(spmp->iroot); 1326 hammer2_inode_unlock(spmp->iroot); 1327 hammer2_cluster_unlock(&xop.cluster); 1328 hammer2_chain_drop(schain); 1329 /* do not call hammer2_cluster_drop() on an embedded cluster */ 1330 schain = NULL; /* now invalid */ 1331 /* leave spmp->iroot with one ref */ 1332 1333 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 1334 error = hammer2_recovery(hmp); 1335 if (error == 0) 1336 error |= hammer2_fixup_pfses(hmp); 1337 /* XXX do something with error */ 1338 } 1339 hammer2_update_pmps(hmp); 1340 hammer2_iocom_init(hmp); 1341 hammer2_bulkfree_init(hmp); 1342 1343 /* 1344 * Ref the cluster management messaging descriptor. The mount 1345 * program deals with the other end of the communications pipe. 1346 * 1347 * Root mounts typically do not supply one. 1348 */ 1349 if (info.cluster_fd >= 0) { 1350 fp = holdfp(curthread, info.cluster_fd, -1); 1351 if (fp) { 1352 hammer2_cluster_reconnect(hmp, fp); 1353 } else { 1354 kprintf("hammer2_mount: bad cluster_fd!\n"); 1355 } 1356 } 1357 } else { 1358 spmp = hmp->spmp; 1359 if (info.hflags & HMNT2_DEVFLAGS) { 1360 kprintf("hammer2: Warning: mount flags pertaining " 1361 "to the whole device may only be specified " 1362 "on the first mount of the device: %08x\n", 1363 info.hflags & HMNT2_DEVFLAGS); 1364 } 1365 } 1366 1367 /* 1368 * Force local mount (disassociate all PFSs from their clusters). 1369 * Used primarily for debugging. 1370 */ 1371 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL; 1372 1373 /* 1374 * Lookup the mount point under the media-localized super-root. 1375 * Scanning hammer2_pfslist doesn't help us because it represents 1376 * PFS cluster ids which can aggregate several named PFSs together. 1377 * 1378 * cluster->pmp will incorrectly point to spmp and must be fixed 1379 * up later on. 1380 */ 1381 hammer2_inode_lock(spmp->iroot, 0); 1382 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 1383 lhc = hammer2_dirhash(label, strlen(label)); 1384 chain = hammer2_chain_lookup(&parent, &key_next, 1385 lhc, lhc + HAMMER2_DIRHASH_LOMASK, 1386 &error, 0); 1387 while (chain) { 1388 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && 1389 strcmp(label, chain->data->ipdata.filename) == 0) { 1390 break; 1391 } 1392 chain = hammer2_chain_next(&parent, chain, &key_next, 1393 key_next, 1394 lhc + HAMMER2_DIRHASH_LOMASK, 1395 &error, 0); 1396 } 1397 if (parent) { 1398 hammer2_chain_unlock(parent); 1399 hammer2_chain_drop(parent); 1400 } 1401 hammer2_inode_unlock(spmp->iroot); 1402 1403 /* 1404 * PFS could not be found? 1405 */ 1406 if (chain == NULL) { 1407 if (error) 1408 kprintf("hammer2_mount: PFS label I/O error\n"); 1409 else 1410 kprintf("hammer2_mount: PFS label not found\n"); 1411 hammer2_unmount_helper(mp, NULL, hmp); 1412 lockmgr(&hammer2_mntlk, LK_RELEASE); 1413 hammer2_vfs_unmount(mp, MNT_FORCE); 1414 1415 return EINVAL; 1416 } 1417 1418 /* 1419 * Acquire the pmp structure (it should have already been allocated 1420 * via hammer2_update_pmps() so do not pass cluster in to add to 1421 * available chains). 1422 * 1423 * Check if the cluster has already been mounted. A cluster can 1424 * only be mounted once, use null mounts to mount additional copies. 1425 */ 1426 if (chain->error) { 1427 kprintf("hammer2_mount: PFS label I/O error\n"); 1428 } else { 1429 ripdata = &chain->data->ipdata; 1430 bref = chain->bref; 1431 pmp = hammer2_pfsalloc(NULL, ripdata, 1432 bref.modify_tid, force_local); 1433 } 1434 hammer2_chain_unlock(chain); 1435 hammer2_chain_drop(chain); 1436 1437 /* 1438 * Finish the mount 1439 */ 1440 kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp); 1441 1442 if (pmp->mp) { 1443 kprintf("hammer2_mount: PFS already mounted!\n"); 1444 hammer2_unmount_helper(mp, NULL, hmp); 1445 lockmgr(&hammer2_mntlk, LK_RELEASE); 1446 hammer2_vfs_unmount(mp, MNT_FORCE); 1447 1448 return EBUSY; 1449 } 1450 1451 pmp->hflags = info.hflags; 1452 mp->mnt_flag |= MNT_LOCAL; 1453 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */ 1454 mp->mnt_kern_flag |= MNTK_THR_SYNC; /* new vsyncscan semantics */ 1455 1456 /* 1457 * required mount structure initializations 1458 */ 1459 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE; 1460 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE; 1461 1462 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE; 1463 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE; 1464 1465 /* 1466 * Optional fields 1467 */ 1468 mp->mnt_iosize_max = MAXPHYS; 1469 1470 /* 1471 * Connect up mount pointers. 1472 */ 1473 hammer2_mount_helper(mp, pmp); 1474 1475 lockmgr(&hammer2_mntlk, LK_RELEASE); 1476 1477 /* 1478 * Finish setup 1479 */ 1480 vfs_getnewfsid(mp); 1481 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops); 1482 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops); 1483 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops); 1484 1485 if (path) { 1486 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, 1487 MNAMELEN - 1, &size); 1488 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 1489 } /* else root mount, already in there */ 1490 1491 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname)); 1492 if (path) { 1493 copyinstr(path, mp->mnt_stat.f_mntonname, 1494 sizeof(mp->mnt_stat.f_mntonname) - 1, 1495 &size); 1496 } else { 1497 /* root mount */ 1498 mp->mnt_stat.f_mntonname[0] = '/'; 1499 } 1500 1501 /* 1502 * Initial statfs to prime mnt_stat. 1503 */ 1504 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred); 1505 1506 return 0; 1507 } 1508 1509 /* 1510 * Scan PFSs under the super-root and create hammer2_pfs structures. 1511 */ 1512 static 1513 void 1514 hammer2_update_pmps(hammer2_dev_t *hmp) 1515 { 1516 const hammer2_inode_data_t *ripdata; 1517 hammer2_chain_t *parent; 1518 hammer2_chain_t *chain; 1519 hammer2_blockref_t bref; 1520 hammer2_dev_t *force_local; 1521 hammer2_pfs_t *spmp; 1522 hammer2_pfs_t *pmp; 1523 hammer2_key_t key_next; 1524 int error; 1525 1526 /* 1527 * Force local mount (disassociate all PFSs from their clusters). 1528 * Used primarily for debugging. 1529 */ 1530 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL; 1531 1532 /* 1533 * Lookup mount point under the media-localized super-root. 1534 * 1535 * cluster->pmp will incorrectly point to spmp and must be fixed 1536 * up later on. 1537 */ 1538 spmp = hmp->spmp; 1539 hammer2_inode_lock(spmp->iroot, 0); 1540 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 1541 chain = hammer2_chain_lookup(&parent, &key_next, 1542 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX, 1543 &error, 0); 1544 while (chain) { 1545 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) 1546 continue; 1547 if (chain->error) { 1548 kprintf("I/O error scanning PFS labels\n"); 1549 } else { 1550 ripdata = &chain->data->ipdata; 1551 bref = chain->bref; 1552 1553 pmp = hammer2_pfsalloc(chain, ripdata, 1554 bref.modify_tid, force_local); 1555 } 1556 chain = hammer2_chain_next(&parent, chain, &key_next, 1557 key_next, HAMMER2_KEY_MAX, 1558 &error, 0); 1559 } 1560 if (parent) { 1561 hammer2_chain_unlock(parent); 1562 hammer2_chain_drop(parent); 1563 } 1564 hammer2_inode_unlock(spmp->iroot); 1565 } 1566 1567 static 1568 int 1569 hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused, 1570 struct vnode *devvp, struct ucred *cred) 1571 { 1572 int error; 1573 1574 if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { 1575 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1576 VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL); 1577 vn_unlock(devvp); 1578 error = hammer2_recovery(hmp); 1579 if (error == 0) 1580 error |= hammer2_fixup_pfses(hmp); 1581 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1582 if (error == 0) { 1583 VOP_CLOSE(devvp, FREAD, NULL); 1584 hmp->ronly = 0; 1585 } else { 1586 VOP_CLOSE(devvp, FREAD | FWRITE, NULL); 1587 } 1588 vn_unlock(devvp); 1589 } else { 1590 error = 0; 1591 } 1592 return error; 1593 } 1594 1595 static 1596 int 1597 hammer2_vfs_unmount(struct mount *mp, int mntflags) 1598 { 1599 hammer2_pfs_t *pmp; 1600 int flags; 1601 int error = 0; 1602 1603 pmp = MPTOPMP(mp); 1604 1605 if (pmp == NULL) 1606 return(0); 1607 1608 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 1609 1610 /* 1611 * If mount initialization proceeded far enough we must flush 1612 * its vnodes and sync the underlying mount points. Three syncs 1613 * are required to fully flush the filesystem (freemap updates lag 1614 * by one flush, and one extra for safety). 1615 */ 1616 if (mntflags & MNT_FORCE) 1617 flags = FORCECLOSE; 1618 else 1619 flags = 0; 1620 if (pmp->iroot) { 1621 error = vflush(mp, 0, flags); 1622 if (error) 1623 goto failed; 1624 hammer2_vfs_sync(mp, MNT_WAIT); 1625 hammer2_vfs_sync(mp, MNT_WAIT); 1626 hammer2_vfs_sync(mp, MNT_WAIT); 1627 } 1628 1629 /* 1630 * Cleanup the frontend support XOPS threads 1631 */ 1632 hammer2_xop_helper_cleanup(pmp); 1633 1634 if (pmp->mp) 1635 hammer2_unmount_helper(mp, pmp, NULL); 1636 1637 error = 0; 1638 failed: 1639 lockmgr(&hammer2_mntlk, LK_RELEASE); 1640 1641 return (error); 1642 } 1643 1644 /* 1645 * Mount helper, hook the system mount into our PFS. 1646 * The mount lock is held. 1647 * 1648 * We must bump the mount_count on related devices for any 1649 * mounted PFSs. 1650 */ 1651 static 1652 void 1653 hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp) 1654 { 1655 hammer2_cluster_t *cluster; 1656 hammer2_chain_t *rchain; 1657 int i; 1658 1659 mp->mnt_data = (qaddr_t)pmp; 1660 pmp->mp = mp; 1661 1662 /* 1663 * After pmp->mp is set we have to adjust hmp->mount_count. 1664 */ 1665 cluster = &pmp->iroot->cluster; 1666 for (i = 0; i < cluster->nchains; ++i) { 1667 rchain = cluster->array[i].chain; 1668 if (rchain == NULL) 1669 continue; 1670 ++rchain->hmp->mount_count; 1671 } 1672 1673 /* 1674 * Create missing Xop threads 1675 */ 1676 hammer2_xop_helper_create(pmp); 1677 } 1678 1679 /* 1680 * Mount helper, unhook the system mount from our PFS. 1681 * The mount lock is held. 1682 * 1683 * If hmp is supplied a mount responsible for being the first to open 1684 * the block device failed and the block device and all PFSs using the 1685 * block device must be cleaned up. 1686 * 1687 * If pmp is supplied multiple devices might be backing the PFS and each 1688 * must be disconnected. This might not be the last PFS using some of the 1689 * underlying devices. Also, we have to adjust our hmp->mount_count 1690 * accounting for the devices backing the pmp which is now undergoing an 1691 * unmount. 1692 */ 1693 static 1694 void 1695 hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp) 1696 { 1697 hammer2_cluster_t *cluster; 1698 hammer2_chain_t *rchain; 1699 struct vnode *devvp; 1700 int dumpcnt; 1701 int ronly; 1702 int i; 1703 1704 /* 1705 * If no device supplied this is a high-level unmount and we have to 1706 * to disconnect the mount, adjust mount_count, and locate devices 1707 * that might now have no mounts. 1708 */ 1709 if (pmp) { 1710 KKASSERT(hmp == NULL); 1711 KKASSERT((void *)(intptr_t)mp->mnt_data == pmp); 1712 pmp->mp = NULL; 1713 mp->mnt_data = NULL; 1714 1715 /* 1716 * After pmp->mp is cleared we have to account for 1717 * mount_count. 1718 */ 1719 cluster = &pmp->iroot->cluster; 1720 for (i = 0; i < cluster->nchains; ++i) { 1721 rchain = cluster->array[i].chain; 1722 if (rchain == NULL) 1723 continue; 1724 --rchain->hmp->mount_count; 1725 /* scrapping hmp now may invalidate the pmp */ 1726 } 1727 again: 1728 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) { 1729 if (hmp->mount_count == 0) { 1730 hammer2_unmount_helper(NULL, NULL, hmp); 1731 goto again; 1732 } 1733 } 1734 return; 1735 } 1736 1737 /* 1738 * Try to terminate the block device. We can't terminate it if 1739 * there are still PFSs referencing it. 1740 */ 1741 if (hmp->mount_count) 1742 return; 1743 1744 /* 1745 * Decomission the network before we start messing with the 1746 * device and PFS. 1747 */ 1748 hammer2_iocom_uninit(hmp); 1749 1750 hammer2_bulkfree_uninit(hmp); 1751 hammer2_pfsfree_scan(hmp, 0); 1752 #if 0 1753 hammer2_dev_exlock(hmp); /* XXX order */ 1754 #endif 1755 1756 /* 1757 * Cycle the volume data lock as a safety (probably not needed any 1758 * more). To ensure everything is out we need to flush at least 1759 * three times. (1) The running of the sideq can dirty the 1760 * filesystem, (2) A normal flush can dirty the freemap, and 1761 * (3) ensure that the freemap is fully synchronized. 1762 * 1763 * The next mount's recovery scan can clean everything up but we want 1764 * to leave the filesystem in a 100% clean state on a normal unmount. 1765 */ 1766 #if 0 1767 hammer2_voldata_lock(hmp); 1768 hammer2_voldata_unlock(hmp); 1769 #endif 1770 1771 /* 1772 * Flush whatever is left. Unmounted but modified PFS's might still 1773 * have some dirty chains on them. 1774 */ 1775 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS); 1776 hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS); 1777 1778 if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) { 1779 hammer2_voldata_modify(hmp); 1780 hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP | 1781 HAMMER2_FLUSH_ALL); 1782 } 1783 hammer2_chain_unlock(&hmp->fchain); 1784 1785 if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) { 1786 hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP | 1787 HAMMER2_FLUSH_ALL); 1788 } 1789 hammer2_chain_unlock(&hmp->vchain); 1790 1791 if ((hmp->vchain.flags | hmp->fchain.flags) & 1792 HAMMER2_CHAIN_FLUSH_MASK) { 1793 kprintf("hammer2_unmount: chains left over " 1794 "after final sync\n"); 1795 kprintf(" vchain %08x\n", hmp->vchain.flags); 1796 kprintf(" fchain %08x\n", hmp->fchain.flags); 1797 1798 if (hammer2_debug & 0x0010) 1799 Debugger("entered debugger"); 1800 } 1801 1802 hammer2_pfsfree_scan(hmp, 1); 1803 1804 KKASSERT(hmp->spmp == NULL); 1805 1806 /* 1807 * Finish up with the device vnode 1808 */ 1809 if ((devvp = hmp->devvp) != NULL) { 1810 ronly = hmp->ronly; 1811 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1812 kprintf("hammer2_unmount(A): devvp %s rbdirty %p ronly=%d\n", 1813 hmp->devrepname, RB_ROOT(&devvp->v_rbdirty_tree), 1814 ronly); 1815 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0); 1816 kprintf("hammer2_unmount(B): devvp %s rbdirty %p\n", 1817 hmp->devrepname, RB_ROOT(&devvp->v_rbdirty_tree)); 1818 hmp->devvp = NULL; 1819 VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL); 1820 vn_unlock(devvp); 1821 vrele(devvp); 1822 devvp = NULL; 1823 } 1824 1825 /* 1826 * Clear vchain/fchain flags that might prevent final cleanup 1827 * of these chains. 1828 */ 1829 if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) { 1830 atomic_add_long(&hammer2_count_modified_chains, -1); 1831 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED); 1832 hammer2_pfs_memory_wakeup(hmp->vchain.pmp); 1833 } 1834 if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) { 1835 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE); 1836 } 1837 1838 if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) { 1839 atomic_add_long(&hammer2_count_modified_chains, -1); 1840 atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED); 1841 hammer2_pfs_memory_wakeup(hmp->fchain.pmp); 1842 } 1843 if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) { 1844 atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE); 1845 } 1846 1847 /* 1848 * Final drop of embedded freemap root chain to 1849 * clean up fchain.core (fchain structure is not 1850 * flagged ALLOCATED so it is cleaned out and then 1851 * left to rot). 1852 */ 1853 hammer2_chain_drop(&hmp->fchain); 1854 1855 /* 1856 * Final drop of embedded volume root chain to clean 1857 * up vchain.core (vchain structure is not flagged 1858 * ALLOCATED so it is cleaned out and then left to 1859 * rot). 1860 */ 1861 dumpcnt = 50; 1862 hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v', (u_int)-1); 1863 dumpcnt = 50; 1864 hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f', (u_int)-1); 1865 #if 0 1866 hammer2_dev_unlock(hmp); 1867 #endif 1868 hammer2_chain_drop(&hmp->vchain); 1869 1870 hammer2_io_cleanup(hmp, &hmp->iotree); 1871 if (hmp->iofree_count) { 1872 kprintf("io_cleanup: %d I/O's left hanging\n", 1873 hmp->iofree_count); 1874 } 1875 1876 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); 1877 kmalloc_destroy(&hmp->mchain); 1878 kfree(hmp, M_HAMMER2); 1879 } 1880 1881 int 1882 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 1883 ino_t ino, struct vnode **vpp) 1884 { 1885 hammer2_xop_lookup_t *xop; 1886 hammer2_pfs_t *pmp; 1887 hammer2_inode_t *ip; 1888 hammer2_tid_t inum; 1889 int error; 1890 1891 inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK; 1892 1893 error = 0; 1894 pmp = MPTOPMP(mp); 1895 1896 /* 1897 * Easy if we already have it cached 1898 */ 1899 ip = hammer2_inode_lookup(pmp, inum); 1900 if (ip) { 1901 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 1902 *vpp = hammer2_igetv(ip, &error); 1903 hammer2_inode_unlock(ip); 1904 hammer2_inode_drop(ip); /* from lookup */ 1905 1906 return error; 1907 } 1908 1909 /* 1910 * Otherwise we have to find the inode 1911 */ 1912 xop = hammer2_xop_alloc(pmp->iroot, 0); 1913 xop->lhc = inum; 1914 hammer2_xop_start(&xop->head, &hammer2_lookup_desc); 1915 error = hammer2_xop_collect(&xop->head, 0); 1916 1917 if (error == 0) 1918 ip = hammer2_inode_get(pmp, &xop->head, -1, -1); 1919 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1920 1921 if (ip) { 1922 *vpp = hammer2_igetv(ip, &error); 1923 hammer2_inode_unlock(ip); 1924 } else { 1925 *vpp = NULL; 1926 error = ENOENT; 1927 } 1928 return (error); 1929 } 1930 1931 static 1932 int 1933 hammer2_vfs_root(struct mount *mp, struct vnode **vpp) 1934 { 1935 hammer2_pfs_t *pmp; 1936 struct vnode *vp; 1937 int error; 1938 1939 pmp = MPTOPMP(mp); 1940 if (pmp->iroot == NULL) { 1941 kprintf("hammer2 (%s): no root inode\n", 1942 mp->mnt_stat.f_mntfromname); 1943 *vpp = NULL; 1944 return EINVAL; 1945 } 1946 1947 error = 0; 1948 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED); 1949 1950 while (pmp->inode_tid == 0) { 1951 hammer2_xop_ipcluster_t *xop; 1952 const hammer2_inode_meta_t *meta; 1953 1954 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING); 1955 hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc); 1956 error = hammer2_xop_collect(&xop->head, 0); 1957 1958 if (error == 0) { 1959 meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta; 1960 pmp->iroot->meta = *meta; 1961 pmp->inode_tid = meta->pfs_inum + 1; 1962 hammer2_xop_pdata(&xop->head); 1963 /* meta invalid */ 1964 1965 if (pmp->inode_tid < HAMMER2_INODE_START) 1966 pmp->inode_tid = HAMMER2_INODE_START; 1967 pmp->modify_tid = 1968 xop->head.cluster.focus->bref.modify_tid + 1; 1969 #if 0 1970 kprintf("PFS: Starting inode %jd\n", 1971 (intmax_t)pmp->inode_tid); 1972 kprintf("PMP focus good set nextino=%ld mod=%016jx\n", 1973 pmp->inode_tid, pmp->modify_tid); 1974 #endif 1975 wakeup(&pmp->iroot); 1976 1977 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1978 1979 /* 1980 * Prime the mount info. 1981 */ 1982 hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL); 1983 break; 1984 } 1985 1986 /* 1987 * Loop, try again 1988 */ 1989 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1990 hammer2_inode_unlock(pmp->iroot); 1991 error = tsleep(&pmp->iroot, PCATCH, "h2root", hz); 1992 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED); 1993 if (error == EINTR) 1994 break; 1995 } 1996 1997 if (error) { 1998 hammer2_inode_unlock(pmp->iroot); 1999 *vpp = NULL; 2000 } else { 2001 vp = hammer2_igetv(pmp->iroot, &error); 2002 hammer2_inode_unlock(pmp->iroot); 2003 *vpp = vp; 2004 } 2005 2006 return (error); 2007 } 2008 2009 /* 2010 * Filesystem status 2011 * 2012 * XXX incorporate ipdata->meta.inode_quota and data_quota 2013 */ 2014 static 2015 int 2016 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 2017 { 2018 hammer2_pfs_t *pmp; 2019 hammer2_dev_t *hmp; 2020 hammer2_blockref_t bref; 2021 struct statfs tmp; 2022 int i; 2023 2024 /* 2025 * NOTE: iroot might not have validated the cluster yet. 2026 */ 2027 pmp = MPTOPMP(mp); 2028 2029 bzero(&tmp, sizeof(tmp)); 2030 2031 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 2032 hmp = pmp->pfs_hmps[i]; 2033 if (hmp == NULL) 2034 continue; 2035 if (pmp->iroot->cluster.array[i].chain) 2036 bref = pmp->iroot->cluster.array[i].chain->bref; 2037 else 2038 bzero(&bref, sizeof(bref)); 2039 2040 tmp.f_files = bref.embed.stats.inode_count; 2041 tmp.f_ffree = 0; 2042 tmp.f_blocks = hmp->voldata.allocator_size / 2043 mp->mnt_vstat.f_bsize; 2044 tmp.f_bfree = hmp->voldata.allocator_free / 2045 mp->mnt_vstat.f_bsize; 2046 tmp.f_bavail = tmp.f_bfree; 2047 2048 if (cred && cred->cr_uid != 0) { 2049 uint64_t adj; 2050 2051 /* 5% */ 2052 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize; 2053 tmp.f_blocks -= adj; 2054 tmp.f_bfree -= adj; 2055 tmp.f_bavail -= adj; 2056 } 2057 2058 mp->mnt_stat.f_blocks = tmp.f_blocks; 2059 mp->mnt_stat.f_bfree = tmp.f_bfree; 2060 mp->mnt_stat.f_bavail = tmp.f_bavail; 2061 mp->mnt_stat.f_files = tmp.f_files; 2062 mp->mnt_stat.f_ffree = tmp.f_ffree; 2063 2064 *sbp = mp->mnt_stat; 2065 } 2066 return (0); 2067 } 2068 2069 static 2070 int 2071 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 2072 { 2073 hammer2_pfs_t *pmp; 2074 hammer2_dev_t *hmp; 2075 hammer2_blockref_t bref; 2076 struct statvfs tmp; 2077 int i; 2078 2079 /* 2080 * NOTE: iroot might not have validated the cluster yet. 2081 */ 2082 pmp = MPTOPMP(mp); 2083 bzero(&tmp, sizeof(tmp)); 2084 2085 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 2086 hmp = pmp->pfs_hmps[i]; 2087 if (hmp == NULL) 2088 continue; 2089 if (pmp->iroot->cluster.array[i].chain) 2090 bref = pmp->iroot->cluster.array[i].chain->bref; 2091 else 2092 bzero(&bref, sizeof(bref)); 2093 2094 tmp.f_files = bref.embed.stats.inode_count; 2095 tmp.f_ffree = 0; 2096 tmp.f_blocks = hmp->voldata.allocator_size / 2097 mp->mnt_vstat.f_bsize; 2098 tmp.f_bfree = hmp->voldata.allocator_free / 2099 mp->mnt_vstat.f_bsize; 2100 tmp.f_bavail = tmp.f_bfree; 2101 2102 if (cred && cred->cr_uid != 0) { 2103 uint64_t adj; 2104 2105 /* 5% */ 2106 adj = hmp->free_reserved / mp->mnt_vstat.f_bsize; 2107 tmp.f_blocks -= adj; 2108 tmp.f_bfree -= adj; 2109 tmp.f_bavail -= adj; 2110 } 2111 2112 mp->mnt_vstat.f_blocks = tmp.f_blocks; 2113 mp->mnt_vstat.f_bfree = tmp.f_bfree; 2114 mp->mnt_vstat.f_bavail = tmp.f_bavail; 2115 mp->mnt_vstat.f_files = tmp.f_files; 2116 mp->mnt_vstat.f_ffree = tmp.f_ffree; 2117 2118 *sbp = mp->mnt_vstat; 2119 } 2120 return (0); 2121 } 2122 2123 /* 2124 * Mount-time recovery (RW mounts) 2125 * 2126 * Updates to the free block table are allowed to lag flushes by one 2127 * transaction. In case of a crash, then on a fresh mount we must do an 2128 * incremental scan of the last committed transaction id and make sure that 2129 * all related blocks have been marked allocated. 2130 * 2131 * The super-root topology and each PFS has its own transaction id domain, 2132 * so we must track PFS boundary transitions. 2133 */ 2134 struct hammer2_recovery_elm { 2135 TAILQ_ENTRY(hammer2_recovery_elm) entry; 2136 hammer2_chain_t *chain; 2137 hammer2_tid_t sync_tid; 2138 }; 2139 2140 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm); 2141 2142 struct hammer2_recovery_info { 2143 struct hammer2_recovery_list list; 2144 hammer2_tid_t mtid; 2145 int depth; 2146 }; 2147 2148 static int hammer2_recovery_scan(hammer2_dev_t *hmp, 2149 hammer2_chain_t *parent, 2150 struct hammer2_recovery_info *info, 2151 hammer2_tid_t sync_tid); 2152 2153 #define HAMMER2_RECOVERY_MAXDEPTH 10 2154 2155 static 2156 int 2157 hammer2_recovery(hammer2_dev_t *hmp) 2158 { 2159 struct hammer2_recovery_info info; 2160 struct hammer2_recovery_elm *elm; 2161 hammer2_chain_t *parent; 2162 hammer2_tid_t sync_tid; 2163 hammer2_tid_t mirror_tid; 2164 int error; 2165 2166 hammer2_trans_init(hmp->spmp, 0); 2167 2168 sync_tid = hmp->voldata.freemap_tid; 2169 mirror_tid = hmp->voldata.mirror_tid; 2170 2171 kprintf("hammer2 mount \"%s\": ", hmp->devrepname); 2172 if (sync_tid >= mirror_tid) { 2173 kprintf(" no recovery needed\n"); 2174 } else { 2175 kprintf(" freemap recovery %016jx-%016jx\n", 2176 sync_tid + 1, mirror_tid); 2177 } 2178 2179 TAILQ_INIT(&info.list); 2180 info.depth = 0; 2181 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 2182 error = hammer2_recovery_scan(hmp, parent, &info, sync_tid); 2183 hammer2_chain_lookup_done(parent); 2184 2185 while ((elm = TAILQ_FIRST(&info.list)) != NULL) { 2186 TAILQ_REMOVE(&info.list, elm, entry); 2187 parent = elm->chain; 2188 sync_tid = elm->sync_tid; 2189 kfree(elm, M_HAMMER2); 2190 2191 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2192 error |= hammer2_recovery_scan(hmp, parent, &info, 2193 hmp->voldata.freemap_tid); 2194 hammer2_chain_unlock(parent); 2195 hammer2_chain_drop(parent); /* drop elm->chain ref */ 2196 } 2197 2198 hammer2_trans_done(hmp->spmp, 0); 2199 2200 return error; 2201 } 2202 2203 static 2204 int 2205 hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent, 2206 struct hammer2_recovery_info *info, 2207 hammer2_tid_t sync_tid) 2208 { 2209 const hammer2_inode_data_t *ripdata; 2210 hammer2_chain_t *chain; 2211 hammer2_blockref_t bref; 2212 int tmp_error; 2213 int rup_error; 2214 int error; 2215 int first; 2216 2217 /* 2218 * Adjust freemap to ensure that the block(s) are marked allocated. 2219 */ 2220 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) { 2221 hammer2_freemap_adjust(hmp, &parent->bref, 2222 HAMMER2_FREEMAP_DORECOVER); 2223 } 2224 2225 /* 2226 * Check type for recursive scan 2227 */ 2228 switch(parent->bref.type) { 2229 case HAMMER2_BREF_TYPE_VOLUME: 2230 /* data already instantiated */ 2231 break; 2232 case HAMMER2_BREF_TYPE_INODE: 2233 /* 2234 * Must instantiate data for DIRECTDATA test and also 2235 * for recursion. 2236 */ 2237 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2238 ripdata = &hammer2_chain_rdata(parent)->ipdata; 2239 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) { 2240 /* not applicable to recovery scan */ 2241 hammer2_chain_unlock(parent); 2242 return 0; 2243 } 2244 hammer2_chain_unlock(parent); 2245 break; 2246 case HAMMER2_BREF_TYPE_INDIRECT: 2247 /* 2248 * Must instantiate data for recursion 2249 */ 2250 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 2251 hammer2_chain_unlock(parent); 2252 break; 2253 case HAMMER2_BREF_TYPE_DIRENT: 2254 case HAMMER2_BREF_TYPE_DATA: 2255 case HAMMER2_BREF_TYPE_FREEMAP: 2256 case HAMMER2_BREF_TYPE_FREEMAP_NODE: 2257 case HAMMER2_BREF_TYPE_FREEMAP_LEAF: 2258 /* not applicable to recovery scan */ 2259 return 0; 2260 break; 2261 default: 2262 return HAMMER2_ERROR_BADBREF; 2263 } 2264 2265 /* 2266 * Defer operation if depth limit reached or if we are crossing a 2267 * PFS boundary. 2268 */ 2269 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) { 2270 struct hammer2_recovery_elm *elm; 2271 2272 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK); 2273 elm->chain = parent; 2274 elm->sync_tid = sync_tid; 2275 hammer2_chain_ref(parent); 2276 TAILQ_INSERT_TAIL(&info->list, elm, entry); 2277 /* unlocked by caller */ 2278 2279 return(0); 2280 } 2281 2282 2283 /* 2284 * Recursive scan of the last flushed transaction only. We are 2285 * doing this without pmp assignments so don't leave the chains 2286 * hanging around after we are done with them. 2287 * 2288 * error Cumulative error this level only 2289 * rup_error Cumulative error for recursion 2290 * tmp_error Specific non-cumulative recursion error 2291 */ 2292 chain = NULL; 2293 first = 1; 2294 rup_error = 0; 2295 error = 0; 2296 2297 for (;;) { 2298 error |= hammer2_chain_scan(parent, &chain, &bref, 2299 &first, 2300 HAMMER2_LOOKUP_NODATA); 2301 2302 /* 2303 * Problem during scan or EOF 2304 */ 2305 if (error) 2306 break; 2307 2308 /* 2309 * If this is a leaf 2310 */ 2311 if (chain == NULL) { 2312 if (bref.mirror_tid > sync_tid) { 2313 hammer2_freemap_adjust(hmp, &bref, 2314 HAMMER2_FREEMAP_DORECOVER); 2315 } 2316 continue; 2317 } 2318 2319 /* 2320 * This may or may not be a recursive node. 2321 */ 2322 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 2323 if (bref.mirror_tid > sync_tid) { 2324 ++info->depth; 2325 tmp_error = hammer2_recovery_scan(hmp, chain, 2326 info, sync_tid); 2327 --info->depth; 2328 } else { 2329 tmp_error = 0; 2330 } 2331 2332 /* 2333 * Flush the recovery at the PFS boundary to stage it for 2334 * the final flush of the super-root topology. 2335 */ 2336 if (tmp_error == 0 && 2337 (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) && 2338 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) { 2339 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 2340 HAMMER2_FLUSH_ALL); 2341 } 2342 rup_error |= tmp_error; 2343 } 2344 return ((error | rup_error) & ~HAMMER2_ERROR_EOF); 2345 } 2346 2347 /* 2348 * This fixes up an error introduced in earlier H2 implementations where 2349 * moving a PFS inode into an indirect block wound up causing the 2350 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared. 2351 */ 2352 static 2353 int 2354 hammer2_fixup_pfses(hammer2_dev_t *hmp) 2355 { 2356 const hammer2_inode_data_t *ripdata; 2357 hammer2_chain_t *parent; 2358 hammer2_chain_t *chain; 2359 hammer2_key_t key_next; 2360 hammer2_pfs_t *spmp; 2361 int error; 2362 2363 error = 0; 2364 2365 /* 2366 * Lookup mount point under the media-localized super-root. 2367 * 2368 * cluster->pmp will incorrectly point to spmp and must be fixed 2369 * up later on. 2370 */ 2371 spmp = hmp->spmp; 2372 hammer2_inode_lock(spmp->iroot, 0); 2373 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS); 2374 chain = hammer2_chain_lookup(&parent, &key_next, 2375 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX, 2376 &error, 0); 2377 while (chain) { 2378 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) 2379 continue; 2380 if (chain->error) { 2381 kprintf("I/O error scanning PFS labels\n"); 2382 error |= chain->error; 2383 } else if ((chain->bref.flags & 2384 HAMMER2_BREF_FLAG_PFSROOT) == 0) { 2385 int error2; 2386 2387 ripdata = &chain->data->ipdata; 2388 hammer2_trans_init(hmp->spmp, 0); 2389 error2 = hammer2_chain_modify(chain, 2390 chain->bref.modify_tid, 2391 0, 0); 2392 if (error2 == 0) { 2393 kprintf("hammer2: Correct mis-flagged PFS %s\n", 2394 ripdata->filename); 2395 chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT; 2396 } else { 2397 error |= error2; 2398 } 2399 hammer2_flush(chain, HAMMER2_FLUSH_TOP | 2400 HAMMER2_FLUSH_ALL); 2401 hammer2_trans_done(hmp->spmp, 0); 2402 } 2403 chain = hammer2_chain_next(&parent, chain, &key_next, 2404 key_next, HAMMER2_KEY_MAX, 2405 &error, 0); 2406 } 2407 if (parent) { 2408 hammer2_chain_unlock(parent); 2409 hammer2_chain_drop(parent); 2410 } 2411 hammer2_inode_unlock(spmp->iroot); 2412 2413 return error; 2414 } 2415 2416 /* 2417 * Sync a mount point; this is called periodically on a per-mount basis from 2418 * the filesystem syncer, and whenever a user issues a sync. 2419 */ 2420 int 2421 hammer2_vfs_sync(struct mount *mp, int waitfor) 2422 { 2423 int error; 2424 2425 error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor); 2426 2427 return error; 2428 } 2429 2430 /* 2431 * Because frontend operations lock vnodes before we get a chance to 2432 * lock the related inode, we can't just acquire a vnode lock without 2433 * risking a deadlock. The frontend may be holding a vnode lock while 2434 * also blocked on our SYNCQ flag while trying to get the inode lock. 2435 * 2436 * To deal with this situation we can check the vnode lock situation 2437 * after locking the inode and perform a work-around. 2438 */ 2439 int 2440 hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor) 2441 { 2442 struct mount *mp; 2443 /*hammer2_xop_flush_t *xop;*/ 2444 /*struct hammer2_sync_info info;*/ 2445 hammer2_inode_t *ip; 2446 hammer2_depend_t *depend; 2447 hammer2_depend_t *depend_next; 2448 struct vnode *vp; 2449 uint32_t pass2; 2450 int error; 2451 int dorestart; 2452 2453 mp = pmp->mp; 2454 2455 /* 2456 * Move all inodes on sideq to syncq. This will clear sideq. 2457 * This should represent all flushable inodes. These inodes 2458 * will already have refs due to being on syncq or sideq. We 2459 * must do this all at once with the spinlock held to ensure that 2460 * all inode dependencies are part of the same flush. 2461 * 2462 * We should be able to do this asynchronously from frontend 2463 * operations because we will be locking the inodes later on 2464 * to actually flush them, and that will partition any frontend 2465 * op using the same inode. Either it has already locked the 2466 * inode and we will block, or it has not yet locked the inode 2467 * and it will block until we are finished flushing that inode. 2468 * 2469 * When restarting, only move the inodes flagged as PASS2 from 2470 * SIDEQ to SYNCQ. PASS2 propagation by inode_lock4() and 2471 * inode_depend() are atomic with the spin-lock. 2472 */ 2473 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH); 2474 #ifdef HAMMER2_DEBUG_SYNC 2475 kprintf("FILESYSTEM SYNC BOUNDARY\n"); 2476 #endif 2477 dorestart = 0; 2478 2479 /* 2480 * Move inodes from depq to syncq, releasing the related 2481 * depend structures. 2482 */ 2483 restart: 2484 #ifdef HAMMER2_DEBUG_SYNC 2485 kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart); 2486 #endif 2487 hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/); 2488 hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN); 2489 2490 /* 2491 * Move inodes from depq to syncq. When restarting, only depq's 2492 * marked pass2 are moved. 2493 */ 2494 hammer2_spin_ex(&pmp->list_spin); 2495 depend_next = TAILQ_FIRST(&pmp->depq); 2496 2497 while ((depend = depend_next) != NULL) { 2498 depend_next = TAILQ_NEXT(depend, entry); 2499 if (dorestart && depend->pass2 == 0) 2500 continue; 2501 TAILQ_FOREACH(ip, &depend->sideq, entry) { 2502 KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ); 2503 atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ); 2504 atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ); 2505 ip->depend = NULL; 2506 } 2507 TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry); 2508 pmp->sideq_count -= depend->count; 2509 depend->count = 0; 2510 depend->pass2 = 0; 2511 TAILQ_REMOVE(&pmp->depq, depend, entry); 2512 } 2513 2514 hammer2_spin_unex(&pmp->list_spin); 2515 hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/ 2516 HAMMER2_TRANS_WAITING); 2517 dorestart = 0; 2518 2519 /* 2520 * sideq_count may have dropped enough to allow us to unstall 2521 * the frontend. 2522 */ 2523 hammer2_pfs_memory_inc(pmp); 2524 hammer2_pfs_memory_wakeup(pmp); 2525 2526 /* 2527 * Now run through all inodes on syncq. 2528 * 2529 * Flush transactions only interlock with other flush transactions. 2530 * Any conflicting frontend operations will block on the inode, but 2531 * may hold a vnode lock while doing so. 2532 */ 2533 hammer2_spin_ex(&pmp->list_spin); 2534 while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) { 2535 /* 2536 * Remove the inode from the SYNCQ, transfer the syncq ref 2537 * to us. We must clear SYNCQ to allow any potential 2538 * front-end deadlock to proceed. We must set PASS2 so 2539 * the dependency code knows what to do. 2540 */ 2541 pass2 = ip->flags; 2542 cpu_ccfence(); 2543 if (atomic_cmpset_int(&ip->flags, 2544 pass2, 2545 (pass2 & ~(HAMMER2_INODE_SYNCQ | 2546 HAMMER2_INODE_SYNCQ_WAKEUP)) | 2547 HAMMER2_INODE_SYNCQ_PASS2) == 0) { 2548 continue; 2549 } 2550 TAILQ_REMOVE(&pmp->syncq, ip, entry); 2551 hammer2_spin_unex(&pmp->list_spin); 2552 if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP) 2553 wakeup(&ip->flags); 2554 2555 /* 2556 * Relock the inode, and we inherit a ref from the above. 2557 * We will check for a race after we acquire the vnode. 2558 */ 2559 hammer2_mtx_ex(&ip->lock); 2560 2561 /* 2562 * We need the vp in order to vfsync() dirty buffers, so if 2563 * one isn't attached we can skip it. 2564 * 2565 * Ordering the inode lock and then the vnode lock has the 2566 * potential to deadlock. If we had left SYNCQ set that could 2567 * also deadlock us against the frontend even if we don't hold 2568 * any locks, but the latter is not a problem now since we 2569 * cleared it. igetv will temporarily release the inode lock 2570 * in a safe manner to work-around the deadlock. 2571 * 2572 * Unfortunately it is still possible to deadlock when the 2573 * frontend obtains multiple inode locks, because all the 2574 * related vnodes are already locked (nor can the vnode locks 2575 * be released and reacquired without messing up RECLAIM and 2576 * INACTIVE sequencing). 2577 * 2578 * The solution for now is to move the vp back onto SIDEQ 2579 * and set dorestart, which will restart the flush after we 2580 * exhaust the current SYNCQ. Note that additional 2581 * dependencies may build up, so we definitely need to move 2582 * the whole SIDEQ back to SYNCQ when we restart. 2583 */ 2584 vp = ip->vp; 2585 if (vp) { 2586 if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) { 2587 /* 2588 * Failed to get the vnode, requeue the inode 2589 * (PASS2 is already set so it will be found 2590 * again on the restart). 2591 * 2592 * Then unlock, possibly sleep, and retry 2593 * later. We sleep if PASS2 was *previously* 2594 * set, before we set it again above. 2595 */ 2596 vp = NULL; 2597 dorestart = 1; 2598 #ifdef HAMMER2_DEBUG_SYNC 2599 kprintf("inum %ld (sync delayed by vnode)\n", 2600 (long)ip->meta.inum); 2601 #endif 2602 hammer2_inode_delayed_sideq(ip); 2603 2604 hammer2_mtx_unlock(&ip->lock); 2605 hammer2_inode_drop(ip); 2606 2607 if (pass2 & HAMMER2_INODE_SYNCQ_PASS2) { 2608 tsleep(&dorestart, 0, "h2syndel", 2); 2609 } 2610 hammer2_spin_ex(&pmp->list_spin); 2611 continue; 2612 } 2613 } else { 2614 vp = NULL; 2615 } 2616 2617 /* 2618 * If the inode wound up on a SIDEQ again it will already be 2619 * prepped for another PASS2. In this situation if we flush 2620 * it now we will just wind up flushing it again in the same 2621 * syncer run, so we might as well not flush it now. 2622 */ 2623 if (ip->flags & HAMMER2_INODE_SIDEQ) { 2624 hammer2_mtx_unlock(&ip->lock); 2625 hammer2_inode_drop(ip); 2626 if (vp) 2627 vput(vp); 2628 dorestart = 1; 2629 hammer2_spin_ex(&pmp->list_spin); 2630 continue; 2631 } 2632 2633 /* 2634 * Ok we have the inode exclusively locked and if vp is 2635 * not NULL that will also be exclusively locked. Do the 2636 * meat of the flush. 2637 * 2638 * vp token needed for v_rbdirty_tree check / vclrisdirty 2639 * sequencing. Though we hold the vnode exclusively so 2640 * we shouldn't need to hold the token also in this case. 2641 */ 2642 if (vp) { 2643 vfsync(vp, MNT_WAIT, 1, NULL, NULL); 2644 bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */ 2645 } 2646 2647 /* 2648 * If the inode has not yet been inserted into the tree 2649 * we must do so. Then sync and flush it. The flush should 2650 * update the parent. 2651 */ 2652 if (ip->flags & HAMMER2_INODE_DELETING) { 2653 #ifdef HAMMER2_DEBUG_SYNC 2654 kprintf("inum %ld destroy\n", (long)ip->meta.inum); 2655 #endif 2656 hammer2_inode_chain_des(ip); 2657 atomic_add_long(&hammer2_iod_inode_deletes, 1); 2658 } else if (ip->flags & HAMMER2_INODE_CREATING) { 2659 #ifdef HAMMER2_DEBUG_SYNC 2660 kprintf("inum %ld insert\n", (long)ip->meta.inum); 2661 #endif 2662 hammer2_inode_chain_ins(ip); 2663 atomic_add_long(&hammer2_iod_inode_creates, 1); 2664 } 2665 #ifdef HAMMER2_DEBUG_SYNC 2666 kprintf("inum %ld chain-sync\n", (long)ip->meta.inum); 2667 #endif 2668 2669 /* 2670 * Because I kinda messed up the design and index the inodes 2671 * under the root inode, along side the directory entries, 2672 * we can't flush the inode index under the iroot until the 2673 * end. If we do it now we might miss effects created by 2674 * other inodes on the SYNCQ. 2675 * 2676 * Do a normal (non-FSSYNC) flush instead, which allows the 2677 * vnode code to work the same. We don't want to force iroot 2678 * back onto the SIDEQ, and we also don't want the flush code 2679 * to update pfs_iroot_blocksets until the final flush later. 2680 * 2681 * XXX at the moment this will likely result in a double-flush 2682 * of the iroot chain. 2683 */ 2684 hammer2_inode_chain_sync(ip); 2685 if (ip == pmp->iroot) { 2686 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP); 2687 } else { 2688 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP | 2689 HAMMER2_XOP_FSSYNC); 2690 } 2691 if (vp) { 2692 lwkt_gettoken(&vp->v_token); 2693 if ((ip->flags & (HAMMER2_INODE_MODIFIED | 2694 HAMMER2_INODE_RESIZED | 2695 HAMMER2_INODE_DIRTYDATA)) == 0 && 2696 RB_EMPTY(&vp->v_rbdirty_tree) && 2697 !bio_track_active(&vp->v_track_write)) { 2698 vclrisdirty(vp); 2699 } else { 2700 hammer2_inode_delayed_sideq(ip); 2701 } 2702 lwkt_reltoken(&vp->v_token); 2703 vput(vp); 2704 vp = NULL; /* safety */ 2705 } 2706 atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2); 2707 hammer2_inode_unlock(ip); /* unlock+drop */ 2708 /* ip pointer invalid */ 2709 2710 /* 2711 * If the inode got dirted after we dropped our locks, 2712 * it will have already been moved back to the SIDEQ. 2713 */ 2714 hammer2_spin_ex(&pmp->list_spin); 2715 } 2716 hammer2_spin_unex(&pmp->list_spin); 2717 if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) { 2718 #ifdef HAMMER2_DEBUG_SYNC 2719 kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n"); 2720 /*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/ 2721 #endif 2722 dorestart = 1; 2723 goto restart; 2724 } 2725 #ifdef HAMMER2_DEBUG_SYNC 2726 kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n"); 2727 /*tsleep(&dorestart, 0, "h2STG2", hz*20);*/ 2728 #endif 2729 2730 /* 2731 * We have to flush the PFS root last, even if it does not appear to 2732 * be dirty, because all the inodes in the PFS are indexed under it. 2733 * The normal flushing of iroot above would only occur if directory 2734 * entries under the root were changed. 2735 * 2736 * Specifying VOLHDR will cause an additionl flush of hmp->spmp 2737 * for the media making up the cluster. 2738 */ 2739 if ((ip = pmp->iroot) != NULL) { 2740 hammer2_inode_ref(ip); 2741 hammer2_mtx_ex(&ip->lock); 2742 hammer2_inode_chain_sync(ip); 2743 hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP | 2744 HAMMER2_XOP_FSSYNC | 2745 HAMMER2_XOP_VOLHDR); 2746 hammer2_inode_unlock(ip); /* unlock+drop */ 2747 } 2748 #ifdef HAMMER2_DEBUG_SYNC 2749 kprintf("FILESYSTEM SYNC STAGE 2 DONE\n"); 2750 #endif 2751 2752 /* 2753 * device bioq sync 2754 */ 2755 hammer2_bioq_sync(pmp); 2756 2757 #if 0 2758 info.pass = 1; 2759 info.waitfor = MNT_WAIT; 2760 vsyncscan(mp, flags, hammer2_sync_scan2, &info); 2761 2762 info.pass = 2; 2763 info.waitfor = MNT_WAIT; 2764 vsyncscan(mp, flags, hammer2_sync_scan2, &info); 2765 #endif 2766 #if 0 2767 /* 2768 * Generally speaking we now want to flush the media topology from 2769 * the iroot through to the inodes. The flush stops at any inode 2770 * boundary, which allows the frontend to continue running concurrent 2771 * modifying operations on inodes (including kernel flushes of 2772 * buffers) without interfering with the main sync. 2773 * 2774 * Use the XOP interface to concurrently flush all nodes to 2775 * synchronize the PFSROOT subtopology to the media. A standard 2776 * end-of-scan ENOENT error indicates cluster sufficiency. 2777 * 2778 * Note that this flush will not be visible on crash recovery until 2779 * we flush the super-root topology in the next loop. 2780 * 2781 * XXX For now wait for all flushes to complete. 2782 */ 2783 if (mp && (ip = pmp->iroot) != NULL) { 2784 /* 2785 * If unmounting try to flush everything including any 2786 * sub-trees under inodes, just in case there is dangling 2787 * modified data, as a safety. Otherwise just flush up to 2788 * the inodes in this stage. 2789 */ 2790 kprintf("MP & IROOT\n"); 2791 #ifdef HAMMER2_DEBUG_SYNC 2792 kprintf("FILESYSTEM SYNC STAGE 3 IROOT BEGIN\n"); 2793 #endif 2794 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 2795 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 2796 HAMMER2_XOP_VOLHDR | 2797 HAMMER2_XOP_FSSYNC | 2798 HAMMER2_XOP_INODE_STOP); 2799 } else { 2800 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 2801 HAMMER2_XOP_INODE_STOP | 2802 HAMMER2_XOP_VOLHDR | 2803 HAMMER2_XOP_FSSYNC | 2804 HAMMER2_XOP_INODE_STOP); 2805 } 2806 hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc); 2807 error = hammer2_xop_collect(&xop->head, 2808 HAMMER2_XOP_COLLECT_WAITALL); 2809 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2810 #ifdef HAMMER2_DEBUG_SYNC 2811 kprintf("FILESYSTEM SYNC STAGE 3 IROOT END\n"); 2812 #endif 2813 if (error == HAMMER2_ERROR_ENOENT) 2814 error = 0; 2815 else 2816 error = hammer2_error_to_errno(error); 2817 } else { 2818 error = 0; 2819 } 2820 #endif 2821 error = 0; /* XXX */ 2822 hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH); 2823 2824 return (error); 2825 } 2826 2827 static 2828 int 2829 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp) 2830 { 2831 hammer2_inode_t *ip; 2832 2833 KKASSERT(MAXFIDSZ >= 16); 2834 ip = VTOI(vp); 2835 fhp->fid_len = offsetof(struct fid, fid_data[16]); 2836 fhp->fid_ext = 0; 2837 ((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum; 2838 ((hammer2_tid_t *)fhp->fid_data)[1] = 0; 2839 2840 return 0; 2841 } 2842 2843 static 2844 int 2845 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 2846 struct fid *fhp, struct vnode **vpp) 2847 { 2848 hammer2_pfs_t *pmp; 2849 hammer2_tid_t inum; 2850 int error; 2851 2852 pmp = MPTOPMP(mp); 2853 inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK; 2854 if (vpp) { 2855 if (inum == 1) 2856 error = hammer2_vfs_root(mp, vpp); 2857 else 2858 error = hammer2_vfs_vget(mp, NULL, inum, vpp); 2859 } else { 2860 error = 0; 2861 } 2862 if (error) 2863 kprintf("fhtovp: %016jx -> %p, %d\n", inum, *vpp, error); 2864 return error; 2865 } 2866 2867 static 2868 int 2869 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 2870 int *exflagsp, struct ucred **credanonp) 2871 { 2872 hammer2_pfs_t *pmp; 2873 struct netcred *np; 2874 int error; 2875 2876 pmp = MPTOPMP(mp); 2877 np = vfs_export_lookup(mp, &pmp->export, nam); 2878 if (np) { 2879 *exflagsp = np->netc_exflags; 2880 *credanonp = &np->netc_anon; 2881 error = 0; 2882 } else { 2883 error = EACCES; 2884 } 2885 return error; 2886 } 2887 2888 /* 2889 * Support code for hammer2_vfs_mount(). Read, verify, and install the volume 2890 * header into the HMP 2891 * 2892 * XXX read four volhdrs and use the one with the highest TID whos CRC 2893 * matches. 2894 * 2895 * XXX check iCRCs. 2896 * 2897 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to 2898 * nonexistant locations. 2899 * 2900 * XXX Record selected volhdr and ring updates to each of 4 volhdrs 2901 */ 2902 static 2903 int 2904 hammer2_install_volume_header(hammer2_dev_t *hmp) 2905 { 2906 hammer2_volume_data_t *vd; 2907 struct buf *bp; 2908 hammer2_crc32_t crc0, crc, bcrc0, bcrc; 2909 int error_reported; 2910 int error; 2911 int valid; 2912 int i; 2913 2914 error_reported = 0; 2915 error = 0; 2916 valid = 0; 2917 bp = NULL; 2918 2919 /* 2920 * There are up to 4 copies of the volume header (syncs iterate 2921 * between them so there is no single master). We don't trust the 2922 * volu_size field so we don't know precisely how large the filesystem 2923 * is, so depend on the OS to return an error if we go beyond the 2924 * block device's EOF. 2925 */ 2926 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) { 2927 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64, 2928 HAMMER2_VOLUME_BYTES, &bp); 2929 if (error) { 2930 brelse(bp); 2931 bp = NULL; 2932 continue; 2933 } 2934 2935 vd = (struct hammer2_volume_data *) bp->b_data; 2936 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) && 2937 (vd->magic != HAMMER2_VOLUME_ID_ABO)) { 2938 brelse(bp); 2939 bp = NULL; 2940 continue; 2941 } 2942 2943 if (vd->magic == HAMMER2_VOLUME_ID_ABO) { 2944 /* XXX: Reversed-endianness filesystem */ 2945 kprintf("hammer2: reverse-endian filesystem detected"); 2946 brelse(bp); 2947 bp = NULL; 2948 continue; 2949 } 2950 2951 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0]; 2952 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF, 2953 HAMMER2_VOLUME_ICRC0_SIZE); 2954 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1]; 2955 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF, 2956 HAMMER2_VOLUME_ICRC1_SIZE); 2957 if ((crc0 != crc) || (bcrc0 != bcrc)) { 2958 kprintf("hammer2 volume header crc " 2959 "mismatch copy #%d %08x/%08x\n", 2960 i, crc0, crc); 2961 error_reported = 1; 2962 brelse(bp); 2963 bp = NULL; 2964 continue; 2965 } 2966 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) { 2967 valid = 1; 2968 hmp->voldata = *vd; 2969 hmp->volhdrno = i; 2970 } 2971 brelse(bp); 2972 bp = NULL; 2973 } 2974 if (valid) { 2975 hmp->volsync = hmp->voldata; 2976 hmp->free_reserved = hmp->voldata.allocator_size / 20; 2977 error = 0; 2978 if (error_reported || bootverbose || 1) { /* 1/DEBUG */ 2979 kprintf("hammer2: using volume header #%d\n", 2980 hmp->volhdrno); 2981 } 2982 } else { 2983 error = EINVAL; 2984 kprintf("hammer2: no valid volume headers found!\n"); 2985 } 2986 return (error); 2987 } 2988 2989 /* 2990 * This handles hysteresis on regular file flushes. Because the BIOs are 2991 * routed to a thread it is possible for an excessive number to build up 2992 * and cause long front-end stalls long before the runningbuffspace limit 2993 * is hit, so we implement hammer2_flush_pipe to control the 2994 * hysteresis. 2995 * 2996 * This is a particular problem when compression is used. 2997 */ 2998 void 2999 hammer2_lwinprog_ref(hammer2_pfs_t *pmp) 3000 { 3001 atomic_add_int(&pmp->count_lwinprog, 1); 3002 } 3003 3004 void 3005 hammer2_lwinprog_drop(hammer2_pfs_t *pmp) 3006 { 3007 int lwinprog; 3008 3009 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1); 3010 if ((lwinprog & HAMMER2_LWINPROG_WAITING) && 3011 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) { 3012 atomic_clear_int(&pmp->count_lwinprog, 3013 HAMMER2_LWINPROG_WAITING); 3014 wakeup(&pmp->count_lwinprog); 3015 } 3016 if ((lwinprog & HAMMER2_LWINPROG_WAITING0) && 3017 (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) { 3018 atomic_clear_int(&pmp->count_lwinprog, 3019 HAMMER2_LWINPROG_WAITING0); 3020 wakeup(&pmp->count_lwinprog); 3021 } 3022 } 3023 3024 void 3025 hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe) 3026 { 3027 int lwinprog; 3028 int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING : 3029 HAMMER2_LWINPROG_WAITING0; 3030 3031 for (;;) { 3032 lwinprog = pmp->count_lwinprog; 3033 cpu_ccfence(); 3034 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe) 3035 break; 3036 tsleep_interlock(&pmp->count_lwinprog, 0); 3037 atomic_set_int(&pmp->count_lwinprog, lwflag); 3038 lwinprog = pmp->count_lwinprog; 3039 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe) 3040 break; 3041 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz); 3042 } 3043 } 3044 3045 /* 3046 * It is possible for an excessive number of dirty chains or dirty inodes 3047 * to build up. When this occurs we start an asynchronous filesystem sync. 3048 * If the level continues to build up, we stall, waiting for it to drop, 3049 * with some hysteresis. 3050 * 3051 * We limit the stall to two seconds per call. 3052 * 3053 * This relies on the kernel calling hammer2_vfs_modifying() prior to 3054 * obtaining any vnode locks before making a modifying VOP call. 3055 */ 3056 static void 3057 hammer2_vfs_modifying(struct mount *mp) 3058 { 3059 hammer2_pfs_memory_wait(MPTOPMP(mp)); 3060 } 3061 3062 /* 3063 * Initiate an asynchronous filesystem sync and, with hysteresis, 3064 * stall if the internal data structure count becomes too bloated. 3065 */ 3066 void 3067 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp) 3068 { 3069 uint32_t waiting; 3070 int loops; 3071 3072 if (pmp == NULL || pmp->mp == NULL) 3073 return; 3074 3075 for (loops = 0; loops < 2; ++loops) { 3076 waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK; 3077 cpu_ccfence(); 3078 3079 /* 3080 * Start the syncer running at 1/2 the limit 3081 */ 3082 if (waiting > hammer2_limit_dirty_chains / 2 || 3083 pmp->sideq_count > hammer2_limit_dirty_inodes / 2) { 3084 trigger_syncer(pmp->mp); 3085 } 3086 3087 /* 3088 * Stall at the limit waiting for the counts to drop. 3089 * This code will typically be woken up once the count 3090 * drops below 3/4 the limit, or in one second. 3091 */ 3092 if (waiting < hammer2_limit_dirty_chains && 3093 pmp->sideq_count < hammer2_limit_dirty_inodes) { 3094 break; 3095 } 3096 tsleep_interlock(&pmp->inmem_dirty_chains, 0); 3097 atomic_set_int(&pmp->inmem_dirty_chains, 3098 HAMMER2_DIRTYCHAIN_WAITING); 3099 if (waiting < hammer2_limit_dirty_chains && 3100 pmp->sideq_count < hammer2_limit_dirty_inodes) { 3101 break; 3102 } 3103 trigger_syncer(pmp->mp); 3104 tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED, "h2memw", hz); 3105 #if 0 3106 limit = pmp->mp->mnt_nvnodelistsize / 10; 3107 if (limit < hammer2_limit_dirty_chains) 3108 limit = hammer2_limit_dirty_chains; 3109 if (limit < 1000) 3110 limit = 1000; 3111 #endif 3112 } 3113 } 3114 3115 void 3116 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp) 3117 { 3118 if (pmp) { 3119 atomic_add_int(&pmp->inmem_dirty_chains, 1); 3120 } 3121 } 3122 3123 void 3124 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp) 3125 { 3126 uint32_t waiting; 3127 3128 if (pmp) { 3129 waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, -1); 3130 /* don't need --waiting to test flag */ 3131 3132 if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) && 3133 (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <= 3134 hammer2_limit_dirty_chains * 2 / 3 && 3135 pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) { 3136 atomic_clear_int(&pmp->inmem_dirty_chains, 3137 HAMMER2_DIRTYCHAIN_WAITING); 3138 wakeup(&pmp->inmem_dirty_chains); 3139 } 3140 } 3141 } 3142 3143 /* 3144 * Returns 0 if the filesystem has tons of free space 3145 * Returns 1 if the filesystem has less than 10% remaining 3146 * Returns 2 if the filesystem has less than 2%/5% (user/root) remaining. 3147 */ 3148 int 3149 hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred) 3150 { 3151 hammer2_pfs_t *pmp; 3152 hammer2_dev_t *hmp; 3153 hammer2_off_t free_reserved; 3154 hammer2_off_t free_nominal; 3155 int i; 3156 3157 pmp = ip->pmp; 3158 3159 if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) { 3160 free_reserved = HAMMER2_SEGSIZE; 3161 free_nominal = 0x7FFFFFFFFFFFFFFFLLU; 3162 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 3163 hmp = pmp->pfs_hmps[i]; 3164 if (hmp == NULL) 3165 continue; 3166 if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER && 3167 pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER) 3168 continue; 3169 3170 if (free_nominal > hmp->voldata.allocator_free) 3171 free_nominal = hmp->voldata.allocator_free; 3172 if (free_reserved < hmp->free_reserved) 3173 free_reserved = hmp->free_reserved; 3174 } 3175 3176 /* 3177 * SMP races ok 3178 */ 3179 pmp->free_reserved = free_reserved; 3180 pmp->free_nominal = free_nominal; 3181 pmp->free_ticks = ticks; 3182 } else { 3183 free_reserved = pmp->free_reserved; 3184 free_nominal = pmp->free_nominal; 3185 } 3186 if (cred && cred->cr_uid != 0) { 3187 if ((int64_t)(free_nominal - bytes) < 3188 (int64_t)free_reserved) { 3189 return 2; 3190 } 3191 } else { 3192 if ((int64_t)(free_nominal - bytes) < 3193 (int64_t)free_reserved / 2) { 3194 return 2; 3195 } 3196 } 3197 if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2) 3198 return 1; 3199 return 0; 3200 } 3201 3202 /* 3203 * Debugging 3204 */ 3205 void 3206 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx, 3207 u_int flags) 3208 { 3209 hammer2_chain_t *scan; 3210 hammer2_chain_t *parent; 3211 3212 --*countp; 3213 if (*countp == 0) { 3214 kprintf("%*.*s...\n", tab, tab, ""); 3215 return; 3216 } 3217 if (*countp < 0) 3218 return; 3219 kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n", 3220 tab, tab, "", pfx, 3221 chain, chain->bref.type, 3222 chain->bref.key, chain->bref.keybits, 3223 chain->bref.mirror_tid); 3224 3225 kprintf("%*.*s [%08x] (%s) refs=%d", 3226 tab, tab, "", 3227 chain->flags, 3228 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE && 3229 chain->data) ? (char *)chain->data->ipdata.filename : "?"), 3230 chain->refs); 3231 3232 parent = chain->parent; 3233 if (parent) 3234 kprintf("\n%*.*s p=%p [pflags %08x prefs %d", 3235 tab, tab, "", 3236 parent, parent->flags, parent->refs); 3237 if (RB_EMPTY(&chain->core.rbtree)) { 3238 kprintf("\n"); 3239 } else { 3240 kprintf(" {\n"); 3241 RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) { 3242 if ((scan->flags & flags) || flags == (u_int)-1) { 3243 hammer2_dump_chain(scan, tab + 4, countp, 'a', 3244 flags); 3245 } 3246 } 3247 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data) 3248 kprintf("%*.*s}(%s)\n", tab, tab, "", 3249 chain->data->ipdata.filename); 3250 else 3251 kprintf("%*.*s}\n", tab, tab, ""); 3252 } 3253 } 3254