/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/file.h>

#include "hammer2.h"

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct hammer2_pfslist hammer2_spmplist;
struct lock hammer2_mntlk;

int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
int hammer2_debug;
int hammer2_aux_flags;
int hammer2_xop_nthreads;
int hammer2_xop_sgroups;
int hammer2_xop_xgroups;
int hammer2_xop_xbase;
int hammer2_xop_mod;
long hammer2_debug_inode;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_cluster_write = 0;		/* physical write clustering */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_flush_pipe = 100;
int hammer2_dio_count;
int hammer2_dio_limit = 256;
int hammer2_bulkfree_tps = 5000;
int hammer2_spread_workers;
int hammer2_limit_saved_depth;
long hammer2_chain_allocs;
long hammer2_limit_saved_chains;
long hammer2_limit_dirty_chains;
long hammer2_limit_dirty_inodes;
long hammer2_count_modified_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
static long hammer2_iod_inode_creates;
static long hammer2_iod_inode_deletes;

long hammer2_process_icrc32;
long hammer2_process_xxhash64;

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer2_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, aux_flags, CTLFLAG_RW,
	   &hammer2_aux_flags, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer2_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, spread_workers, CTLFLAG_RW,
	   &hammer2_spread_workers, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
	   &hammer2_cluster_write, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RD,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_saved_chains, CTLFLAG_RW,
	   &hammer2_limit_saved_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, limit_saved_depth, CTLFLAG_RW,
	   &hammer2_limit_saved_depth, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW,
	   &hammer2_limit_dirty_inodes, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RD,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
	   &hammer2_dio_limit, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RD,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RD,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RD,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RD,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RD,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RD,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RD,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RD,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RD,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RD,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RD,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RD,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RD,
	   &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RD,
	   &hammer2_iod_inode_creates, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RD,
	   &hammer2_iod_inode_deletes, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RD,
	   &hammer2_process_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RD,
	   &hammer2_process_xxhash64, 0, "");

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static int hammer2_vfs_modifying(struct mount *mp);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
static int hammer2_fixup_pfses(hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_flags	= 0,
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp,
	.vfs_modifying	= hammer2_vfs_modifying
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;

	int error;
	int mod;

	error = 0;
	kmalloc_raise_limit(M_HAMMER2, 0);	/* unlimited */

	/*
	 * hammer2_xop_nthreads must be a multiple of ncpus,
	 * minimum 2 * ncpus.
	 */
	mod = ncpus;
	hammer2_xop_mod = mod;
	hammer2_xop_nthreads = mod * 2;
	while (hammer2_xop_nthreads / mod < HAMMER2_XOPGROUPS_MIN ||
	       hammer2_xop_nthreads < HAMMER2_XOPTHREADS_MIN)
	{
		hammer2_xop_nthreads += mod;
	}
	hammer2_xop_sgroups = hammer2_xop_nthreads / mod / 2;
	hammer2_xop_xgroups = hammer2_xop_nthreads / mod - hammer2_xop_sgroups;
	hammer2_xop_xbase = hammer2_xop_sgroups * mod;
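
	/*
	 * Worked example of the computation above (constants hypothetical;
	 * the real minimums come from hammer2.h): with ncpus = 8,
	 * HAMMER2_XOPGROUPS_MIN = 4 and HAMMER2_XOPTHREADS_MIN = 16,
	 * nthreads starts at 16 and is bumped in steps of 8 until
	 * 32 / 8 = 4 groups-per-cpu is reached.  Then:
	 *
	 *	hammer2_xop_sgroups = 32 / 8 / 2 = 2
	 *	hammer2_xop_xgroups = 32 / 8 - 2 = 2
	 *	hammer2_xop_xbase   = 2 * 8      = 16
	 */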

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO cache large
	 *	 enough to avoid this problem, but also cap it.
	 */
	hammer2_dio_limit = nbuf * 2;
	if (hammer2_dio_limit > 100000)
		hammer2_dio_limit = 100000;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);
	TAILQ_INIT(&hammer2_spmplist);

	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;
	if (hammer2_limit_dirty_chains < 1000)
		hammer2_limit_dirty_chains = 1000;

	hammer2_limit_dirty_inodes = maxvnodes / 25;
	if (hammer2_limit_dirty_inodes < 100)
		hammer2_limit_dirty_inodes = 100;
	if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES)
		hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES;

	hammer2_limit_saved_chains = hammer2_limit_dirty_chains * 5;

	return (error);
}

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}
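
/*
 * Usage sketch for the buffer objcaches created in hammer2_vfs_init()
 * (illustrative only; the real consumers live in the strategy code).
 * A compression path borrows a fixed 32KB scratch buffer and must return
 * it when done:
 *
 *	char *comp_buf;
 *
 *	comp_buf = objcache_get(cache_buffer_write, M_INTWAIT);
 *	... compress up to 32KB of logical data into comp_buf ...
 *	objcache_put(cache_buffer_write, comp_buf);
 */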

/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 *
 * XXX check locking
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
			    strcmp(pmp->pfs_names[0],
				   ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes",
				   sizeof(struct hammer2_inode));
		lockinit(&pmp->lock, "pfslk", 0, 0);
		hammer2_spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		hammer2_spin_init(&pmp->xop_spin, "h2xop");
		hammer2_spin_init(&pmp->lru_spin, "h2lru");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->syncq);
		TAILQ_INIT(&pmp->depq);
		TAILQ_INIT(&pmp->lru_list);
		hammer2_spin_init(&pmp->list_spin, "h2pfsalloc_list");

		/*
		 * Save the last media transaction id for the flusher.  Set
		 * initial
		 */
		if (ripdata) {
			pmp->pfs_clid = ripdata->meta.pfs_clid;
			TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
		} else {
			pmp->flags |= HAMMER2_PMPF_SPMP;
			TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry);
		}

		/*
		 * The synchronization thread may start too early, make
		 * sure it stays frozen until we are ready to let it go.
		 * XXX
		 */
		/*
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
		*/
	}

	/*
	 * Create the PFS's root inode and any missing XOP helper threads.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, 1, -1);
		if (ripdata)
			iroot->meta = ripdata->meta;
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot);
	}

	/*
	 * Stop here if no chain is passed in.
	 */
	if (chain == NULL)
		goto done;

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	if (j == HAMMER2_MAXCLUSTER) {
		kprintf("hammer2_pfsalloc: cluster full!\n");
		/* XXX fatal error? */
	} else {
		KKASSERT(chain->pmp == NULL);
		chain->pmp = pmp;
		hammer2_chain_ref(chain);
		iroot->cluster.array[j].chain = chain;
		if (force_local)
			pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
		else
			pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);
		pmp->pfs_hmps[j] = chain->hmp;
		hammer2_spin_ex(&pmp->inum_spin);
		pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset;
		hammer2_spin_unex(&pmp->inum_spin);

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++chain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 *  discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 *
	 * NOTE: We create helper threads for all mounted PFSs or any
	 *	 PFSs with 2+ nodes (so the sync thread can update them,
	 *	 even if not mounted).
	 */
	if (pmp->mp || iroot->cluster.nchains >= 2)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}

/*
 * Deallocate an element of a probed PFS.  If destroying and this is a
 * MASTER, adjust nmasters.
 *
 * This function does not physically destroy the PFS element in its device
 * under the super-root (see hammer2_ioctl_pfs_delete()).
 */
void
hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	iroot = pmp->iroot;
	if (iroot) {
		/*
		 * Stop synchronizing
		 *
		 * XXX flush after acquiring the iroot lock.
		 * XXX clean out the cluster index from all inode structures.
		 */
		hammer2_thr_delete(&pmp->sync_thrs[clindex]);

		/*
		 * Remove the cluster index from the group.  If destroying
		 * the PFS and this is a master, adjust pfs_nmasters.
		 */
		hammer2_mtx_ex(&iroot->lock);
		chain = iroot->cluster.array[clindex].chain;
		iroot->cluster.array[clindex].chain = NULL;

		switch(pmp->pfs_types[clindex]) {
		case HAMMER2_PFSTYPE_MASTER:
			if (destroying && pmp->pfs_nmasters > 0)
				--pmp->pfs_nmasters;
			/* XXX adjust ripdata->meta.pfs_nmasters */
			break;
		default:
			break;
		}
		pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;

		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Release the chain.
		 */
		if (chain) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
			hammer2_chain_drop(chain);
		}

		/*
		 * Terminate all XOP threads for the cluster index.
		 */
		if (pmp->xop_groups) {
			for (j = 0; j < hammer2_xop_nthreads; ++j) {
				hammer2_thr_delete(
					&pmp->xop_groups[j].thrs[clindex]);
			}
		}
	}
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int chains_still_present = 0;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	if (pmp->flags & HAMMER2_PMPF_SPMP)
		TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry);
	else
		TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	/*
	 * Cleanup chains remaining on LRU list.
	 */
	hammer2_spin_ex(&pmp->lru_spin);
	while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONLRU);
		atomic_add_int(&pmp->lru_count, -1);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONLRU);
		TAILQ_REMOVE(&pmp->lru_list, chain, lru_node);
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&pmp->lru_spin);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		hammer2_chain_drop(chain);
		hammer2_spin_ex(&pmp->lru_spin);
	}
	hammer2_spin_unex(&pmp->lru_spin);

	/*
	 * Clean up iroot
	 */
	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j)
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
			}
			chain = iroot->cluster.array[i].chain;
			if (chain && !RB_EMPTY(&chain->core.rbtree)) {
				kprintf("hammer2: Warning pmp %p still "
					"has active chains\n", pmp);
				chains_still_present = 1;
			}
		}
		KASSERT(iroot->refs == 1,
			("PMP->IROOT %p REFS WRONG %d", iroot, iroot->refs));

		/* ref for iroot */
		hammer2_inode_drop(iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Free remaining pmp resources
	 */
	if (chains_still_present) {
		kprintf("hammer2: cannot free pmp %p, still in use\n", pmp);
	} else {
		kmalloc_destroy_obj(&pmp->minode);
		kfree(pmp, M_HAMMER2);
	}
}
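
/*
 * Note on the LRU drain loop in hammer2_pfsfree() above: each chain is
 * referenced while lru_spin is held, but the actual hammer2_chain_drop()
 * is issued only after the spinlock is released, because a final drop can
 * block and must not occur while a spinlock is held.
 */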

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 *
 * XXX inefficient.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int i;
	int j;
	struct hammer2_pfslist *wlist;

	if (which == 0)
		wlist = &hammer2_pfslist;
	else
		wlist = &hammer2_spmplist;
again:
	TAILQ_FOREACH(pmp, wlist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i == HAMMER2_MAXCLUSTER)
			continue;

		hammer2_vfs_sync_pmp(pmp, MNT_WAIT);

		/*
		 * Make sure all synchronization threads are locked
		 * down.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}

		/*
		 * Lock the inode and clean out matching chains.
		 * Note that we cannot use hammer2_inode_lock_*()
		 * here because that would attempt to validate the
		 * cluster that we are in the middle of ripping
		 * apart.
		 *
		 * WARNING! We are working directly on the inode's
		 *	    embedded cluster.
		 */
		hammer2_mtx_ex(&iroot->lock);

		/*
		 * Remove the chain from matching elements of the PFS.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			rchain = iroot->cluster.array[i].chain;
			iroot->cluster.array[i].chain = NULL;
			pmp->pfs_types[i] = HAMMER2_PFSTYPE_NONE;
			if (pmp->pfs_names[i]) {
				kfree(pmp->pfs_names[i], M_HAMMER2);
				pmp->pfs_names[i] = NULL;
			}
			if (rchain) {
				hammer2_chain_drop(rchain);
				/* focus hint */
				if (iroot->cluster.focus == rchain)
					iroot->cluster.focus = NULL;
			}
			pmp->pfs_hmps[i] = NULL;
		}
		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			/*
			 * If this was the hmp's spmp, we need to clean
			 * a little more stuff out.
			 */
			if (hmp->spmp == pmp) {
				hmp->spmp = NULL;
				hmp->vchain.pmp = NULL;
				hmp->fchain.pmp = NULL;
			}

			/*
			 * Free the pmp and restart the loop
			 */
			KKASSERT(TAILQ_EMPTY(&pmp->syncq));
			KKASSERT(TAILQ_EMPTY(&pmp->depq));
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_remaster(&pmp->sync_thrs[i]);
			hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp	mount point structure
 *		path	NULL
 *		data	<unused>
 *		cred	<unused>
 *
 *	mount
 *		mp	mount point structure
 *		path	path to mount point
 *		data	pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred	user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp, *hmp_tmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	const hammer2_inode_data_t *ripdata;
	hammer2_devvp_list_t devvpl;
	hammer2_devvp_t *e, *e_tmp;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *label;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	int error;
	int i;

	hmp = NULL;
	pmp = NULL;
	label = NULL;
	bzero(&info, sizeof(info));

	if (path) {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		hammer2_cluster_t *cluster;

		error = 0;
		pmp = MPTOPMP(mp);
		pmp->hflags = info.hflags;
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			error = hammer2_remount(hmp, mp, path, cred);
			if (error)
				break;
		}

		return error;
	}

	if (path == NULL) {
		/*
		 * Root mount
		 */
		info.cluster_fd = -1;
		ksnprintf(devstr, sizeof(devstr), "%s",
			  mp->mnt_stat.f_mntfromname);
		done = strlen(devstr) + 1;
		kprintf("hammer2_mount: root devstr=\"%s\"\n", devstr);
	} else {
		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);
		kprintf("hammer2_mount: devstr=\"%s\"\n", devstr);
	}
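
	/*
	 * Examples of the label-defaulting rules applied below (device
	 * names are hypothetical):
	 *
	 *	mount_hammer2 /dev/ad0s1a@BOOT /boot	explicit label
	 *	mount_hammer2 /dev/ad0s1a /boot		no '@': 'a' -> @BOOT
	 *	mount_hammer2 /dev/ad0s1d /mnt		no '@': 'd' -> @ROOT
	 *	mount_hammer2 @SNAP1 /mnt/snap		no device; works only if
	 *						a label from that device
	 *						is already mounted
	 */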

	/*
	 * Extract device and label, automatically mount @BOOT, @ROOT, or
	 * @DATA if no label specified, based on the partition id.  Error
	 * out if no label or device (with partition id) is specified.
	 * This is strictly a convenience to match the default label
	 * created by newfs_hammer2; our preference is that a label always
	 * be specified.
	 *
	 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
	 *	 that does not specify a device, as long as some H2 label
	 *	 has already been mounted from that device.  This makes
	 *	 mounting snapshots a lot easier.
	 */
	label = strchr(devstr, '@');
	if (label && ((label + 1) - devstr) > done) {
		kprintf("hammer2_mount: bad label %s/%zd\n", devstr, done);
		return (EINVAL);
	}
	if (label == NULL || label[1] == 0) {
		char slice;

		if (label == NULL)
			label = devstr + strlen(devstr);
		else
			*label = '\0';		/* clean up trailing @ */

		slice = label[-1];
		switch(slice) {
		case 'a':
			label = "BOOT";
			break;
		case 'd':
			label = "ROOT";
			break;
		default:
			label = "DATA";
			break;
		}
	} else {
		*label = '\0';
		label++;
	}

	kprintf("hammer2_mount: device=\"%s\" label=\"%s\" rdonly=%d\n",
		devstr, label, ronly);

	/*
	 * Initialize all device vnodes.
	 */
	TAILQ_INIT(&devvpl);
	error = hammer2_init_devvp(devstr, path == NULL, &devvpl);
	if (error) {
		kprintf("hammer2: failed to initialize devvp in %s\n", devstr);
		hammer2_cleanup_devvp(&devvpl);
		return error;
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (!TAILQ_EMPTY(&devvpl)) {
		/*
		 * Match the device.  Due to the way devfs works,
		 * we may not be able to directly match the vnode pointer,
		 * so also check to see if the underlying device matches.
		 */
		TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) {
			TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) {
				int devvp_found = 0;
				TAILQ_FOREACH(e, &devvpl, entry) {
					KKASSERT(e->devvp);
					if (e_tmp->devvp == e->devvp)
						devvp_found = 1;
					if (e_tmp->devvp->v_rdev &&
					    e_tmp->devvp->v_rdev ==
					    e->devvp->v_rdev)
						devvp_found = 1;
				}
				if (!devvp_found)
					goto next_hmp;
			}
			hmp = hmp_tmp;
			kprintf("hammer2_mount: hmp=%p matched\n", hmp);
			break;
next_hmp:
			continue;
		}

		/*
		 * If no match this may be a fresh H2 mount, make sure
		 * the device is not mounted on anything else.
		 */
		if (hmp == NULL) {
			TAILQ_FOREACH(e, &devvpl, entry) {
				struct vnode *devvp = e->devvp;
				KKASSERT(devvp);
				error = vfs_mountedon(devvp);
				if (error) {
					kprintf("hammer2_mount: %s mounted %d\n",
						e->path, error);
					hammer2_cleanup_devvp(&devvpl);
					lockmgr(&hammer2_mntlk, LK_RELEASE);
					return error;
				}
			}
		}
	} else {
		/*
		 * Match the label to a pmp already probed.
		 */
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_names[i] &&
				    strcmp(pmp->pfs_names[i], label) == 0) {
					hmp = pmp->pfs_hmps[i];
					break;
				}
			}
			if (hmp)
				break;
		}
		if (hmp == NULL) {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return ENOENT;
		}
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;
		hammer2_xop_head_t xop;

		/*
		 * Now open the device
		 */
		KKASSERT(!TAILQ_EMPTY(&devvpl));
		error = hammer2_open_devvp(&devvpl, ronly);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}

		/*
		 * Construct volumes and link with device vnodes.
		 */
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->devvp = NULL;
		error = hammer2_init_volumes(mp, &devvpl, hmp->volumes,
					     &hmp->voldata, &hmp->volhdrno,
					     &hmp->devvp);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			kfree(hmp, M_HAMMER2);
			return error;
		}
		if (!hmp->devvp) {
			kprintf("hammer2: failed to initialize root volume\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s",
			  devstr);
		hmp->ronly = ronly;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains",
				   sizeof(struct hammer2_chain));
		kmalloc_create_obj(&hmp->mio, "HAMMER2-dio",
				   sizeof(struct hammer2_io));
		kmalloc_create(&hmp->mmsg, "HAMMER2-msg");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		hammer2_spin_init(&hmp->io_spin, "h2mount_io");
		hammer2_spin_init(&hmp->list_spin, "h2mount_list");

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);
		lockinit(&hmp->bflock, "h2bflk", 0, 0);

		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hammer2_chain_init(&hmp->vchain);

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level radix tree
		 * so it does not interfere with the volume's topology
		 * radix tree.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hammer2_chain_init(&hmp->fchain);

		/*
		 * Initialize volume header related fields.
		 */
		KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO ||
			 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO);
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
		/*
		 * Must use hmp instead of volume header for these two
		 * in order to handle volume versions transparently.
		 */
		if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) {
			hmp->nvolumes = hmp->voldata.nvolumes;
			hmp->total_size = hmp->voldata.total_size;
		} else {
			hmp->nvolumes = 1;
			hmp->total_size = hmp->voldata.volu_size;
		}
		KKASSERT(hmp->nvolumes > 0);

		/*
		 * Move devvpl entries to hmp.
		 */
		TAILQ_INIT(&hmp->devvpl);
		while ((e = TAILQ_FIRST(&devvpl)) != NULL) {
			TAILQ_REMOVE(&devvpl, e, entry);
			TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry);
		}
		KKASSERT(TAILQ_EMPTY(&devvpl));
		KKASSERT(!TAILQ_EMPTY(&hmp->devvpl));

		/*
		 * Really important to get these right or the flush and
		 * teardown code will get confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, NULL);
		spmp = hmp->spmp;
		spmp->pfs_hmps[0] = hmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory
		 * keyspace represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &error, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &schain->data->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		hammer2_dummy_xop_from_chain(&xop, schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = NULL;
		spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(&xop.cluster);
		hammer2_chain_drop(schain);
		/* do not call hammer2_cluster_drop() on an embedded cluster */
		schain = NULL;	/* now invalid */
		/* leave spmp->iroot with one ref */

		if (!hmp->ronly) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);
		hammer2_bulkfree_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		if (info.cluster_fd >= 0) {
			fp = holdfp(curthread, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
	} else {
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2_mount: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
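
	/*
	 * (The lookup loop above scans the full collision range for the
	 * directory hash, lhc through lhc + HAMMER2_DIRHASH_LOMASK, because
	 * multiple filenames can hash to the same bucket; the strcmp()
	 * against ipdata.filename disambiguates collisions.)
	 */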
	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		if (error) {
			kprintf("hammer2_mount: PFS label I/O error\n");
			return EINVAL;
		} else {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			return ENOENT;
		}
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps() so do not pass cluster in to add to
	 * available chains).
	 *
	 * Check if the cluster has already been mounted.  A cluster can
	 * only be mounted once, use null mounts to mount additional copies.
	 */
	if (chain->error) {
		kprintf("hammer2_mount: PFS label I/O error\n");
	} else {
		ripdata = &chain->data->ipdata;
		pmp = hammer2_pfsalloc(NULL, ripdata, force_local);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	/*
	 * PFS to mount must exist at this point.
	 */
	if (pmp == NULL) {
		kprintf("hammer2_mount: Failed to acquire PFS structure\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}

	/*
	 * Finish the mount
	 */
	kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp);

	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_key_t key_next;
	int error;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
		} else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			kprintf("Non inode chain type %d under super-root\n",
				chain->bref.type);
		} else {
			ripdata = &chain->data->ipdata;
			hammer2_pfsalloc(chain, ripdata, force_local);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
}

static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct ucred *cred)
{
	hammer2_volume_t *vol;
	struct vnode *devvp;
	int i, error, result = 0;

	if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)))
		return 0;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = &hmp->volumes[i];
		devvp = vol->dev->devvp;
		KKASSERT(devvp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
		vn_unlock(devvp);
		error = 0;
		if (vol->id == HAMMER2_ROOT_VOLUME) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			VOP_CLOSE(devvp, FREAD, NULL);
		} else {
			VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
		}
		vn_unlock(devvp);
		result |= error;
	}
	if (result == 0) {
		kprintf("hammer2: enable read/write\n");
		hmp->ronly = 0;
	}

	return result;
}
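
/*
 * A read-only -> read-write upgrade is normally driven from userland via
 * something like "mount -u -o rw <mtpt>" (example invocation), which
 * causes the kernel to set MNTK_WANTRDWR and re-enter hammer2_vfs_mount()
 * with MNT_UPDATE, landing in hammer2_remount() above.
 */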

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	if (pmp->mp)
		hammer2_unmount_helper(mp, pmp, NULL);

	error = 0;
failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Unmount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int dumpcnt;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have
	 * to disconnect the mount, adjust mount_count, and locate devices
	 * that might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT(MPTOPMP(mp) == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	if (hmp->mount_count)
		return;

	/*
	 * Decommission the network before we start messing with the
	 * device and PFS.
	 */
	hammer2_iocom_uninit(hmp);

	hammer2_bulkfree_uninit(hmp);
	hammer2_pfsfree_scan(hmp, 0);

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);
#endif

	/*
	 * Flush whatever is left.  Unmounted but modified PFS's might still
	 * have some dirty chains on them.
	 */
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);

	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_voldata_modify(hmp);
		hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->fchain);

	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->vchain);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	hammer2_pfsfree_scan(hmp, 1);

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if (!TAILQ_EMPTY(&hmp->devvpl)) {
		hammer2_close_devvp(&hmp->devvpl, hmp->ronly);
		hammer2_cleanup_devvp(&hmp->devvpl);
	}
	KKASSERT(TAILQ_EMPTY(&hmp->devvpl));

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1);
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1);

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
        /*
         * Final drop of embedded volume root chain to clean
         * up vchain.core (vchain structure is not flagged
         * ALLOCATED so it is cleaned out and then left to
         * rot).
         */
        hammer2_chain_drop(&hmp->vchain);

        hammer2_io_cleanup(hmp, &hmp->iotree);
        if (hmp->iofree_count) {
                kprintf("io_cleanup: %d I/O's left hanging\n",
                        hmp->iofree_count);
        }

        TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
        kmalloc_destroy_obj(&hmp->mchain);
        kmalloc_destroy_obj(&hmp->mio);
        kmalloc_destroy(&hmp->mmsg);
        kfree(hmp, M_HAMMER2);
}

int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
                 ino_t ino, struct vnode **vpp)
{
        hammer2_xop_lookup_t *xop;
        hammer2_pfs_t *pmp;
        hammer2_inode_t *ip;
        hammer2_tid_t inum;
        int error;

        inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;

        error = 0;
        pmp = MPTOPMP(mp);

        /*
         * Easy if we already have it cached
         */
        ip = hammer2_inode_lookup(pmp, inum);
        if (ip) {
                hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
                *vpp = hammer2_igetv(ip, &error);
                hammer2_inode_unlock(ip);
                hammer2_inode_drop(ip);         /* from lookup */

                return error;
        }

        /*
         * Otherwise we have to find the inode
         */
        xop = hammer2_xop_alloc(pmp->iroot, 0);
        xop->lhc = inum;
        hammer2_xop_start(&xop->head, &hammer2_lookup_desc);
        error = hammer2_xop_collect(&xop->head, 0);

        if (error == 0)
                ip = hammer2_inode_get(pmp, &xop->head, -1, -1);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        if (ip) {
                *vpp = hammer2_igetv(ip, &error);
                hammer2_inode_unlock(ip);
        } else {
                *vpp = NULL;
                error = ENOENT;
        }
        return (error);
}
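/*
 * Note on the HAMMER2_DIRHASH_USERMSK masking in hammer2_vfs_vget()
 * above: inode numbers handed in by the kernel (e.g. via the NFS file
 * handle path in hammer2_vfs_fhtovp() below) may carry DIRHASH flag
 * bits in the high bits of the 64-bit key, so the mask recovers the
 * bare inode number before the lookup.
 */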
static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;
        int error;

        pmp = MPTOPMP(mp);
        if (pmp->iroot == NULL) {
                kprintf("hammer2 (%s): no root inode\n",
                        mp->mnt_stat.f_mntfromname);
                *vpp = NULL;
                return EINVAL;
        }

        error = 0;
        hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);

        while (pmp->inode_tid == 0) {
                hammer2_xop_ipcluster_t *xop;
                const hammer2_inode_meta_t *meta;

                xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
                hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc);
                error = hammer2_xop_collect(&xop->head, 0);

                if (error == 0) {
                        meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta;
                        pmp->iroot->meta = *meta;
                        pmp->inode_tid = meta->pfs_inum + 1;
                        hammer2_xop_pdata(&xop->head);
                        /* meta invalid */

                        if (pmp->inode_tid < HAMMER2_INODE_START)
                                pmp->inode_tid = HAMMER2_INODE_START;
                        pmp->modify_tid =
                                xop->head.cluster.focus->bref.modify_tid + 1;
#if 0
                        kprintf("PFS: Starting inode %jd\n",
                                (intmax_t)pmp->inode_tid);
                        kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
                                pmp->inode_tid, pmp->modify_tid);
#endif
                        wakeup(&pmp->iroot);

                        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

                        /*
                         * Prime the mount info.
                         */
                        hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
                        break;
                }

                /*
                 * Loop, try again
                 */
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                hammer2_inode_unlock(pmp->iroot);
                error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
                hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
                if (error == EINTR)
                        break;
        }

        if (error) {
                hammer2_inode_unlock(pmp->iroot);
                *vpp = NULL;
        } else {
                vp = hammer2_igetv(pmp->iroot, &error);
                hammer2_inode_unlock(pmp->iroot);
                *vpp = vp;
        }

        return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
        hammer2_pfs_t *pmp;
        hammer2_dev_t *hmp;
        hammer2_blockref_t bref;
        struct statfs tmp;
        int i;

        /*
         * NOTE: iroot might not have validated the cluster yet.
         */
        pmp = MPTOPMP(mp);

        bzero(&tmp, sizeof(tmp));

        for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
                hmp = pmp->pfs_hmps[i];
                if (hmp == NULL)
                        continue;
                if (pmp->iroot->cluster.array[i].chain)
                        bref = pmp->iroot->cluster.array[i].chain->bref;
                else
                        bzero(&bref, sizeof(bref));

                tmp.f_files = bref.embed.stats.inode_count;
                tmp.f_ffree = 0;
                tmp.f_blocks = hmp->voldata.allocator_size /
                               mp->mnt_vstat.f_bsize;
                tmp.f_bfree = hmp->voldata.allocator_free /
                              mp->mnt_vstat.f_bsize;
                tmp.f_bavail = tmp.f_bfree;

                if (cred && cred->cr_uid != 0) {
                        uint64_t adj;

                        /* 5% */
                        adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
                        tmp.f_blocks -= adj;
                        tmp.f_bfree -= adj;
                        tmp.f_bavail -= adj;
                }

                mp->mnt_stat.f_blocks = tmp.f_blocks;
                mp->mnt_stat.f_bfree = tmp.f_bfree;
                mp->mnt_stat.f_bavail = tmp.f_bavail;
                mp->mnt_stat.f_files = tmp.f_files;
                mp->mnt_stat.f_ffree = tmp.f_ffree;

                *sbp = mp->mnt_stat;
        }
        return (0);
}
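/*
 * Worked example for the reserve adjustment above (illustrative
 * numbers only, assuming a 64KB f_bsize): a 1TB volume yields
 * 16777216 blocks, and with free_reserved at 5% of the media the
 * adjustment shaves roughly 838860 blocks off f_blocks, f_bfree and
 * f_bavail, so non-root users see the filesystem as full before the
 * root-only reserve is consumed.
 */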
static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
        hammer2_pfs_t *pmp;
        hammer2_dev_t *hmp;
        hammer2_blockref_t bref;
        struct statvfs tmp;
        int i;

        /*
         * NOTE: iroot might not have validated the cluster yet.
         */
        pmp = MPTOPMP(mp);
        bzero(&tmp, sizeof(tmp));

        for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
                hmp = pmp->pfs_hmps[i];
                if (hmp == NULL)
                        continue;
                if (pmp->iroot->cluster.array[i].chain)
                        bref = pmp->iroot->cluster.array[i].chain->bref;
                else
                        bzero(&bref, sizeof(bref));

                tmp.f_files = bref.embed.stats.inode_count;
                tmp.f_ffree = 0;
                tmp.f_blocks = hmp->voldata.allocator_size /
                               mp->mnt_vstat.f_bsize;
                tmp.f_bfree = hmp->voldata.allocator_free /
                              mp->mnt_vstat.f_bsize;
                tmp.f_bavail = tmp.f_bfree;

                if (cred && cred->cr_uid != 0) {
                        uint64_t adj;

                        /* 5% */
                        adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
                        tmp.f_blocks -= adj;
                        tmp.f_bfree -= adj;
                        tmp.f_bavail -= adj;
                }

                mp->mnt_vstat.f_blocks = tmp.f_blocks;
                mp->mnt_vstat.f_bfree = tmp.f_bfree;
                mp->mnt_vstat.f_bavail = tmp.f_bavail;
                mp->mnt_vstat.f_files = tmp.f_files;
                mp->mnt_vstat.f_ffree = tmp.f_ffree;

                *sbp = mp->mnt_vstat;
        }
        return (0);
}

/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  In case of a crash, then on a fresh mount we must do an
 * incremental scan of the last committed transaction id and make sure that
 * all related blocks have been marked allocated.
 */
struct hammer2_recovery_elm {
        TAILQ_ENTRY(hammer2_recovery_elm) entry;
        hammer2_chain_t *chain;
        hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
        struct hammer2_recovery_list list;
        hammer2_tid_t mtid;
        int     depth;
};

static int hammer2_recovery_scan(hammer2_dev_t *hmp,
                        hammer2_chain_t *parent,
                        struct hammer2_recovery_info *info,
                        hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH       10

static
int
hammer2_recovery(hammer2_dev_t *hmp)
{
        struct hammer2_recovery_info info;
        struct hammer2_recovery_elm *elm;
        hammer2_chain_t *parent;
        hammer2_tid_t sync_tid;
        hammer2_tid_t mirror_tid;
        int error;

        hammer2_trans_init(hmp->spmp, 0);

        sync_tid = hmp->voldata.freemap_tid;
        mirror_tid = hmp->voldata.mirror_tid;

        kprintf("hammer2_mount: \"%s\": ", hmp->devrepname);
        if (sync_tid >= mirror_tid) {
                kprintf("no recovery needed\n");
        } else {
                kprintf("freemap recovery %016jx-%016jx\n",
                        sync_tid + 1, mirror_tid);
        }

        TAILQ_INIT(&info.list);
        info.depth = 0;
        parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
        error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
        hammer2_chain_lookup_done(parent);

        while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
                TAILQ_REMOVE(&info.list, elm, entry);
                parent = elm->chain;
                sync_tid = elm->sync_tid;
                kfree(elm, M_HAMMER2);

                hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
                error |= hammer2_recovery_scan(hmp, parent, &info,
                                               hmp->voldata.freemap_tid);
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);     /* drop elm->chain ref */
        }

        hammer2_trans_done(hmp->spmp, 0);

        return error;
}
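/*
 * hammer2_recovery_scan() below bounds its kernel stack use by deferring
 * subtrees once info->depth reaches HAMMER2_RECOVERY_MAXDEPTH rather
 * than recursing arbitrarily deep.  A minimal sketch of the pattern,
 * with defer() and scan_children() as hypothetical stand-ins:
 */
#if 0
        if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
                defer(info, parent);    /* ref parent, queue on info->list */
                return 0;               /* reprocessed by hammer2_recovery() */
        }
        ++info->depth;
        error = scan_children(hmp, parent, info, sync_tid);
        --info->depth;
#endif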
static
int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
                      struct hammer2_recovery_info *info,
                      hammer2_tid_t sync_tid)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_chain_t *chain;
        hammer2_blockref_t bref;
        int tmp_error;
        int rup_error;
        int error;
        int first;

        /*
         * Adjust freemap to ensure that the block(s) are marked allocated.
         */
        if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
                hammer2_freemap_adjust(hmp, &parent->bref,
                                       HAMMER2_FREEMAP_DORECOVER);
        }

        /*
         * Check type for recursive scan
         */
        switch(parent->bref.type) {
        case HAMMER2_BREF_TYPE_VOLUME:
                /* data already instantiated */
                break;
        case HAMMER2_BREF_TYPE_INODE:
                /*
                 * Must instantiate data for DIRECTDATA test and also
                 * for recursion.
                 */
                hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
                ripdata = &parent->data->ipdata;
                if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
                        /* not applicable to recovery scan */
                        hammer2_chain_unlock(parent);
                        return 0;
                }
                hammer2_chain_unlock(parent);
                break;
        case HAMMER2_BREF_TYPE_INDIRECT:
                /*
                 * Must instantiate data for recursion
                 */
                hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
                hammer2_chain_unlock(parent);
                break;
        case HAMMER2_BREF_TYPE_DIRENT:
        case HAMMER2_BREF_TYPE_DATA:
        case HAMMER2_BREF_TYPE_FREEMAP:
        case HAMMER2_BREF_TYPE_FREEMAP_NODE:
        case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
                /* not applicable to recovery scan */
                return 0;
        default:
                return HAMMER2_ERROR_BADBREF;
        }

        /*
         * Defer operation if depth limit reached.
         */
        if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
                struct hammer2_recovery_elm *elm;

                elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
                elm->chain = parent;
                elm->sync_tid = sync_tid;
                hammer2_chain_ref(parent);
                TAILQ_INSERT_TAIL(&info->list, elm, entry);
                /* unlocked by caller */

                return(0);
        }

        /*
         * Recursive scan of the last flushed transaction only.  We are
         * doing this without pmp assignments so don't leave the chains
         * hanging around after we are done with them.
         *
         * error        Cumulative error this level only
         * rup_error    Cumulative error for recursion
         * tmp_error    Specific non-cumulative recursion error
         */
        chain = NULL;
        first = 1;
        rup_error = 0;
        error = 0;

        for (;;) {
                error |= hammer2_chain_scan(parent, &chain, &bref,
                                            &first,
                                            HAMMER2_LOOKUP_NODATA);

                /*
                 * Problem during scan or EOF
                 */
                if (error)
                        break;

                /*
                 * If this is a leaf
                 */
                if (chain == NULL) {
                        if (bref.mirror_tid > sync_tid) {
                                hammer2_freemap_adjust(hmp, &bref,
                                                HAMMER2_FREEMAP_DORECOVER);
                        }
                        continue;
                }

                /*
                 * This may or may not be a recursive node.
                 */
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
                if (bref.mirror_tid > sync_tid) {
                        ++info->depth;
                        tmp_error = hammer2_recovery_scan(hmp, chain,
                                                          info, sync_tid);
                        --info->depth;
                } else {
                        tmp_error = 0;
                }

                /*
                 * Flush the recovery at the PFS boundary to stage it for
                 * the final flush of the super-root topology.
                 */
                if (tmp_error == 0 &&
                    (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
                    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
                        hammer2_flush(chain, HAMMER2_FLUSH_TOP |
                                             HAMMER2_FLUSH_ALL);
                }
                rup_error |= tmp_error;
        }
        return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
}
/*
 * This fixes up an error introduced in earlier H2 implementations where
 * moving a PFS inode into an indirect block wound up causing the
 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared.
 */
static
int
hammer2_fixup_pfses(hammer2_dev_t *hmp)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        hammer2_pfs_t *spmp;
        int error;

        error = 0;

        /*
         * Lookup mount point under the media-localized super-root.
         *
         * cluster->pmp will incorrectly point to spmp and must be fixed
         * up later on.
         */
        spmp = hmp->spmp;
        hammer2_inode_lock(spmp->iroot, 0);
        parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
                                     &error, 0);
        while (chain) {
                if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
                        /*
                         * Skip non-inode chains, advancing first so the
                         * loop cannot spin forever on the same chain.
                         */
                        chain = hammer2_chain_next(&parent, chain, &key_next,
                                                   key_next, HAMMER2_KEY_MAX,
                                                   &error, 0);
                        continue;
                }
                if (chain->error) {
                        kprintf("I/O error scanning PFS labels\n");
                        error |= chain->error;
                } else if ((chain->bref.flags &
                            HAMMER2_BREF_FLAG_PFSROOT) == 0) {
                        int error2;

                        ripdata = &chain->data->ipdata;
                        hammer2_trans_init(hmp->spmp, 0);
                        error2 = hammer2_chain_modify(chain,
                                                      chain->bref.modify_tid,
                                                      0, 0);
                        if (error2 == 0) {
                                kprintf("hammer2: Correct mis-flagged PFS %s\n",
                                        ripdata->filename);
                                chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
                        } else {
                                error |= error2;
                        }
                        hammer2_flush(chain, HAMMER2_FLUSH_TOP |
                                             HAMMER2_FLUSH_ALL);
                        hammer2_trans_done(hmp->spmp, 0);
                }
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, HAMMER2_KEY_MAX,
                                           &error, 0);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_inode_unlock(spmp->iroot);

        return error;
}

/*
 * Sync a mount point; this is called periodically on a per-mount basis from
 * the filesystem syncer, and whenever a user issues a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
        int error;

        error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor);

        return error;
}

/*
 * Because frontend operations lock vnodes before we get a chance to
 * lock the related inode, we can't just acquire a vnode lock without
 * risking a deadlock.  The frontend may be holding a vnode lock while
 * also blocked on our SYNCQ flag while trying to get the inode lock.
 *
 * To deal with this situation we check the vnode lock state after
 * locking the inode and perform a work-around.
 */
int
hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor)
{
        hammer2_inode_t *ip;
        hammer2_depend_t *depend;
        hammer2_depend_t *depend_next;
        struct vnode *vp;
        uint32_t pass2;
        int error;
        int wakecount;
        int dorestart;

        /*
         * Move all inodes on sideq to syncq.  This will clear sideq.
         * This should represent all flushable inodes.  These inodes
         * will already have refs due to being on syncq or sideq.  We
         * must do this all at once with the spinlock held to ensure that
         * all inode dependencies are part of the same flush.
         *
         * We should be able to do this asynchronously from frontend
         * operations because we will be locking the inodes later on
         * to actually flush them, and that will partition any frontend
         * op using the same inode.  Either it has already locked the
         * inode and we will block, or it has not yet locked the inode
         * and it will block until we are finished flushing that inode.
         *
         * When restarting, only move the inodes flagged as PASS2 from
         * SIDEQ to SYNCQ.  PASS2 propagation by inode_lock4() and
         * inode_depend() is atomic with the spin-lock.
         */
        hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
#ifdef HAMMER2_DEBUG_SYNC
        kprintf("FILESYSTEM SYNC BOUNDARY\n");
#endif
        dorestart = 0;

        /*
         * Move inodes from depq to syncq, releasing the related
         * depend structures.
         */
restart:
#ifdef HAMMER2_DEBUG_SYNC
        kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart);
#endif
        hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/);
        hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN);

        /*
         * Move inodes from depq to syncq.  When restarting, only depq's
         * marked pass2 are moved.
         */
        hammer2_spin_ex(&pmp->list_spin);
        depend_next = TAILQ_FIRST(&pmp->depq);
        wakecount = 0;

        while ((depend = depend_next) != NULL) {
                depend_next = TAILQ_NEXT(depend, entry);
                if (dorestart && depend->pass2 == 0)
                        continue;
                TAILQ_FOREACH(ip, &depend->sideq, entry) {
                        KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
                        atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
                        atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
                        ip->depend = NULL;
                }

                /*
                 * NOTE: pmp->sideq_count includes both sideq and syncq
                 */
                TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry);

                depend->count = 0;
                depend->pass2 = 0;
                TAILQ_REMOVE(&pmp->depq, depend, entry);
        }

        hammer2_spin_unex(&pmp->list_spin);
        hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/
                                      HAMMER2_TRANS_WAITING);
        dorestart = 0;

        /*
         * sideq_count may have dropped enough to allow us to unstall
         * the frontend.
         */
        hammer2_pfs_memory_wakeup(pmp, 0);

        /*
         * Now run through all inodes on syncq.
         *
         * Flush transactions only interlock with other flush transactions.
         * Any conflicting frontend operations will block on the inode, but
         * may hold a vnode lock while doing so.
         */
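        /*
         * The SYNCQ->PASS2 transition below uses the standard cmpset
         * retry idiom.  A minimal sketch of the pattern against a
         * hypothetical flags word (CLEAR_BITS/SET_BITS stand in for
         * the real masks):
         */
#if 0
        for (;;) {
                uint32_t oflags = *flagsp;

                cpu_ccfence();
                if (atomic_cmpset_int(flagsp, oflags,
                                      (oflags & ~CLEAR_BITS) | SET_BITS)) {
                        break;  /* transition applied atomically */
                }
                /* lost a race against another cpu, re-read and retry */
        }
#endif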
        hammer2_spin_ex(&pmp->list_spin);
        while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) {
                /*
                 * Remove the inode from the SYNCQ, transfer the syncq ref
                 * to us.  We must clear SYNCQ to allow any potential
                 * front-end deadlock to proceed.  We must set PASS2 so
                 * the dependency code knows what to do.
                 */
                pass2 = ip->flags;
                cpu_ccfence();
                if (atomic_cmpset_int(&ip->flags,
                              pass2,
                              (pass2 & ~(HAMMER2_INODE_SYNCQ |
                                         HAMMER2_INODE_SYNCQ_WAKEUP)) |
                              HAMMER2_INODE_SYNCQ_PASS2) == 0)
                {
                        continue;
                }
                TAILQ_REMOVE(&pmp->syncq, ip, entry);
                --pmp->sideq_count;
                hammer2_spin_unex(&pmp->list_spin);

                /*
                 * Tickle anyone waiting on ip->flags or the hysteresis
                 * on the dirty inode count.
                 */
                if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP)
                        wakeup(&ip->flags);
                if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) {
                        wakecount = 0;
                        hammer2_pfs_memory_wakeup(pmp, 0);
                }

                /*
                 * Relock the inode, and we inherit a ref from the above.
                 * We will check for a race after we acquire the vnode.
                 */
                hammer2_mtx_ex(&ip->lock);

                /*
                 * We need the vp in order to vfsync() dirty buffers, so if
                 * one isn't attached we can skip it.
                 *
                 * Ordering the inode lock and then the vnode lock has the
                 * potential to deadlock.  If we had left SYNCQ set that could
                 * also deadlock us against the frontend even if we don't hold
                 * any locks, but the latter is not a problem now since we
                 * cleared it.  igetv will temporarily release the inode lock
                 * in a safe manner to work around the deadlock.
                 *
                 * Unfortunately it is still possible to deadlock when the
                 * frontend obtains multiple inode locks, because all the
                 * related vnodes are already locked (nor can the vnode locks
                 * be released and reacquired without messing up RECLAIM and
                 * INACTIVE sequencing).
                 *
                 * The solution for now is to move the vp back onto SIDEQ
                 * and set dorestart, which will restart the flush after we
                 * exhaust the current SYNCQ.  Note that additional
                 * dependencies may build up, so we definitely need to move
                 * the whole SIDEQ back to SYNCQ when we restart.
                 */
                vp = ip->vp;
                if (vp) {
                        if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) {
                                /*
                                 * Failed to get the vnode, requeue the inode
                                 * (PASS2 is already set so it will be found
                                 * again on the restart).
                                 *
                                 * Then unlock, possibly sleep, and retry
                                 * later.  We sleep if PASS2 was *previously*
                                 * set, before we set it again above.
                                 */
                                vp = NULL;
                                dorestart = 1;
#ifdef HAMMER2_DEBUG_SYNC
                                kprintf("inum %ld (sync delayed by vnode)\n",
                                        (long)ip->meta.inum);
#endif
                                hammer2_inode_delayed_sideq(ip);

                                hammer2_mtx_unlock(&ip->lock);
                                hammer2_inode_drop(ip);

                                if (pass2 & HAMMER2_INODE_SYNCQ_PASS2) {
                                        tsleep(&dorestart, 0, "h2syndel", 2);
                                }
                                hammer2_spin_ex(&pmp->list_spin);
                                continue;
                        }
                } else {
                        vp = NULL;
                }

                /*
                 * If the inode wound up on a SIDEQ again it will already be
                 * prepped for another PASS2.  In this situation if we flush
                 * it now we will just wind up flushing it again in the same
                 * syncer run, so we might as well not flush it now.
                 */
                if (ip->flags & HAMMER2_INODE_SIDEQ) {
                        hammer2_mtx_unlock(&ip->lock);
                        hammer2_inode_drop(ip);
                        if (vp)
                                vput(vp);
                        dorestart = 1;
                        hammer2_spin_ex(&pmp->list_spin);
                        continue;
                }

                /*
                 * Ok we have the inode exclusively locked and if vp is
                 * not NULL that will also be exclusively locked.  Do the
                 * meat of the flush.
                 *
                 * vp token needed for v_rbdirty_tree check / vclrisdirty
                 * sequencing.  We hold the vnode exclusively, though, so
                 * we shouldn't actually need the token in this case.
                 */
                if (vp) {
                        vfsync(vp, MNT_WAIT, 1, NULL, NULL);
                        bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */
                }

                /*
                 * If the inode has not yet been inserted into the tree
                 * we must do so.  Then sync and flush it.  The flush should
                 * update the parent.
                 */
                if (ip->flags & HAMMER2_INODE_DELETING) {
#ifdef HAMMER2_DEBUG_SYNC
                        kprintf("inum %ld destroy\n", (long)ip->meta.inum);
#endif
                        hammer2_inode_chain_des(ip);
                        atomic_add_long(&hammer2_iod_inode_deletes, 1);
                } else if (ip->flags & HAMMER2_INODE_CREATING) {
#ifdef HAMMER2_DEBUG_SYNC
                        kprintf("inum %ld insert\n", (long)ip->meta.inum);
#endif
                        hammer2_inode_chain_ins(ip);
                        atomic_add_long(&hammer2_iod_inode_creates, 1);
                }
#ifdef HAMMER2_DEBUG_SYNC
                kprintf("inum %ld chain-sync\n", (long)ip->meta.inum);
#endif

                /*
                 * Because I kinda messed up the design and index the inodes
                 * under the root inode, alongside the directory entries,
                 * we can't flush the inode index under the iroot until the
                 * end.  If we do it now we might miss effects created by
                 * other inodes on the SYNCQ.
                 *
                 * Do a normal (non-FSSYNC) flush instead, which allows the
                 * vnode code to work the same.  We don't want to force iroot
                 * back onto the SIDEQ, and we also don't want the flush code
                 * to update pfs_iroot_blocksets until the final flush later.
                 *
                 * XXX at the moment this will likely result in a double-flush
                 * of the iroot chain.
                 */
                hammer2_inode_chain_sync(ip);
                if (ip == pmp->iroot) {
                        hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
                } else {
                        hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
                                                      HAMMER2_XOP_FSSYNC);
                }
                if (vp) {
                        lwkt_gettoken(&vp->v_token);
                        if ((ip->flags & (HAMMER2_INODE_MODIFIED |
                                          HAMMER2_INODE_RESIZED |
                                          HAMMER2_INODE_DIRTYDATA)) == 0 &&
                            RB_EMPTY(&vp->v_rbdirty_tree) &&
                            !bio_track_active(&vp->v_track_write)) {
                                vclrisdirty(vp);
                        } else {
                                hammer2_inode_delayed_sideq(ip);
                        }
                        lwkt_reltoken(&vp->v_token);
                        vput(vp);
                        vp = NULL;      /* safety */
                }
                atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2);
                hammer2_inode_unlock(ip);       /* unlock+drop */
                /* ip pointer invalid */

                /*
                 * If the inode got dirtied after we dropped our locks,
                 * it will have already been moved back to the SIDEQ.
                 */
                hammer2_spin_ex(&pmp->list_spin);
        }
        hammer2_spin_unex(&pmp->list_spin);
        hammer2_pfs_memory_wakeup(pmp, 0);

        if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) {
#ifdef HAMMER2_DEBUG_SYNC
                kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n");
                /*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/
#endif
                dorestart = 1;
                goto restart;
        }
#ifdef HAMMER2_DEBUG_SYNC
        kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n");
        /*tsleep(&dorestart, 0, "h2STG2", hz*20);*/
#endif
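        /*
         * Recap of stage 1 above: (1) depq/sideq inodes were moved to
         * syncq under the spinlock, (2) each inode was locked, vfsync'd,
         * chain-synced and chain-flushed, requeueing to the sideq when
         * its vnode lock could not be acquired, and (3) the whole pass
         * restarts while dorestart or TRANS_RESCAN is set.  Stage 2
         * below finishes with the PFS root.
         */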
        /*
         * We have to flush the PFS root last, even if it does not appear to
         * be dirty, because all the inodes in the PFS are indexed under it.
         * The normal flushing of iroot above would only occur if directory
         * entries under the root were changed.
         *
         * Specifying VOLHDR will cause an additional flush of hmp->spmp
         * for the media making up the cluster.
         */
        if ((ip = pmp->iroot) != NULL) {
                hammer2_inode_ref(ip);
                hammer2_mtx_ex(&ip->lock);
                hammer2_inode_chain_sync(ip);
                hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
                                              HAMMER2_XOP_FSSYNC |
                                              HAMMER2_XOP_VOLHDR);
                hammer2_inode_unlock(ip);       /* unlock+drop */
        }
#ifdef HAMMER2_DEBUG_SYNC
        kprintf("FILESYSTEM SYNC STAGE 2 DONE\n");
#endif

        /*
         * device bioq sync
         */
        hammer2_bioq_sync(pmp);

        error = 0;      /* XXX */
        hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH);

        return (error);
}

static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
        hammer2_inode_t *ip;

        KKASSERT(MAXFIDSZ >= 16);
        ip = VTOI(vp);
        fhp->fid_len = offsetof(struct fid, fid_data[16]);
        fhp->fid_ext = 0;
        ((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
        ((hammer2_tid_t *)fhp->fid_data)[1] = 0;

        return 0;
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
               struct fid *fhp, struct vnode **vpp)
{
        hammer2_tid_t inum;
        int error;

        inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
        if (vpp) {
                if (inum == 1)
                        error = hammer2_vfs_root(mp, vpp);
                else
                        error = hammer2_vfs_vget(mp, NULL, inum, vpp);
        } else {
                error = 0;
        }
        return error;
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                 int *exflagsp, struct ucred **credanonp)
{
        hammer2_pfs_t *pmp;
        struct netcred *np;
        int error;

        pmp = MPTOPMP(mp);
        np = vfs_export_lookup(mp, &pmp->export, nam);
        if (np) {
                *exflagsp = np->netc_exflags;
                *credanonp = &np->netc_anon;
                error = 0;
        } else {
                error = EACCES;
        }
        return error;
}

/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
        atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
        int lwinprog;

        lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
        if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
            (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
                atomic_clear_int(&pmp->count_lwinprog,
                                 HAMMER2_LWINPROG_WAITING);
                wakeup(&pmp->count_lwinprog);
        }
        if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
            (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
                atomic_clear_int(&pmp->count_lwinprog,
                                 HAMMER2_LWINPROG_WAITING0);
                wakeup(&pmp->count_lwinprog);
        }
}
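/*
 * Typical producer-side use of the pipe (sketch only; queue_to_backend()
 * is a hypothetical stand-in for the logical write path's dispatch):
 *
 *      hammer2_lwinprog_ref(pmp);
 *      queue_to_backend(bio);
 *      hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
 *
 * The backend side calls hammer2_lwinprog_drop() as each BIO completes,
 * waking the producer once the pipe drains below the hysteresis point.
 */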
void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
        int lwinprog;
        int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
                                    HAMMER2_LWINPROG_WAITING0;

        for (;;) {
                lwinprog = pmp->count_lwinprog;
                cpu_ccfence();
                if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
                        break;
                tsleep_interlock(&pmp->count_lwinprog, 0);
                atomic_set_int(&pmp->count_lwinprog, lwflag);
                lwinprog = pmp->count_lwinprog;
                if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
                        break;
                tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
        }
}

/*
 * It is possible for an excessive number of dirty chains or dirty inodes
 * to build up.  When this occurs we start an asynchronous filesystem sync.
 * If the level continues to build up, we stall, waiting for it to drop,
 * with some hysteresis.
 *
 * This relies on the kernel calling hammer2_vfs_modifying() prior to
 * obtaining any vnode locks before making a modifying VOP call.
 */
static int
hammer2_vfs_modifying(struct mount *mp)
{
        if (mp->mnt_flag & MNT_RDONLY)
                return EROFS;
        hammer2_pfs_memory_wait(MPTOPMP(mp));

        return 0;
}

/*
 * Initiate an asynchronous filesystem sync and, with hysteresis,
 * stall if the internal data structure count becomes too bloated.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
        uint32_t waiting;
        int pcatch;
        int error;

        if (pmp == NULL || pmp->mp == NULL)
                return;

        for (;;) {
                waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK;
                cpu_ccfence();

                /*
                 * Start the syncer running at 1/2 the limit
                 */
                if (waiting > hammer2_limit_dirty_chains / 2 ||
                    pmp->sideq_count > hammer2_limit_dirty_inodes / 2) {
                        trigger_syncer(pmp->mp);
                }

                /*
                 * Stall at the limit waiting for the counts to drop.
                 * This code will typically be woken up once the count
                 * drops below 3/4 the limit, or in one second.
                 */
                if (waiting < hammer2_limit_dirty_chains &&
                    pmp->sideq_count < hammer2_limit_dirty_inodes) {
                        break;
                }

                pcatch = curthread->td_proc ? PCATCH : 0;

                tsleep_interlock(&pmp->inmem_dirty_chains, pcatch);
                atomic_set_int(&pmp->inmem_dirty_chains,
                               HAMMER2_DIRTYCHAIN_WAITING);
                if (waiting < hammer2_limit_dirty_chains &&
                    pmp->sideq_count < hammer2_limit_dirty_inodes) {
                        break;
                }
                trigger_syncer(pmp->mp);
                error = tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED | pcatch,
                               "h2memw", hz);
                if (error == ERESTART)
                        break;
        }
}
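/*
 * Threshold summary for the stall logic above: the syncer is kicked at
 * 1/2 of hammer2_limit_dirty_chains (or hammer2_limit_dirty_inodes),
 * the caller stalls once the full limit is reached, and the wakeup
 * below fires with hysteresis once the counts fall to 2/3 of the limit.
 */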
/*
 * Wake up any stalled frontend ops waiting, with hysteresis, using
 * 2/3 of the limit.
 */
void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count)
{
        uint32_t waiting;

        if (pmp) {
                waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count);
                /* don't need --waiting to test flag */

                if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) &&
                    (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <=
                    hammer2_limit_dirty_chains * 2 / 3 &&
                    pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) {
                        atomic_clear_int(&pmp->inmem_dirty_chains,
                                         HAMMER2_DIRTYCHAIN_WAITING);
                        wakeup(&pmp->inmem_dirty_chains);
                }
        }
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
        if (pmp) {
                atomic_add_int(&pmp->inmem_dirty_chains, 1);
        }
}

/*
 * Volume header data locks
 */
void
hammer2_voldata_lock(hammer2_dev_t *hmp)
{
        lockmgr(&hmp->vollk, LK_EXCLUSIVE);
}

void
hammer2_voldata_unlock(hammer2_dev_t *hmp)
{
        lockmgr(&hmp->vollk, LK_RELEASE);
}

/*
 * Caller indicates that the volume header is being modified.  Flag
 * the related chain and adjust its transaction id.
 *
 * The transaction id is set to voldata.mirror_tid + 1, similar to
 * what hammer2_chain_modify() does.  Be very careful here, volume
 * data can be updated independently of the rest of the filesystem.
 */
void
hammer2_voldata_modify(hammer2_dev_t *hmp)
{
        if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
                atomic_add_long(&hammer2_count_modified_chains, 1);
                atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
                hammer2_pfs_memory_inc(hmp->vchain.pmp);
                hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid + 1;
        }
}

/*
 * Returns 0 if the filesystem has tons of free space
 * Returns 1 if the filesystem has less than 10% remaining
 * Returns 2 if the filesystem has less than 2%/5% (root/user) remaining.
 */
int
hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
{
        hammer2_pfs_t *pmp;
        hammer2_dev_t *hmp;
        hammer2_off_t free_reserved;
        hammer2_off_t free_nominal;
        int i;

        pmp = ip->pmp;

        if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
                free_reserved = HAMMER2_SEGSIZE;
                free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
                for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
                        hmp = pmp->pfs_hmps[i];
                        if (hmp == NULL)
                                continue;
                        if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
                            pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
                                continue;

                        if (free_nominal > hmp->voldata.allocator_free)
                                free_nominal = hmp->voldata.allocator_free;
                        if (free_reserved < hmp->free_reserved)
                                free_reserved = hmp->free_reserved;
                }

                /*
                 * SMP races ok
                 */
                pmp->free_reserved = free_reserved;
                pmp->free_nominal = free_nominal;
                pmp->free_ticks = ticks;
        } else {
                free_reserved = pmp->free_reserved;
                free_nominal = pmp->free_nominal;
        }
        if (cred && cred->cr_uid != 0) {
                if ((int64_t)(free_nominal - bytes) <
                    (int64_t)free_reserved) {
                        return 2;
                }
        } else {
                if ((int64_t)(free_nominal - bytes) <
                    (int64_t)free_reserved / 2) {
                        return 2;
                }
        }
        if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
                return 1;
        return 0;
}
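/*
 * Hypothetical write-path usage of hammer2_vfs_enospace() (sketch only,
 * not the actual VOP code):
 *
 *      if (hammer2_vfs_enospace(ip, bytes, cred) > 1)
 *              return (ENOSPC);
 *
 * A caller may also react more gently to level 1 (e.g. by kicking off
 * bulkfree) before refusing outright at level 2.
 */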