/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/file.h>

#include "hammer2.h"

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct hammer2_pfslist hammer2_spmplist;
struct lock hammer2_mntlk;

int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
int hammer2_debug;
int hammer2_aux_flags;
int hammer2_xop_nthreads;
int hammer2_xop_sgroups;
int hammer2_xop_xgroups;
int hammer2_xop_xbase;
int hammer2_xop_mod;
long hammer2_debug_inode;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_cluster_write = 0;		/* physical write clustering */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_flush_pipe = 100;
int hammer2_dio_count;
int hammer2_dio_limit = 256;
int hammer2_bulkfree_tps = 5000;
int hammer2_spread_workers;
int hammer2_limit_saved_depth;
long hammer2_chain_allocs;
long hammer2_limit_saved_chains;
long hammer2_limit_dirty_chains;
long hammer2_limit_dirty_inodes;
long hammer2_count_modified_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
static long hammer2_iod_inode_creates;
static long hammer2_iod_inode_deletes;

long hammer2_process_icrc32;
long hammer2_process_xxhash64;

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");
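
/*
 * The tunables below are exported under the vfs.hammer2 sysctl tree.
 * Illustrative usage from userland (examples only):
 *
 *	sysctl vfs.hammer2.dedup_enable=0	# disable live dedup
 *	sysctl vfs.hammer2.debug=1		# enable debug output
 *	sysctl vfs.hammer2.iod_file_read	# inspect an I/O counter
 *
 * Entries declared CTLFLAG_RD are read-only counters; CTLFLAG_RW knobs
 * may be changed at runtime.
 */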
SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer2_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, aux_flags, CTLFLAG_RW,
	   &hammer2_aux_flags, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer2_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, spread_workers, CTLFLAG_RW,
	   &hammer2_spread_workers, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
	   &hammer2_cluster_write, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RD,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_saved_chains, CTLFLAG_RW,
	   &hammer2_limit_saved_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, limit_saved_depth, CTLFLAG_RW,
	   &hammer2_limit_saved_depth, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_inodes, CTLFLAG_RW,
	   &hammer2_limit_dirty_inodes, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RD,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
	   &hammer2_dio_limit, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RD,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RD,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RD,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RD,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RD,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RD,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RD,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RD,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RD,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RD,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RD,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RD,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RD,
	   &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_creates, CTLFLAG_RD,
	   &hammer2_iod_inode_creates, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_inode_deletes, CTLFLAG_RD,
	   &hammer2_iod_inode_deletes, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RD,
	   &hammer2_process_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RD,
	   &hammer2_process_xxhash64, 0, "");

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static int hammer2_vfs_modifying(struct mount *mp);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
static int hammer2_fixup_pfses(hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_flags	= 0,
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp,
	.vfs_modifying	= hammer2_vfs_modifying
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);
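
/*
 * The vfsops table above is wired into the kernel's VFS switch by
 * VFS_SET(), so a userland mount request dispatches to
 * hammer2_vfs_mount() below.  Illustrative only (device name is an
 * example):
 *
 *	newfs_hammer2 -L DATA /dev/ad0s1d
 *	mount -t hammer2 /dev/ad0s1d@DATA /mnt
 */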

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;

	int error;
	int mod;

	error = 0;
	kmalloc_raise_limit(M_HAMMER2, 0);	/* unlimited */

	/*
	 * hammer2_xop_nthreads must be a multiple of ncpus,
	 * minimum 2 * ncpus.
	 */
	mod = ncpus;
	hammer2_xop_mod = mod;
	hammer2_xop_nthreads = mod * 2;
	while (hammer2_xop_nthreads / mod < HAMMER2_XOPGROUPS_MIN ||
	       hammer2_xop_nthreads < HAMMER2_XOPTHREADS_MIN)
	{
		hammer2_xop_nthreads += mod;
	}
	hammer2_xop_sgroups = hammer2_xop_nthreads / mod / 2;
	hammer2_xop_xgroups = hammer2_xop_nthreads / mod - hammer2_xop_sgroups;
	hammer2_xop_xbase = hammer2_xop_sgroups * mod;
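
	/*
	 * Worked example of the sizing above (values are illustrative;
	 * the real minima come from HAMMER2_XOPGROUPS_MIN and
	 * HAMMER2_XOPTHREADS_MIN).  Suppose ncpus == 4 and the loop
	 * settles at 32 groups:
	 *
	 *	mod      = 4
	 *	nthreads = 32 * 4 = 128		(a multiple of ncpus)
	 *	sgroups  = 128 / 4 / 2 = 16	(synchronous groups)
	 *	xgroups  = 128 / 4 - 16 = 16	(extended groups)
	 *	xbase    = 16 * 4 = 64		(first extended thread index)
	 *
	 * i.e. the thread pool splits evenly between the two group types.
	 */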

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO case large
	 *	 enough to avoid this problem, but also cap it.
	 */
	hammer2_dio_limit = nbuf * 2;
	if (hammer2_dio_limit > 100000)
		hammer2_dio_limit = 100000;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
					    0, 1, NULL, NULL, NULL,
					    objcache_malloc_alloc,
					    objcache_malloc_free,
					    &margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
					     0, 1, NULL, NULL, NULL,
					     objcache_malloc_alloc,
					     objcache_malloc_free,
					     &margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				     0, 1, NULL, NULL, NULL,
				     objcache_malloc_alloc_zero,
				     objcache_malloc_free,
				     &margs_vop);
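
	/*
	 * Sketch of the resulting object lifecycle (illustrative only):
	 *
	 *	hammer2_xop_t *xop;
	 *
	 *	xop = objcache_get(cache_xops, M_WAITOK);
	 *	// zeroed only on the object's *first* allocation
	 *	... use xop ...
	 *	// caller resets the FIFO indices before returning it
	 *	objcache_put(cache_xops, xop);
	 */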

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);
	TAILQ_INIT(&hammer2_spmplist);

	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;
	if (hammer2_limit_dirty_chains < 1000)
		hammer2_limit_dirty_chains = 1000;

	hammer2_limit_dirty_inodes = maxvnodes / 25;
	if (hammer2_limit_dirty_inodes < 100)
		hammer2_limit_dirty_inodes = 100;
	if (hammer2_limit_dirty_inodes > HAMMER2_LIMIT_DIRTY_INODES)
		hammer2_limit_dirty_inodes = HAMMER2_LIMIT_DIRTY_INODES;

	hammer2_limit_saved_chains = hammer2_limit_dirty_chains * 5;
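
	/*
	 * Illustrative arithmetic for the clamps above, assuming
	 * maxvnodes == 250000 and generous compile-time ceilings:
	 *
	 *	limit_dirty_chains = 250000 / 10 = 25000
	 *	limit_dirty_inodes = 250000 / 25 = 10000
	 *	limit_saved_chains = 25000 * 5  = 125000
	 *
	 * Small systems are floored at 1000 dirty chains / 100 dirty
	 * inodes so the flusher always has reasonable headroom.
	 */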

	return (error);
}

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}

/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 *
 * XXX check locking
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
			    strcmp(pmp->pfs_names[0],
				   (const char *)ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create_obj(&pmp->minode, "HAMMER2-inodes",
				   sizeof(struct hammer2_inode));
		lockinit(&pmp->lock, "pfslk", 0, 0);
		hammer2_spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		hammer2_spin_init(&pmp->xop_spin, "h2xop");
		hammer2_spin_init(&pmp->lru_spin, "h2lru");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->syncq);
		TAILQ_INIT(&pmp->depq);
		TAILQ_INIT(&pmp->lru_list);
		hammer2_spin_init(&pmp->list_spin, "h2pfsalloc_list");

		/*
		 * Save the last media transaction id for the flusher.  Set
		 * initial
		 */
		if (ripdata) {
			pmp->pfs_clid = ripdata->meta.pfs_clid;
			TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
		} else {
			pmp->flags |= HAMMER2_PMPF_SPMP;
			TAILQ_INSERT_TAIL(&hammer2_spmplist, pmp, mntentry);
		}

		/*
		 * The synchronization thread may start too early, make
		 * sure it stays frozen until we are ready to let it go.
		 * XXX
		 */
		/*
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
		*/
	}

	/*
	 * Create the PFS's root inode and any missing XOP helper threads.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, 1, -1);
		if (ripdata)
			iroot->meta = ripdata->meta;
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot);
	}

	/*
	 * Stop here if no chain is passed in.
	 */
	if (chain == NULL)
		goto done;

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	if (j == HAMMER2_MAXCLUSTER) {
		kprintf("hammer2_pfsalloc: cluster full!\n");
		/* XXX fatal error? */
	} else {
		KKASSERT(chain->pmp == NULL);
		chain->pmp = pmp;
		hammer2_chain_ref(chain);
		iroot->cluster.array[j].chain = chain;
		if (force_local)
			pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
		else
			pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup((const char *)ripdata->filename,
					    M_HAMMER2);
		pmp->pfs_hmps[j] = chain->hmp;
		hammer2_spin_ex(&pmp->inum_spin);
		pmp->pfs_iroot_blocksets[j] = chain->data->ipdata.u.blockset;
		hammer2_spin_unex(&pmp->inum_spin);

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++chain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 *  discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp, NULL,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 *
	 * NOTE: We create helper threads for all mounted PFSs or any
	 *	 PFSs with 2+ nodes (so the sync thread can update them,
	 *	 even if not mounted).
	 */
	if (pmp->mp || iroot->cluster.nchains >= 2)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}

/*
 * Deallocate an element of a probed PFS.  If destroying and this is a
 * MASTER, adjust nmasters.
 *
 * This function does not physically destroy the PFS element in its device
 * under the super-root (see hammer2_ioctl_pfs_delete()).
 */
void
hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	iroot = pmp->iroot;
	if (iroot) {
		/*
		 * Stop synchronizing
		 *
		 * XXX flush after acquiring the iroot lock.
		 * XXX clean out the cluster index from all inode structures.
		 */
		hammer2_thr_delete(&pmp->sync_thrs[clindex]);

		/*
		 * Remove the cluster index from the group.  If destroying
		 * the PFS and this is a master, adjust pfs_nmasters.
		 */
		hammer2_mtx_ex(&iroot->lock);
		chain = iroot->cluster.array[clindex].chain;
		iroot->cluster.array[clindex].chain = NULL;

		switch(pmp->pfs_types[clindex]) {
		case HAMMER2_PFSTYPE_MASTER:
			if (destroying && pmp->pfs_nmasters > 0)
				--pmp->pfs_nmasters;
			/* XXX adjust ripdata->meta.pfs_nmasters */
			break;
		default:
			break;
		}
		pmp->pfs_types[clindex] = HAMMER2_PFSTYPE_NONE;

		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Release the chain.
		 */
		if (chain) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
			hammer2_chain_drop(chain);
		}

		/*
		 * Terminate all XOP threads for the cluster index.
		 */
		if (pmp->xop_groups) {
			for (j = 0; j < hammer2_xop_nthreads; ++j) {
				hammer2_thr_delete(
					&pmp->xop_groups[j].thrs[clindex]);
			}
		}
	}
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int chains_still_present = 0;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	if (pmp->flags & HAMMER2_PMPF_SPMP)
		TAILQ_REMOVE(&hammer2_spmplist, pmp, mntentry);
	else
		TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	/*
	 * Cleanup chains remaining on LRU list.
	 */
	hammer2_spin_ex(&pmp->lru_spin);
	while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONLRU);
		atomic_add_int(&pmp->lru_count, -1);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONLRU);
		TAILQ_REMOVE(&pmp->lru_list, chain, lru_node);
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&pmp->lru_spin);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		hammer2_chain_drop(chain);
		hammer2_spin_ex(&pmp->lru_spin);
	}
	hammer2_spin_unex(&pmp->lru_spin);

	/*
	 * Clean up iroot
	 */
	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j)
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
			}
			chain = iroot->cluster.array[i].chain;
			if (chain && !RB_EMPTY(&chain->core.rbtree)) {
				kprintf("hammer2: Warning pmp %p still "
					"has active chains\n", pmp);
				chains_still_present = 1;
			}
		}
		KASSERT(iroot->refs == 1,
			("PMP->IROOT %p REFS WRONG %d", iroot, iroot->refs));

		/* ref for iroot */
		hammer2_inode_drop(iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Free remaining pmp resources
	 */
	if (chains_still_present) {
		kprintf("hammer2: cannot free pmp %p, still in use\n", pmp);
	} else {
		kmalloc_destroy_obj(&pmp->minode);
		kfree(pmp, M_HAMMER2);
	}
}

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 *
 * XXX inefficient.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp, int which)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int i;
	int j;
	struct hammer2_pfslist *wlist;

	if (which == 0)
		wlist = &hammer2_pfslist;
	else
		wlist = &hammer2_spmplist;
again:
	TAILQ_FOREACH(pmp, wlist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i == HAMMER2_MAXCLUSTER)
			continue;

		hammer2_vfs_sync_pmp(pmp, MNT_WAIT);

		/*
		 * Make sure all synchronization threads are locked
		 * down.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_freeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
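
		/*
		 * The two passes above are deliberate (a reading of the
		 * pattern, not new behavior): the first pass posts the
		 * freeze request to every thread asynchronously so the
		 * shutdowns proceed in parallel, and only the second
		 * pass blocks waiting for each thread to acknowledge.
		 * A single synchronous pass would serialize all of the
		 * waits.
		 */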
		/*
		 * Lock the inode and clean out matching chains.
		 * Note that we cannot use hammer2_inode_lock_*()
		 * here because that would attempt to validate the
		 * cluster that we are in the middle of ripping
		 * apart.
		 *
		 * WARNING! We are working directly on the inode's
		 *	    embedded cluster.
		 */
		hammer2_mtx_ex(&iroot->lock);

		/*
		 * Remove the chain from matching elements of the PFS.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			rchain = iroot->cluster.array[i].chain;
			iroot->cluster.array[i].chain = NULL;
			pmp->pfs_types[i] = HAMMER2_PFSTYPE_NONE;
			if (pmp->pfs_names[i]) {
				kfree(pmp->pfs_names[i], M_HAMMER2);
				pmp->pfs_names[i] = NULL;
			}
			if (rchain) {
				hammer2_chain_drop(rchain);
				/* focus hint */
				if (iroot->cluster.focus == rchain)
					iroot->cluster.focus = NULL;
			}
			pmp->pfs_hmps[i] = NULL;
		}
		hammer2_mtx_unlock(&iroot->lock);

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			/*
			 * If this was the hmp's spmp, we need to clean
			 * a little more stuff out.
			 */
			if (hmp->spmp == pmp) {
				hmp->spmp = NULL;
				hmp->vchain.pmp = NULL;
				hmp->fchain.pmp = NULL;
			}

			/*
			 * Free the pmp and restart the loop
			 */
			KKASSERT(TAILQ_EMPTY(&pmp->syncq));
			KKASSERT(TAILQ_EMPTY(&pmp->depq));
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == NULL)
				continue;
			hammer2_thr_remaster(&pmp->sync_thrs[i]);
			hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
			if (pmp->xop_groups) {
				for (j = 0; j < hammer2_xop_nthreads; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp, *hmp_tmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	const hammer2_inode_data_t *ripdata;
	hammer2_devvp_list_t devvpl;
	hammer2_devvp_t *e, *e_tmp;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *label;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	int error;
	int i;

	hmp = NULL;
	pmp = NULL;
	label = NULL;
	bzero(&info, sizeof(info));

	if (path) {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		hammer2_cluster_t *cluster;

		error = 0;
		pmp = MPTOPMP(mp);
		pmp->hflags = info.hflags;
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			error = hammer2_remount(hmp, mp, path, cred);
			if (error)
				break;
		}

		return error;
	}

	if (path == NULL) {
		/*
		 * Root mount
		 */
		info.cluster_fd = -1;
		ksnprintf(devstr, sizeof(devstr), "%s",
			  mp->mnt_stat.f_mntfromname);
		done = strlen(devstr) + 1;
		kprintf("hammer2_mount: root devstr=\"%s\"\n", devstr);
	} else {
		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);
		kprintf("hammer2_mount: devstr=\"%s\"\n", devstr);
	}

	/*
	 * Extract device and label, automatically mount @BOOT, @ROOT, or
	 * @DATA if no label is specified, based on the partition id.  Error
	 * out if no label or device (with partition id) is specified.  This
	 * is strictly a convenience to match the default label created by
	 * newfs_hammer2; our preference is that a label always be specified.
	 *
	 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
	 *	 that does not specify a device, as long as some H2 label
	 *	 has already been mounted from that device.  This makes
	 *	 mounting snapshots a lot easier.
	 */
	label = strchr(devstr, '@');
	if (label && ((label + 1) - devstr) > done) {
		kprintf("hammer2_mount: bad label %s/%zd\n", devstr, done);
		return (EINVAL);
	}
	if (label == NULL || label[1] == 0) {
		char slice;

		if (label == NULL)
			label = devstr + strlen(devstr);
		else
			*label = '\0';		/* clean up trailing @ */

		slice = label[-1];
		switch(slice) {
		case 'a':
			label = "BOOT";
			break;
		case 'd':
			label = "ROOT";
			break;
		default:
			label = "DATA";
			break;
		}
	} else {
		*label = '\0';
		label++;
	}
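
	/*
	 * Illustrative parses of devstr at this point (device names are
	 * examples only):
	 *
	 *	"/dev/ad0s1d@DATA"  -> device "/dev/ad0s1d", label "DATA"
	 *	"/dev/ad0s1a"       -> no label, slice 'a', so label "BOOT"
	 *	"/dev/ad0s1d"       -> no label, slice 'd', so label "ROOT"
	 *	"@SNAP1"            -> no device; matched against an
	 *	                       already-probed PFS label below
	 */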

	kprintf("hammer2_mount: device=\"%s\" label=\"%s\" rdonly=%d\n",
		devstr, label, ronly);

	/*
	 * Initialize all device vnodes.
	 */
	TAILQ_INIT(&devvpl);
	error = hammer2_init_devvp(devstr, path == NULL, &devvpl);
	if (error) {
		kprintf("hammer2: failed to initialize devvp in %s\n", devstr);
		hammer2_cleanup_devvp(&devvpl);
		return error;
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (!TAILQ_EMPTY(&devvpl)) {
		/*
		 * Match the device.  Due to the way devfs works,
		 * we may not be able to directly match the vnode pointer,
		 * so also check to see if the underlying device matches.
		 */
		TAILQ_FOREACH(hmp_tmp, &hammer2_mntlist, mntentry) {
			TAILQ_FOREACH(e_tmp, &hmp_tmp->devvpl, entry) {
				int devvp_found = 0;
				TAILQ_FOREACH(e, &devvpl, entry) {
					KKASSERT(e->devvp);
					if (e_tmp->devvp == e->devvp)
						devvp_found = 1;
					if (e_tmp->devvp->v_rdev &&
					    e_tmp->devvp->v_rdev ==
					    e->devvp->v_rdev)
						devvp_found = 1;
				}
				if (!devvp_found)
					goto next_hmp;
			}
			hmp = hmp_tmp;
			kprintf("hammer2_mount: hmp=%p matched\n", hmp);
			break;
next_hmp:
			continue;
		}

		/*
		 * If no match this may be a fresh H2 mount, make sure
		 * the device is not mounted on anything else.
		 */
		if (hmp == NULL) {
			TAILQ_FOREACH(e, &devvpl, entry) {
				struct vnode *devvp = e->devvp;
				KKASSERT(devvp);
				error = vfs_mountedon(devvp);
				if (error) {
					kprintf("hammer2_mount: %s mounted %d\n",
						e->path, error);
					hammer2_cleanup_devvp(&devvpl);
					lockmgr(&hammer2_mntlk, LK_RELEASE);
					return error;
				}
			}
		}
	} else {
		/*
		 * Match the label to a pmp already probed.
		 */
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_names[i] &&
				    strcmp(pmp->pfs_names[i], label) == 0) {
					hmp = pmp->pfs_hmps[i];
					break;
				}
			}
			if (hmp)
				break;
		}
		if (hmp == NULL) {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return ENOENT;
		}
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;
		hammer2_xop_head_t xop;

		/*
		 * Now open the device
		 */
		KKASSERT(!TAILQ_EMPTY(&devvpl));
		error = hammer2_open_devvp(&devvpl, ronly);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}

		/*
		 * Construct volumes and link with device vnodes.
		 */
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->devvp = NULL;
		error = hammer2_init_volumes(mp, &devvpl, hmp->volumes,
					     &hmp->voldata, &hmp->volhdrno,
					     &hmp->devvp);
		if (error) {
			hammer2_close_devvp(&devvpl, ronly);
			hammer2_cleanup_devvp(&devvpl);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			kfree(hmp, M_HAMMER2);
			return error;
		}
		if (!hmp->devvp) {
			kprintf("hammer2: failed to initialize root volume\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s",
			  devstr);
		hmp->ronly = ronly;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create_obj(&hmp->mchain, "HAMMER2-chains",
				   sizeof(struct hammer2_chain));
		kmalloc_create_obj(&hmp->mio, "HAMMER2-dio",
				   sizeof(struct hammer2_io));
		kmalloc_create(&hmp->mmsg, "HAMMER2-msg");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		hammer2_spin_init(&hmp->io_spin, "h2mount_io");
		hammer2_spin_init(&hmp->list_spin, "h2mount_list");

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);
		lockinit(&hmp->bflock, "h2bflk", 0, 0);

		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hammer2_chain_init(&hmp->vchain);

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level radix tree
		 * so it does not interfere with the volume's topology
		 * radix tree.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hammer2_chain_init(&hmp->fchain);

		/*
		 * Initialize volume header related fields.
		 */
		KKASSERT(hmp->voldata.magic == HAMMER2_VOLUME_ID_HBO ||
			 hmp->voldata.magic == HAMMER2_VOLUME_ID_ABO);
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
		/*
		 * Must use hmp instead of volume header for these two
		 * in order to handle volume versions transparently.
		 */
		if (hmp->voldata.version >= HAMMER2_VOL_VERSION_MULTI_VOLUMES) {
			hmp->nvolumes = hmp->voldata.nvolumes;
			hmp->total_size = hmp->voldata.total_size;
		} else {
			hmp->nvolumes = 1;
			hmp->total_size = hmp->voldata.volu_size;
		}
		KKASSERT(hmp->nvolumes > 0);

		/*
		 * Move devvpl entries to hmp.
		 */
		TAILQ_INIT(&hmp->devvpl);
		while ((e = TAILQ_FIRST(&devvpl)) != NULL) {
			TAILQ_REMOVE(&devvpl, e, entry);
			TAILQ_INSERT_TAIL(&hmp->devvpl, e, entry);
		}
		KKASSERT(TAILQ_EMPTY(&devvpl));
		KKASSERT(!TAILQ_EMPTY(&hmp->devvpl));

		/*
		 * Really important to get these right or the flush and
		 * teardown code will get confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, NULL);
		spmp = hmp->spmp;
		spmp->pfs_hmps[0] = hmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory
		 * keyspace represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
					      HAMMER2_SROOT_KEY,
					      HAMMER2_SROOT_KEY,
					      &error, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &schain->data->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		hammer2_dummy_xop_from_chain(&xop, schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = hammer2_inode_get(spmp, &xop, -1, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(&xop.cluster);
		hammer2_chain_drop(schain);
		/* do not call hammer2_cluster_drop() on an embedded cluster */
		schain = NULL;	/* now invalid */
		/* leave spmp->iroot with one ref */

		if (!hmp->ronly) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);
		hammer2_bulkfree_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		if (info.cluster_fd >= 0) {
			fp = holdfp(curthread, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
	} else {
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2_mount: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, (char *)chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		if (error) {
			kprintf("hammer2_mount: PFS label I/O error\n");
			return EINVAL;
		} else {
			kprintf("hammer2_mount: PFS label \"%s\" not found\n",
				label);
			return ENOENT;
		}
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps()).
	 */
	if (chain->error) {
		kprintf("hammer2_mount: PFS label I/O error\n");
	} else {
		ripdata = &chain->data->ipdata;
		pmp = hammer2_pfsalloc(NULL, ripdata, force_local);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	/*
	 * PFS to mount must exist at this point.
	 */
	if (pmp == NULL) {
		kprintf("hammer2_mount: Failed to acquire PFS structure\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}

	/*
	 * Finish the mount
	 */
	kprintf("hammer2_mount: hmp=%p pmp=%p\n", hmp, pmp);

	/* Check if the pmp has already been mounted. */
	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_key_t key_next;
	int error;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
		} else if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			kprintf("Non inode chain type %d under super-root\n",
				chain->bref.type);
		} else {
			ripdata = &chain->data->ipdata;
			hammer2_pfsalloc(chain, ripdata, force_local);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);
}

static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct ucred *cred)
{
	hammer2_volume_t *vol;
	struct vnode *devvp;
	int i, error, result = 0;

	if (!(hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)))
		return 0;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = &hmp->volumes[i];
		devvp = vol->dev->devvp;
		KKASSERT(devvp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_OPEN(devvp, FREAD | FWRITE, FSCRED, NULL);
		vn_unlock(devvp);
		error = 0;
		if (vol->id == HAMMER2_ROOT_VOLUME) {
			error = hammer2_recovery(hmp);
			if (error == 0)
				error |= hammer2_fixup_pfses(hmp);
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			VOP_CLOSE(devvp, FREAD, NULL);
		} else {
			VOP_CLOSE(devvp, FREAD | FWRITE, NULL);
		}
		vn_unlock(devvp);
		result |= error;
	}
	if (result == 0) {
		kprintf("hammer2: enable read/write\n");
		hmp->ronly = 0;
	}

	return result;
}

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	if (pmp->mp)
		hammer2_unmount_helper(mp, pmp, NULL);

	error = 0;
failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Unmount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int dumpcnt;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have to
	 * disconnect the mount, adjust mount_count, and locate devices that
	 * might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT(MPTOPMP(mp) == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	if (hmp->mount_count)
		return;

	/*
	 * Decommission the network before we start messing with the
	 * device and PFS.
	 */
	hammer2_iocom_uninit(hmp);

	hammer2_bulkfree_uninit(hmp);
	hammer2_pfsfree_scan(hmp, 0);

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
#if 0
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);
#endif

	/*
	 * Flush whatever is left.  Unmounted but modified PFS's might still
	 * have some dirty chains on them.
	 */
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);

	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_voldata_modify(hmp);
		hammer2_flush(&hmp->fchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->fchain);

	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		hammer2_flush(&hmp->vchain, HAMMER2_FLUSH_TOP |
					    HAMMER2_FLUSH_ALL);
	}
	hammer2_chain_unlock(&hmp->vchain);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	hammer2_pfsfree_scan(hmp, 1);

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if (!TAILQ_EMPTY(&hmp->devvpl)) {
		hammer2_close_devvp(&hmp->devvpl, hmp->ronly);
		hammer2_cleanup_devvp(&hmp->devvpl);
	}
	KKASSERT(TAILQ_EMPTY(&hmp->devvpl));

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1);
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1);

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp, -1);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp, -1);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, 0, &dumpcnt, 'v', (u_int)-1);
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, 0, &dumpcnt, 'f', (u_int)-1);

	/*
	 * Final drop of embedded freemap root chain to clean up
	 * fchain.core (fchain structure is not flagged ALLOCATED
	 * so it is cleaned out and then left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean up
	 * vchain.core (vchain structure is not flagged ALLOCATED
	 * so it is cleaned out and then left to rot).
	 */
	hammer2_chain_drop(&hmp->vchain);

	hammer2_io_cleanup(hmp, &hmp->iotree);
	if (hmp->iofree_count) {
		kprintf("io_cleanup: %d I/O's left hanging\n",
			hmp->iofree_count);
	}

	TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
	kmalloc_destroy_obj(&hmp->mchain);
	kmalloc_destroy_obj(&hmp->mio);
	kmalloc_destroy(&hmp->mmsg);
	kfree(hmp, M_HAMMER2);
}

int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	hammer2_xop_lookup_t *xop;
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	int error;

	inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;

	error = 0;
	pmp = MPTOPMP(mp);

	/*
	 * Easy if we already have it cached
	 */
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* from lookup */

		return error;
	}

	/*
	 * Otherwise we have to find the inode
	 */
	xop = hammer2_xop_alloc(pmp->iroot, 0);
	xop->lhc = inum;
	hammer2_xop_start(&xop->head, &hammer2_lookup_desc);
	error = hammer2_xop_collect(&xop->head, 0);

	if (error == 0)
		ip = hammer2_inode_get(pmp, &xop->head, -1, -1);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	if (ip) {
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
	} else {
		*vpp = NULL;
		error = ENOENT;
	}
	return (error);
}

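/*
 * Return the mount's root vnode.  On first access this may have to wait
 * for the cluster to become sufficiently operational to resolve the root
 * inode's meta-data and starting transaction ids (the xop loop below).
 */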
static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int error;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		kprintf("hammer2 (%s): no root inode\n",
			mp->mnt_stat.f_mntfromname);
		*vpp = NULL;
		return EINVAL;
	}

	error = 0;
	hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);

	while (pmp->inode_tid == 0) {
		hammer2_xop_ipcluster_t *xop;
		const hammer2_inode_meta_t *meta;

		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_ipcluster_desc);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error == 0) {
			meta = &hammer2_xop_gdata(&xop->head)->ipdata.meta;
			pmp->iroot->meta = *meta;
			pmp->inode_tid = meta->pfs_inum + 1;
			hammer2_xop_pdata(&xop->head);
			/* meta invalid */

			if (pmp->inode_tid < HAMMER2_INODE_START)
				pmp->inode_tid = HAMMER2_INODE_START;
			pmp->modify_tid =
				xop->head.cluster.focus->bref.modify_tid + 1;
#if 0
			kprintf("PFS: Starting inode %jd\n",
				(intmax_t)pmp->inode_tid);
			kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
				pmp->inode_tid, pmp->modify_tid);
#endif
			wakeup(&pmp->iroot);

			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

			/*
			 * Prime the mount info.
			 */
			hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
			break;
		}

		/*
		 * Loop, try again
		 */
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlock(pmp->iroot);
		error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
		hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
		if (error == EINTR)
			break;
	}

	if (error) {
		hammer2_inode_unlock(pmp->iroot);
		*vpp = NULL;
	} else {
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_inode_unlock(pmp->iroot);
		*vpp = vp;
	}

	return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	struct statfs tmp;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	bzero(&tmp, sizeof(tmp));

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		tmp.f_files = bref.embed.stats.inode_count;
		tmp.f_ffree = 0;
		tmp.f_blocks = hmp->voldata.allocator_size /
			       mp->mnt_vstat.f_bsize;
		tmp.f_bfree = hmp->voldata.allocator_free /
			      mp->mnt_vstat.f_bsize;
		tmp.f_bavail = tmp.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			tmp.f_blocks -= adj;
			tmp.f_bfree -= adj;
			tmp.f_bavail -= adj;
		}

		mp->mnt_stat.f_blocks = tmp.f_blocks;
		mp->mnt_stat.f_bfree = tmp.f_bfree;
		mp->mnt_stat.f_bavail = tmp.f_bavail;
		mp->mnt_stat.f_files = tmp.f_files;
		mp->mnt_stat.f_ffree = tmp.f_ffree;

		*sbp = mp->mnt_stat;
	}
	return (0);
}

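/*
 * NOTE: For a multi-chain cluster the loop above simply overwrites tmp
 *	 on each iteration, so the values reported reflect the last
 *	 backing device scanned.  hammer2_vfs_statvfs() below behaves
 *	 the same way.
 */
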
static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	struct statvfs tmp;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);
	bzero(&tmp, sizeof(tmp));

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		tmp.f_files = bref.embed.stats.inode_count;
		tmp.f_ffree = 0;
		tmp.f_blocks = hmp->voldata.allocator_size /
			       mp->mnt_vstat.f_bsize;
		tmp.f_bfree = hmp->voldata.allocator_free /
			      mp->mnt_vstat.f_bsize;
		tmp.f_bavail = tmp.f_bfree;

		if (cred && cred->cr_uid != 0) {
			uint64_t adj;

			/* 5% */
			adj = hmp->free_reserved / mp->mnt_vstat.f_bsize;
			tmp.f_blocks -= adj;
			tmp.f_bfree -= adj;
			tmp.f_bavail -= adj;
		}

		mp->mnt_vstat.f_blocks = tmp.f_blocks;
		mp->mnt_vstat.f_bfree = tmp.f_bfree;
		mp->mnt_vstat.f_bavail = tmp.f_bavail;
		mp->mnt_vstat.f_files = tmp.f_files;
		mp->mnt_vstat.f_ffree = tmp.f_ffree;

		*sbp = mp->mnt_vstat;
	}
	return (0);
}

/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  If the system crashed, the next mount must incrementally
 * scan the transactions past the last committed freemap transaction id
 * and make sure that all related blocks have been marked allocated.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
	hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
	struct hammer2_recovery_list list;
	hammer2_tid_t mtid;
	int depth;
};

static int hammer2_recovery_scan(hammer2_dev_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_info *info,
			hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH	10

static
int
hammer2_recovery(hammer2_dev_t *hmp)
{
	struct hammer2_recovery_info info;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	hammer2_tid_t sync_tid;
	hammer2_tid_t mirror_tid;
	int error;

	hammer2_trans_init(hmp->spmp, 0);

	sync_tid = hmp->voldata.freemap_tid;
	mirror_tid = hmp->voldata.mirror_tid;

	kprintf("hammer2_mount: \"%s\": ", hmp->devrepname);
	if (sync_tid >= mirror_tid) {
		kprintf("no recovery needed\n");
	} else {
		kprintf("freemap recovery %016jx-%016jx\n",
			sync_tid + 1, mirror_tid);
	}

	TAILQ_INIT(&info.list);
	info.depth = 0;
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
		TAILQ_REMOVE(&info.list, elm, entry);
		parent = elm->chain;
		sync_tid = elm->sync_tid;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		error |= hammer2_recovery_scan(hmp, parent, &info,
					       hmp->voldata.freemap_tid);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);	/* drop elm->chain ref */
	}

	hammer2_trans_done(hmp->spmp, 0);

	return error;
}

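/*
 * Recursive helper for hammer2_recovery().  Recursion is capped at
 * HAMMER2_RECOVERY_MAXDEPTH; deeper sub-trees are queued as
 * hammer2_recovery_elm entries on info->list and are picked up
 * iteratively by the loop in hammer2_recovery() above, bounding
 * kernel stack use.
 */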
static
int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	int tmp_error;
	int rup_error;
	int error;
	int first;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &parent->data->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
	default:
		return HAMMER2_ERROR_BADBREF;
	}

	/*
	 * Defer operation if depth limit reached.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */

		return(0);
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 *
	 * error	Cumulative error this level only
	 * rup_error	Cumulative error for recursion
	 * tmp_error	Specific non-cumulative recursion error
	 */
	chain = NULL;
	first = 1;
	rup_error = 0;
	error = 0;

	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref,
					    &first,
					    HAMMER2_LOOKUP_NODATA);

		/*
		 * Problem during scan or EOF
		 */
		if (error)
			break;

		/*
		 * If this is a leaf
		 */
		if (chain == NULL) {
			if (bref.mirror_tid > sync_tid) {
				hammer2_freemap_adjust(hmp, &bref,
						     HAMMER2_FREEMAP_DORECOVER);
			}
			continue;
		}

		/*
		 * This may or may not be a recursive node.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (bref.mirror_tid > sync_tid) {
			++info->depth;
			tmp_error = hammer2_recovery_scan(hmp, chain,
							  info, sync_tid);
			--info->depth;
		} else {
			tmp_error = 0;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
		 */
		if (tmp_error == 0 &&
		    (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
		}
		rup_error |= tmp_error;
	}
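	/*
	 * HAMMER2_ERROR_EOF from the scan is the normal termination
	 * condition and is masked out of the combined error return.
	 */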
	return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
}

/*
 * This fixes up an error introduced in earlier H2 implementations where
 * moving a PFS inode into an indirect block wound up causing the
 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared.
 */
static
int
hammer2_fixup_pfses(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_pfs_t *spmp;
	int error;

	error = 0;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			/* fall through and advance to the next element */
		} else if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
			error |= chain->error;
		} else if ((chain->bref.flags &
			    HAMMER2_BREF_FLAG_PFSROOT) == 0) {
			int error2;

			ripdata = &chain->data->ipdata;
			hammer2_trans_init(hmp->spmp, 0);
			error2 = hammer2_chain_modify(chain,
						      chain->bref.modify_tid,
						      0, 0);
			if (error2 == 0) {
				kprintf("hammer2: Correct mis-flagged PFS %s\n",
					ripdata->filename);
				chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
			} else {
				error |= error2;
			}
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
			hammer2_trans_done(hmp->spmp, 0);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	return error;
}

/*
 * Sync a mount point; this is called periodically on a per-mount basis from
 * the filesystem syncer, and whenever a user issues a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	int error;

	error = hammer2_vfs_sync_pmp(MPTOPMP(mp), waitfor);

	return error;
}

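/*
 * The flush below runs in two stages: stage 1 flushes all inodes found
 * on the syncq, restarting if new dependencies appear while it runs;
 * stage 2 then flushes the PFS root inode (iroot) last so that the
 * inode index under it reflects everything flushed in stage 1.
 */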
/*
 * Because frontend operations lock vnodes before we get a chance to
 * lock the related inode, we can't just acquire a vnode lock without
 * risking a deadlock.  The frontend may be holding a vnode lock while
 * also blocked on our SYNCQ flag while trying to get the inode lock.
 *
 * To deal with this situation we can check the vnode lock situation
 * after locking the inode and perform a work-around.
 */
int
hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor)
{
	hammer2_inode_t *ip;
	hammer2_depend_t *depend;
	hammer2_depend_t *depend_next;
	struct vnode *vp;
	uint32_t pass2;
	int error;
	int wakecount;
	int dorestart;

	/*
	 * Move all inodes on sideq to syncq.  This will clear sideq.
	 * This should represent all flushable inodes.  These inodes
	 * will already have refs due to being on syncq or sideq.  We
	 * must do this all at once with the spinlock held to ensure that
	 * all inode dependencies are part of the same flush.
	 *
	 * We should be able to do this asynchronously from frontend
	 * operations because we will be locking the inodes later on
	 * to actually flush them, and that will partition any frontend
	 * op using the same inode.  Either it has already locked the
	 * inode and we will block, or it has not yet locked the inode
	 * and it will block until we are finished flushing that inode.
	 *
	 * When restarting, only move the inodes flagged as PASS2 from
	 * SIDEQ to SYNCQ.  PASS2 propagation by inode_lock4() and
	 * inode_depend() is atomic with the spin-lock.
	 */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC BOUNDARY\n");
#endif
	dorestart = 0;

	/*
	 * Move inodes from depq to syncq, releasing the related
	 * depend structures.
	 */
restart:
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC RESTART (%d)\n", dorestart);
#endif
	hammer2_trans_setflags(pmp, 0/*HAMMER2_TRANS_COPYQ*/);
	hammer2_trans_clearflags(pmp, HAMMER2_TRANS_RESCAN);

	/*
	 * Move inodes from depq to syncq.  When restarting, only depq's
	 * marked pass2 are moved.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend_next = TAILQ_FIRST(&pmp->depq);
	wakecount = 0;

	while ((depend = depend_next) != NULL) {
		depend_next = TAILQ_NEXT(depend, entry);
		if (dorestart && depend->pass2 == 0)
			continue;
		TAILQ_FOREACH(ip, &depend->sideq, entry) {
			KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
			atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
			ip->depend = NULL;
		}

		/*
		 * NOTE: pmp->sideq_count includes both sideq and syncq
		 */
		TAILQ_CONCAT(&pmp->syncq, &depend->sideq, entry);

		depend->count = 0;
		depend->pass2 = 0;
		TAILQ_REMOVE(&pmp->depq, depend, entry);
	}

	hammer2_spin_unex(&pmp->list_spin);
	hammer2_trans_clearflags(pmp, /*HAMMER2_TRANS_COPYQ |*/
				      HAMMER2_TRANS_WAITING);
	dorestart = 0;

	/*
	 * sideq_count may have dropped enough to allow us to unstall
	 * the frontend.
	 */
	hammer2_pfs_memory_wakeup(pmp, 0);
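
	/*
	 * NOTE: A hammer2_depend structure groups inodes which must be
	 *	 flushed in the same pass; the TAILQ_CONCAT above moves
	 *	 each dependency group onto the syncq whole, never
	 *	 partially.
	 */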

	/*
	 * Now run through all inodes on syncq.
	 *
	 * Flush transactions only interlock with other flush transactions.
	 * Any conflicting frontend operations will block on the inode, but
	 * may hold a vnode lock while doing so.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	while ((ip = TAILQ_FIRST(&pmp->syncq)) != NULL) {
		/*
		 * Remove the inode from the SYNCQ, transfer the syncq ref
		 * to us.  We must clear SYNCQ to allow any potential
		 * front-end deadlock to proceed.  We must set PASS2 so
		 * the dependency code knows what to do.
		 */
		pass2 = ip->flags;
		cpu_ccfence();
		if (atomic_cmpset_int(&ip->flags,
			      pass2,
			      (pass2 & ~(HAMMER2_INODE_SYNCQ |
					 HAMMER2_INODE_SYNCQ_WAKEUP)) |
			      HAMMER2_INODE_SYNCQ_PASS2) == 0)
		{
			continue;
		}
		TAILQ_REMOVE(&pmp->syncq, ip, entry);
		--pmp->sideq_count;
		hammer2_spin_unex(&pmp->list_spin);

		/*
		 * Tickle anyone waiting on ip->flags or the hysteresis
		 * on the dirty inode count.
		 */
		if (pass2 & HAMMER2_INODE_SYNCQ_WAKEUP)
			wakeup(&ip->flags);
		if (++wakecount >= hammer2_limit_dirty_inodes / 20 + 1) {
			wakecount = 0;
			hammer2_pfs_memory_wakeup(pmp, 0);
		}

		/*
		 * Relock the inode, and we inherit a ref from the above.
		 * We will check for a race after we acquire the vnode.
		 */
		hammer2_mtx_ex(&ip->lock);

		/*
		 * We need the vp in order to vfsync() dirty buffers, so if
		 * one isn't attached we can skip it.
		 *
		 * Ordering the inode lock and then the vnode lock has the
		 * potential to deadlock.  If we had left SYNCQ set that could
		 * also deadlock us against the frontend even if we don't hold
		 * any locks, but the latter is not a problem now since we
		 * cleared it.  igetv will temporarily release the inode lock
		 * in a safe manner to work around the deadlock.
		 *
		 * Unfortunately it is still possible to deadlock when the
		 * frontend obtains multiple inode locks, because all the
		 * related vnodes are already locked (nor can the vnode locks
		 * be released and reacquired without messing up RECLAIM and
		 * INACTIVE sequencing).
		 *
		 * The solution for now is to move the vp back onto SIDEQ
		 * and set dorestart, which will restart the flush after we
		 * exhaust the current SYNCQ.  Note that additional
		 * dependencies may build up, so we definitely need to move
		 * the whole SIDEQ back to SYNCQ when we restart.
		 */
		vp = ip->vp;
		if (vp) {
			if (vget(vp, LK_EXCLUSIVE|LK_NOWAIT)) {
				/*
				 * Failed to get the vnode, requeue the inode
				 * (PASS2 is already set so it will be found
				 * again on the restart).
				 *
				 * Then unlock, possibly sleep, and retry
				 * later.  We sleep if PASS2 was *previously*
				 * set, before we set it again above.
				 */
				vp = NULL;
				dorestart = 1;
#ifdef HAMMER2_DEBUG_SYNC
				kprintf("inum %ld (sync delayed by vnode)\n",
					(long)ip->meta.inum);
#endif
				hammer2_inode_delayed_sideq(ip);

				hammer2_mtx_unlock(&ip->lock);
				hammer2_inode_drop(ip);

				if (pass2 & HAMMER2_INODE_SYNCQ_PASS2) {
					tsleep(&dorestart, 0, "h2syndel", 2);
				}
				hammer2_spin_ex(&pmp->list_spin);
				continue;
			}
		} else {
			vp = NULL;
		}

		/*
		 * If the inode wound up on a SIDEQ again it will already be
		 * prepped for another PASS2.  In this situation if we flush
		 * it now we will just wind up flushing it again in the same
		 * syncer run, so we might as well not flush it now.
		 */
		if (ip->flags & HAMMER2_INODE_SIDEQ) {
			hammer2_mtx_unlock(&ip->lock);
			hammer2_inode_drop(ip);
			if (vp)
				vput(vp);
			dorestart = 1;
			hammer2_spin_ex(&pmp->list_spin);
			continue;
		}

		/*
		 * Ok we have the inode exclusively locked and if vp is
		 * not NULL that will also be exclusively locked.  Do the
		 * meat of the flush.
		 *
		 * vp token needed for v_rbdirty_tree check / vclrisdirty
		 * sequencing.  Though since we hold the vnode exclusively,
		 * we shouldn't actually need the token in this case.
		 */
		if (vp) {
			vfsync(vp, MNT_WAIT, 1, NULL, NULL);
			bio_track_wait(&vp->v_track_write, 0, 0); /* XXX */
		}
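
		/*
		 * The vfsync() above pushes any dirty buffer cache
		 * buffers through the strategy code, converting them
		 * into dirty chains that the chain flush below can
		 * pick up.
		 */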

		/*
		 * If the inode has not yet been inserted into the tree
		 * we must do so.  Then sync and flush it.  The flush
		 * should update the parent.
		 */
		if (ip->flags & HAMMER2_INODE_DELETING) {
#ifdef HAMMER2_DEBUG_SYNC
			kprintf("inum %ld destroy\n", (long)ip->meta.inum);
#endif
			hammer2_inode_chain_des(ip);
			atomic_add_long(&hammer2_iod_inode_deletes, 1);
		} else if (ip->flags & HAMMER2_INODE_CREATING) {
#ifdef HAMMER2_DEBUG_SYNC
			kprintf("inum %ld insert\n", (long)ip->meta.inum);
#endif
			hammer2_inode_chain_ins(ip);
			atomic_add_long(&hammer2_iod_inode_creates, 1);
		}
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("inum %ld chain-sync\n", (long)ip->meta.inum);
#endif

		/*
		 * Because I kinda messed up the design and index the inodes
		 * under the root inode, alongside the directory entries,
		 * we can't flush the inode index under the iroot until the
		 * end.  If we do it now we might miss effects created by
		 * other inodes on the SYNCQ.
		 *
		 * Do a normal (non-FSSYNC) flush instead, which allows the
		 * vnode code to work the same.  We don't want to force iroot
		 * back onto the SIDEQ, and we also don't want the flush code
		 * to update pfs_iroot_blocksets until the final flush later.
		 *
		 * XXX at the moment this will likely result in a double-flush
		 * of the iroot chain.
		 */
		hammer2_inode_chain_sync(ip);
		if (ip == pmp->iroot) {
			hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
		} else {
			hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
						      HAMMER2_XOP_FSSYNC);
		}
		if (vp) {
			lwkt_gettoken(&vp->v_token);
			if ((ip->flags & (HAMMER2_INODE_MODIFIED |
					  HAMMER2_INODE_RESIZED |
					  HAMMER2_INODE_DIRTYDATA)) == 0 &&
			    RB_EMPTY(&vp->v_rbdirty_tree) &&
			    !bio_track_active(&vp->v_track_write)) {
				vclrisdirty(vp);
			} else {
				hammer2_inode_delayed_sideq(ip);
			}
			lwkt_reltoken(&vp->v_token);
			vput(vp);
			vp = NULL;	/* safety */
		}
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_PASS2);
		hammer2_inode_unlock(ip);	/* unlock+drop */
		/* ip pointer invalid */

		/*
		 * If the inode got dirtied after we dropped our locks,
		 * it will have already been moved back to the SIDEQ.
		 */
		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
	hammer2_pfs_memory_wakeup(pmp, 0);

	if (dorestart || (pmp->trans.flags & HAMMER2_TRANS_RESCAN)) {
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("FILESYSTEM SYNC STAGE 1 RESTART\n");
		/*tsleep(&dorestart, 0, "h2STG1-R", hz*20);*/
#endif
		dorestart = 1;
		goto restart;
	}
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC STAGE 2 BEGIN\n");
	/*tsleep(&dorestart, 0, "h2STG2", hz*20);*/
#endif

	/*
	 * We have to flush the PFS root last, even if it does not appear to
	 * be dirty, because all the inodes in the PFS are indexed under it.
	 * The normal flushing of iroot above would only occur if directory
	 * entries under the root were changed.
	 *
	 * Specifying VOLHDR will cause an additional flush of hmp->spmp
	 * for the media making up the cluster.
	 */
	if ((ip = pmp->iroot) != NULL) {
		hammer2_inode_ref(ip);
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_chain_sync(ip);
		hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP |
					      HAMMER2_XOP_FSSYNC |
					      HAMMER2_XOP_VOLHDR);
		hammer2_inode_unlock(ip);	/* unlock+drop */
	}
#ifdef HAMMER2_DEBUG_SYNC
	kprintf("FILESYSTEM SYNC STAGE 2 DONE\n");
#endif

	/*
	 * device bioq sync
	 */
	hammer2_bioq_sync(pmp);

	error = 0;	/* XXX */
	hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH);

	return (error);
}

static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer2_inode_t *ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = 0;
	((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
	((hammer2_tid_t *)fhp->fid_data)[1] = 0;

	return 0;
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	hammer2_tid_t inum;
	int error;

	inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
	if (vpp) {
		if (inum == 1)
			error = hammer2_vfs_root(mp, vpp);
		else
			error = hammer2_vfs_vget(mp, NULL, inum, vpp);
	} else {
		error = 0;
	}
	return error;
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	hammer2_pfs_t *pmp;
	struct netcred *np;
	int error;

	pmp = MPTOPMP(mp);
	np = vfs_export_lookup(mp, &pmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return error;
}

/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
	if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING0);
		wakeup(&pmp->count_lwinprog);
	}
}

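/*
 * Wait for the write pipe to drain to the requested level.  Callers
 * pass the configured flush_pipe limit to throttle frontend writes,
 * or 0 to wait for the pipe to empty entirely (the WAITING0 case
 * handled above).
 */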
void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
	int lwinprog;
	int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
				    HAMMER2_LWINPROG_WAITING0;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, lwflag);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}

/*
 * It is possible for an excessive number of dirty chains or dirty inodes
 * to build up.  When this occurs we start an asynchronous filesystem sync.
 * If the level continues to build up, we stall, waiting for it to drop,
 * with some hysteresis.
 *
 * This relies on the kernel calling hammer2_vfs_modifying() prior to
 * obtaining any vnode locks before making a modifying VOP call.
 */
static int
hammer2_vfs_modifying(struct mount *mp)
{
	if (mp->mnt_flag & MNT_RDONLY)
		return EROFS;
	hammer2_pfs_memory_wait(MPTOPMP(mp));

	return 0;
}

/*
 * Initiate an asynchronous filesystem sync and, with hysteresis,
 * stall if the internal data structure count becomes too bloated.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	int pcatch;
	int error;

	if (pmp == NULL || pmp->mp == NULL)
		return;

	for (;;) {
		waiting = pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK;
		cpu_ccfence();

		/*
		 * Start the syncer running at 1/2 the limit
		 */
		if (waiting > hammer2_limit_dirty_chains / 2 ||
		    pmp->sideq_count > hammer2_limit_dirty_inodes / 2) {
			trigger_syncer(pmp->mp);
		}

		/*
		 * Stall at the limit waiting for the counts to drop.
		 * This code will typically be woken up once the count
		 * drops below 3/4 the limit, or in one second.
		 */
		if (waiting < hammer2_limit_dirty_chains &&
		    pmp->sideq_count < hammer2_limit_dirty_inodes) {
			break;
		}

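		/*
		 * Allow the stall to be broken by a signal when running
		 * in a process context (PCATCH); the ERESTART check
		 * after the tsleep() below terminates the loop in that
		 * case.
		 */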
		pcatch = curthread->td_proc ? PCATCH : 0;

		tsleep_interlock(&pmp->inmem_dirty_chains, pcatch);
		atomic_set_int(&pmp->inmem_dirty_chains,
			       HAMMER2_DIRTYCHAIN_WAITING);
		if (waiting < hammer2_limit_dirty_chains &&
		    pmp->sideq_count < hammer2_limit_dirty_inodes) {
			break;
		}
		trigger_syncer(pmp->mp);
		error = tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED | pcatch,
			       "h2memw", hz);
		if (error == ERESTART)
			break;
	}
}

/*
 * Wake up any stalled frontend ops waiting, with hysteresis, using
 * 2/3 of the limit.
 */
void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count)
{
	uint32_t waiting;

	if (pmp) {
		waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, count);
		/* don't need --waiting to test flag */

		if ((waiting & HAMMER2_DIRTYCHAIN_WAITING) &&
		    (pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK) <=
		    hammer2_limit_dirty_chains * 2 / 3 &&
		    pmp->sideq_count <= hammer2_limit_dirty_inodes * 2 / 3) {
			atomic_clear_int(&pmp->inmem_dirty_chains,
					 HAMMER2_DIRTYCHAIN_WAITING);
			wakeup(&pmp->inmem_dirty_chains);
		}
	}
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	if (pmp) {
		atomic_add_int(&pmp->inmem_dirty_chains, 1);
	}
}

/*
 * Volume header data locks
 */
void
hammer2_voldata_lock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_EXCLUSIVE);
}

void
hammer2_voldata_unlock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_RELEASE);
}

/*
 * Caller indicates that the volume header is being modified.  Flag
 * the related chain and adjust its transaction id.
 *
 * The transaction id is set to voldata.mirror_tid + 1, similar to
 * what hammer2_chain_modify() does.  Be very careful here, volume
 * data can be updated independently of the rest of the filesystem.
 */
void
hammer2_voldata_modify(hammer2_dev_t *hmp)
{
	if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_add_long(&hammer2_count_modified_chains, 1);
		atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_inc(hmp->vchain.pmp);
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid + 1;
	}
}

/*
 * Returns 0 if the filesystem has tons of free space.
 * Returns 1 if the filesystem has less than 10% remaining.
 * Returns 2 if the request would cut into the reserve: less than ~5%
 * remaining for normal users, or less than half that for root.
 */
int
hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_off_t free_reserved;
	hammer2_off_t free_nominal;
	int i;

	pmp = ip->pmp;

	if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
		free_reserved = HAMMER2_SEGSIZE;
		free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
		for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
			hmp = pmp->pfs_hmps[i];
			if (hmp == NULL)
				continue;
			if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
			    pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
				continue;

			if (free_nominal > hmp->voldata.allocator_free)
				free_nominal = hmp->voldata.allocator_free;
			if (free_reserved < hmp->free_reserved)
				free_reserved = hmp->free_reserved;
		}

		/*
		 * SMP races ok
		 */
		pmp->free_reserved = free_reserved;
		pmp->free_nominal = free_nominal;
		pmp->free_ticks = ticks;
	} else {
		free_reserved = pmp->free_reserved;
		free_nominal = pmp->free_nominal;
	}
	if (cred && cred->cr_uid != 0) {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved) {
			return 2;
		}
	} else {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved / 2) {
			return 2;
		}
	}
	if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
		return 1;
	return 0;
}