1 /* 2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 /* 37 * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES 38 * 39 * This header file contains structures used internally by the HAMMER2 40 * implementation. See hammer2_disk.h for on-disk structures. 41 * 42 * There is an in-memory representation of all on-media data structure. 43 * Almost everything is represented by a hammer2_chain structure in-memory. 44 * Other higher-level structures typically map to chains. 45 * 46 * A great deal of data is accessed simply via its buffer cache buffer, 47 * which is mapped for the duration of the chain's lock. Hammer2 must 48 * implement its own buffer cache layer on top of the system layer to 49 * allow for different threads to lock different sub-block-sized buffers. 50 * 51 * When modifications are made to a chain a new filesystem block must be 52 * allocated. Multiple modifications do not typically allocate new blocks 53 * until the current block has been flushed. Flushes do not block the 54 * front-end unless the front-end operation crosses the current inode being 55 * flushed. 56 * 57 * The in-memory representation may remain cached (for example in order to 58 * placemark clustering locks) even after the related data has been 59 * detached. 
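 *
 * As a hedged usage sketch only (not code from this header; error handling
 * is elided and "mtid" is a caller-supplied transaction id), the pattern
 * implied above is to lock a chain with its data resolved, call
 * hammer2_chain_modify() so new media storage is assigned, and only then
 * write through chain->data:
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
 *	error = hammer2_chain_modify(chain, mtid, 0, 0);
 *	if (error == 0) {
 *		... update chain->data ...
 *	}
 *	hammer2_chain_unlock(chain);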
60 */ 61 62 #ifndef _VFS_HAMMER2_HAMMER2_H_ 63 #define _VFS_HAMMER2_HAMMER2_H_ 64 65 #ifdef _KERNEL 66 #include <sys/param.h> 67 #endif 68 #include <sys/types.h> 69 #ifdef _KERNEL 70 #include <sys/kernel.h> 71 #endif 72 #include <sys/conf.h> 73 #ifdef _KERNEL 74 #include <sys/systm.h> 75 #endif 76 #include <sys/tree.h> 77 #include <sys/malloc.h> 78 #include <sys/mount.h> 79 #include <sys/vnode.h> 80 #include <sys/proc.h> 81 #include <sys/mountctl.h> 82 #include <sys/priv.h> 83 #include <sys/stat.h> 84 #include <sys/thread.h> 85 #include <sys/globaldata.h> 86 #include <sys/lockf.h> 87 #include <sys/buf.h> 88 #include <sys/queue.h> 89 #include <sys/limits.h> 90 #include <sys/dmsg.h> 91 #include <sys/mutex.h> 92 #ifdef _KERNEL 93 #include <sys/kern_syscall.h> 94 #endif 95 96 #ifdef _KERNEL 97 #include <sys/signal2.h> 98 #include <sys/buf2.h> 99 #include <sys/mutex2.h> 100 #endif 101 102 #include "hammer2_xxhash.h" 103 #include "hammer2_disk.h" 104 #include "hammer2_mount.h" 105 #include "hammer2_ioctl.h" 106 107 struct hammer2_io; 108 struct hammer2_chain; 109 struct hammer2_cluster; 110 struct hammer2_inode; 111 struct hammer2_depend; 112 struct hammer2_dev; 113 struct hammer2_pfs; 114 struct hammer2_span; 115 struct hammer2_msg; 116 struct hammer2_thread; 117 union hammer2_xop; 118 119 /* 120 * Mutex and lock shims. Hammer2 requires support for asynchronous and 121 * abortable locks, and both exclusive and shared spinlocks. Normal 122 * synchronous non-abortable locks can be substituted for spinlocks. 123 */ 124 typedef mtx_t hammer2_mtx_t; 125 typedef mtx_link_t hammer2_mtx_link_t; 126 typedef mtx_state_t hammer2_mtx_state_t; 127 128 typedef struct spinlock hammer2_spin_t; 129 130 #define hammer2_mtx_ex mtx_lock_ex_quick 131 #define hammer2_mtx_ex_try mtx_lock_ex_try 132 #define hammer2_mtx_sh mtx_lock_sh_quick 133 #define hammer2_mtx_sh_again mtx_lock_sh_again 134 #define hammer2_mtx_sh_try mtx_lock_sh_try 135 #define hammer2_mtx_unlock mtx_unlock 136 #define hammer2_mtx_downgrade mtx_downgrade 137 #define hammer2_mtx_owned mtx_owned 138 #define hammer2_mtx_init mtx_init 139 #define hammer2_mtx_temp_release mtx_lock_temp_release 140 #define hammer2_mtx_temp_restore mtx_lock_temp_restore 141 #define hammer2_mtx_refs mtx_lockrefs 142 143 #define hammer2_spin_init spin_init 144 #define hammer2_spin_sh spin_lock_shared 145 #define hammer2_spin_ex spin_lock 146 #define hammer2_spin_unsh spin_unlock_shared 147 #define hammer2_spin_unex spin_unlock 148 149 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head); 150 TAILQ_HEAD(hammer2_chain_list, hammer2_chain); 151 152 typedef struct hammer2_xop_list hammer2_xop_list_t; 153 154 #ifdef _KERNEL 155 /* 156 * General lock support 157 */ 158 static __inline 159 int 160 hammer2_mtx_upgrade_try(hammer2_mtx_t *mtx) 161 { 162 return mtx_upgrade_try(mtx); 163 } 164 165 #endif 166 167 /* 168 * The xid tracks internal transactional updates. 169 * 170 * XXX fix-me, really needs to be 64-bits 171 */ 172 typedef uint32_t hammer2_xid_t; 173 174 #define HAMMER2_XID_MIN 0x00000000U 175 #define HAMMER2_XID_MAX 0x7FFFFFFFU 176 177 /* 178 * Cap the dynamic calculation for the maximum number of dirty 179 * chains and dirty inodes allowed. 180 */ 181 #define HAMMER2_LIMIT_DIRTY_CHAINS (1024*1024) 182 #define HAMMER2_LIMIT_DIRTY_INODES (65536) 183 184 /* 185 * The chain structure tracks a portion of the media topology from the 186 * root (volume) down. Chains represent volumes, inodes, indirect blocks, 187 * data blocks, and freemap nodes and leafs. 
188 * 189 * The chain structure utilizes a simple singly-homed topology and the 190 * chain's in-memory topology will move around as the chains do, due mainly 191 * to renames and indirect block creation. 192 * 193 * Block Table Updates 194 * 195 * Block table updates for insertions and updates are delayed until the 196 * flush. This allows us to avoid having to modify the parent chain 197 * all the way to the root. 198 * 199 * Block table deletions are performed immediately (modifying the parent 200 * in the process) because the flush code uses the chain structure to 201 * track delayed updates and the chain will be (likely) gone or moved to 202 * another location in the topology after a deletion. 203 * 204 * A prior iteration of the code tried to keep the relationship intact 205 * on deletes by doing a delete-duplicate operation on the chain, but 206 * it added way too much complexity to the codebase. 207 * 208 * Flush Synchronization 209 * 210 * The flush code must flush modified chains bottom-up. Because chain 211 * structures can shift around and are NOT topologically stable, 212 * modified chains are independently indexed for the flush. As the flush 213 * runs it modifies (or further modifies) and updates the parents, 214 * propagating the flush all the way to the volume root. 215 * 216 * Modifying front-end operations can occur during a flush but will block 217 * in two cases: (1) when the front-end tries to operate on the inode 218 * currently in the midst of being flushed and (2) if the front-end 219 * crosses an inode currently being flushed (such as during a rename). 220 * So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and 221 * the flusher is currently working on "a/b/c", the rename will block 222 * temporarily in order to ensure that "x" exists in one place or the 223 * other. 224 * 225 * Meta-data statistics are updated by the flusher. The front-end will 226 * make estimates but meta-data must be fully synchronized only during a 227 * flush in order to ensure that it remains correct across a crash. 228 * 229 * Multiple flush synchronizations can theoretically be in-flight at the 230 * same time but the implementation is not coded to handle the case and 231 * currently serializes them. 232 * 233 * Snapshots: 234 * 235 * Snapshots currently require the subdirectory tree being snapshotted 236 * to be flushed. The snapshot then creates a new super-root inode which 237 * copies the flushed blockdata of the directory or file that was 238 * snapshotted. 239 * 240 * RBTREE NOTES: 241 * 242 * - Note that the radix tree runs in powers of 2 only so sub-trees 243 * cannot straddle edges. 244 */ 245 RB_HEAD(hammer2_chain_tree, hammer2_chain); 246 TAILQ_HEAD(h2_flush_list, hammer2_chain); 247 TAILQ_HEAD(h2_core_list, hammer2_chain); 248 249 #define CHAIN_CORE_DELETE_BMAP_ENTRIES \ 250 (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t)) 251 252 struct hammer2_reptrack { 253 hammer2_spin_t spin; 254 struct hammer2_reptrack *next; 255 struct hammer2_chain *chain; 256 }; 257 258 /* 259 * Core topology for chain (embedded in chain). Protected by a spinlock. 
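 *
 * Illustrative sketch (not code from this header): because sub-trees are
 * radix (power-of-2) aligned and cannot straddle edges, a (key, keybits)
 * pair identifies an inclusive key range that can be derived as shown
 * below, ignoring the degenerate keybits == 64 case:
 *
 *	hammer2_key_t key_beg = key & ~(((hammer2_key_t)1 << keybits) - 1);
 *	hammer2_key_t key_end = key_beg + ((hammer2_key_t)1 << keybits) - 1;
 *
 * This is also why ranges elsewhere in this header are written with an
 * inclusive key_end, e.g. {0x000,0xFFF} rather than {0x000,0x1000}.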
260 */ 261 struct hammer2_chain_core { 262 hammer2_spin_t spin; 263 struct hammer2_reptrack *reptrack; 264 struct hammer2_chain_tree rbtree; /* sub-chains */ 265 int live_zero; /* blockref array opt */ 266 u_int live_count; /* live (not deleted) chains in tree */ 267 u_int chain_count; /* live + deleted chains under core */ 268 int generation; /* generation number (inserts only) */ 269 }; 270 271 typedef struct hammer2_chain_core hammer2_chain_core_t; 272 273 RB_HEAD(hammer2_io_tree, hammer2_io); 274 275 /* 276 * DIO - Management structure wrapping system buffer cache. 277 * 278 * HAMMER2 uses an I/O abstraction that allows it to cache and manipulate 279 * fixed-sized filesystem buffers frontend by variable-sized hammer2_chain 280 * structures. 281 */ 282 /* #define HAMMER2_IO_DEBUG */ 283 284 #ifdef HAMMER2_IO_DEBUG 285 #define HAMMER2_IO_DEBUG_ARGS , const char *file, int line 286 #define HAMMER2_IO_DEBUG_CALL , file, line 287 #define HAMMER2_IO_DEBUG_COUNT 2048 288 #define HAMMER2_IO_DEBUG_MASK (HAMMER2_IO_DEBUG_COUNT - 1) 289 #else 290 #define HAMMER2_IO_DEBUG_ARGS 291 #define HAMMER2_IO_DEBUG_CALL 292 #endif 293 294 struct hammer2_io { 295 RB_ENTRY(hammer2_io) rbnode; /* indexed by device offset */ 296 struct hammer2_dev *hmp; 297 struct buf *bp; 298 off_t pbase; 299 uint64_t refs; 300 int psize; 301 int act; /* activity */ 302 int btype; /* approximate BREF_TYPE_* */ 303 int ticks; 304 int error; 305 #ifdef HAMMER2_IO_DEBUG 306 int debug_index; 307 #else 308 int unused01; 309 #endif 310 uint64_t dedup_valid; /* valid for dedup operation */ 311 uint64_t dedup_alloc; /* allocated / de-dupable */ 312 #ifdef HAMMER2_IO_DEBUG 313 const char *debug_file[HAMMER2_IO_DEBUG_COUNT]; 314 void *debug_td[HAMMER2_IO_DEBUG_COUNT]; 315 int debug_line[HAMMER2_IO_DEBUG_COUNT]; 316 uint64_t debug_refs[HAMMER2_IO_DEBUG_COUNT]; 317 #endif 318 }; 319 320 typedef struct hammer2_io hammer2_io_t; 321 322 #define HAMMER2_DIO_INPROG 0x8000000000000000LLU /* bio in progress */ 323 #define HAMMER2_DIO_GOOD 0x4000000000000000LLU /* dio->bp is stable */ 324 #define HAMMER2_DIO_WAITING 0x2000000000000000LLU /* wait on INPROG */ 325 #define HAMMER2_DIO_DIRTY 0x1000000000000000LLU /* flush last drop */ 326 #define HAMMER2_DIO_FLUSH 0x0800000000000000LLU /* immediate flush */ 327 328 #define HAMMER2_DIO_MASK 0x00FFFFFFFFFFFFFFLLU 329 330 /* 331 * Primary chain structure keeps track of the topology in-memory. 
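 *
 * A hedged usage sketch (not code from this header): a chain is typically
 * referenced, locked with the desired resolve mode, checked for an on-lock
 * error, and only then dereferenced through chain->data:
 *
 *	hammer2_chain_ref(chain);
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
 *				  HAMMER2_RESOLVE_SHARED);
 *	if (chain->error == 0) {
 *		... read-only access via chain->data ...
 *	}
 *	hammer2_chain_unlock(chain);
 *	hammer2_chain_drop(chain);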
332 */ 333 struct hammer2_chain { 334 hammer2_mtx_t lock; 335 hammer2_chain_core_t core; 336 RB_ENTRY(hammer2_chain) rbnode; /* live chain(s) */ 337 hammer2_blockref_t bref; 338 struct hammer2_chain *parent; 339 struct hammer2_dev *hmp; 340 struct hammer2_pfs *pmp; /* A PFS or super-root (spmp) */ 341 342 struct lock diolk; /* xop focus interlock */ 343 hammer2_io_t *dio; /* physical data buffer */ 344 hammer2_media_data_t *data; /* data pointer shortcut */ 345 u_int bytes; /* physical data size */ 346 u_int flags; 347 u_int refs; 348 u_int lockcnt; 349 int error; /* on-lock data error state */ 350 int cache_index; /* heur speeds up lookup */ 351 352 TAILQ_ENTRY(hammer2_chain) flush_node; /* flush list */ 353 TAILQ_ENTRY(hammer2_chain) lru_node; /* 0-refs LRU */ 354 }; 355 356 typedef struct hammer2_chain hammer2_chain_t; 357 358 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2); 359 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp); 360 361 /* 362 * Special notes on flags: 363 * 364 * INITIAL - This flag allows a chain to be created and for storage to 365 * be allocated without having to immediately instantiate the 366 * related buffer. The data is assumed to be all-zeros. It 367 * is primarily used for indirect blocks. 368 * 369 * MODIFIED - The chain's media data has been modified. Prevents chain 370 * free on lastdrop if still in the topology. 371 * 372 * UPDATE - Chain might not be modified but parent blocktable needs 373 * an update. Prevents chain free on lastdrop if still in 374 * the topology. 375 * 376 * FICTITIOUS - Faked chain as a placeholder for an error condition. This 377 * chain is unsuitable for I/O. 378 * 379 * BMAPPED - Indicates that the chain is present in the parent blockmap. 380 * 381 * BMAPUPD - Indicates that the chain is present but needs to be updated 382 * in the parent blockmap. 
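 *
 * As an illustrative sketch only (not code from this header): a chain has
 * to be visited by the flusher whenever any bit in HAMMER2_CHAIN_FLUSH_MASK
 * (defined below) is set, and because chain->flags is shared the bits are
 * normally manipulated with atomic ops:
 *
 *	if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK)
 *		... chain must be visited by the flush ...
 *	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);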
383 */ 384 #define HAMMER2_CHAIN_MODIFIED 0x00000001 /* dirty chain data */ 385 #define HAMMER2_CHAIN_ALLOCATED 0x00000002 /* kmalloc'd chain */ 386 #define HAMMER2_CHAIN_DESTROY 0x00000004 387 #define HAMMER2_CHAIN_DEDUPABLE 0x00000008 /* registered w/dedup */ 388 #define HAMMER2_CHAIN_DELETED 0x00000010 /* deleted chain */ 389 #define HAMMER2_CHAIN_INITIAL 0x00000020 /* initial create */ 390 #define HAMMER2_CHAIN_UPDATE 0x00000040 /* need parent update */ 391 #define HAMMER2_CHAIN_UNUSED0080 0x00000080 392 #define HAMMER2_CHAIN_TESTEDGOOD 0x00000100 /* crc tested good */ 393 #define HAMMER2_CHAIN_ONFLUSH 0x00000200 /* on a flush list */ 394 #define HAMMER2_CHAIN_FICTITIOUS 0x00000400 /* unsuitable for I/O */ 395 #define HAMMER2_CHAIN_VOLUMESYNC 0x00000800 /* needs volume sync */ 396 #define HAMMER2_CHAIN_UNUSED1000 0x00001000 397 #define HAMMER2_CHAIN_COUNTEDBREFS 0x00002000 /* block table stats */ 398 #define HAMMER2_CHAIN_ONRBTREE 0x00004000 /* on parent RB tree */ 399 #define HAMMER2_CHAIN_ONLRU 0x00008000 /* on LRU list */ 400 #define HAMMER2_CHAIN_EMBEDDED 0x00010000 /* embedded data */ 401 #define HAMMER2_CHAIN_RELEASE 0x00020000 /* don't keep around */ 402 #define HAMMER2_CHAIN_BMAPPED 0x00040000 /* present in blkmap */ 403 #define HAMMER2_CHAIN_BMAPUPD 0x00080000 /* +needs updating */ 404 #define HAMMER2_CHAIN_IOINPROG 0x00100000 /* I/O interlock */ 405 #define HAMMER2_CHAIN_IOSIGNAL 0x00200000 /* I/O interlock */ 406 #define HAMMER2_CHAIN_PFSBOUNDARY 0x00400000 /* super->pfs inode */ 407 #define HAMMER2_CHAIN_HINT_LEAF_COUNT 0x00800000 /* redo leaf count */ 408 #define HAMMER2_CHAIN_LRUHINT 0x01000000 /* was reused */ 409 410 #define HAMMER2_CHAIN_FLUSH_MASK (HAMMER2_CHAIN_MODIFIED | \ 411 HAMMER2_CHAIN_UPDATE | \ 412 HAMMER2_CHAIN_ONFLUSH | \ 413 HAMMER2_CHAIN_DESTROY) 414 415 /* 416 * Hammer2 error codes, used by chain->error and cluster->error. The error 417 * code is typically set on-lock unless no I/O was requested, and set on 418 * I/O otherwise. If set for a cluster it generally means that the cluster 419 * code could not find a valid copy to present. 420 * 421 * All H2 error codes are flags and can be accumulated by ORing them 422 * together. 423 * 424 * IO - An I/O error occurred 425 * CHECK - I/O succeeded but did not match the check code 426 * INCOMPLETE - A cluster is not complete enough to use, or 427 * a chain cannot be loaded because its parent has an error. 428 * 429 * NOTE: API allows callers to check zero/non-zero to determine if an error 430 * condition exists. 431 * 432 * NOTE: Chain's data field is usually NULL on an IO error but not necessarily 433 * NULL on other errors. Check chain->error, not chain->data. 
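 *
 * Illustrative sketch only (not code from this header; "mtid" is a
 * caller-supplied transaction id): because the codes are flags, errors from
 * several sub-operations can be ORed together and collapsed to a single
 * errno at the end via hammer2_error_to_errno(), defined later in this
 * header:
 *
 *	int error = 0;
 *
 *	error |= hammer2_chain_modify(chain, mtid, 0, 0);
 *	error |= hammer2_chain_delete(parent, chain, mtid, 0);
 *	if (error)
 *		return (hammer2_error_to_errno(error));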
434 */ 435 #define HAMMER2_ERROR_NONE 0 /* no error (must be 0) */ 436 #define HAMMER2_ERROR_EIO 0x00000001 /* device I/O error */ 437 #define HAMMER2_ERROR_CHECK 0x00000002 /* check code error */ 438 #define HAMMER2_ERROR_INCOMPLETE 0x00000004 /* incomplete cluster */ 439 #define HAMMER2_ERROR_DEPTH 0x00000008 /* tmp depth limit */ 440 #define HAMMER2_ERROR_BADBREF 0x00000010 /* illegal bref */ 441 #define HAMMER2_ERROR_ENOSPC 0x00000020 /* allocation failure */ 442 #define HAMMER2_ERROR_ENOENT 0x00000040 /* entry not found */ 443 #define HAMMER2_ERROR_ENOTEMPTY 0x00000080 /* dir not empty */ 444 #define HAMMER2_ERROR_EAGAIN 0x00000100 /* retry */ 445 #define HAMMER2_ERROR_ENOTDIR 0x00000200 /* not directory */ 446 #define HAMMER2_ERROR_EISDIR 0x00000400 /* is directory */ 447 #define HAMMER2_ERROR_EINPROGRESS 0x00000800 /* already running */ 448 #define HAMMER2_ERROR_ABORTED 0x00001000 /* aborted operation */ 449 #define HAMMER2_ERROR_EOF 0x00002000 /* end of scan */ 450 #define HAMMER2_ERROR_EINVAL 0x00004000 /* catch-all */ 451 #define HAMMER2_ERROR_EEXIST 0x00008000 /* entry exists */ 452 #define HAMMER2_ERROR_EDEADLK 0x00010000 453 #define HAMMER2_ERROR_ESRCH 0x00020000 454 #define HAMMER2_ERROR_ETIMEDOUT 0x00040000 455 456 /* 457 * Flags passed to hammer2_chain_lookup() and hammer2_chain_next() 458 * 459 * NOTES: 460 * NODATA - Asks that the chain->data not be resolved in order 461 * to avoid I/O. 462 * 463 * NODIRECT - Prevents a lookup of offset 0 in an inode from returning 464 * the inode itself if the inode is in DIRECTDATA mode 465 * (i.e. file is <= 512 bytes). Used by the synchronization 466 * code to prevent confusion. 467 * 468 * SHARED - The input chain is expected to be locked shared, 469 * and the output chain is locked shared. 470 * 471 * MATCHIND - Allows an indirect block / freemap node to be returned 472 * when the passed key range matches the radix. Remember 473 * that key_end is inclusive (e.g. {0x000,0xFFF}, 474 * not {0x000,0x1000}). 475 * 476 * (Cannot be used for remote or cluster ops). 477 * 478 * ALLNODES - Allows NULL focus. 479 * 480 * ALWAYS - Always resolve the data. If ALWAYS and NODATA are both 481 * missing, bulk file data is not resolved but inodes and 482 * other meta-data will. 483 */ 484 #define HAMMER2_LOOKUP_UNUSED0001 0x00000001 485 #define HAMMER2_LOOKUP_NODATA 0x00000002 /* data left NULL */ 486 #define HAMMER2_LOOKUP_NODIRECT 0x00000004 /* no offset=0 DD */ 487 #define HAMMER2_LOOKUP_SHARED 0x00000100 488 #define HAMMER2_LOOKUP_MATCHIND 0x00000200 /* return all chains */ 489 #define HAMMER2_LOOKUP_ALLNODES 0x00000400 /* allow NULL focus */ 490 #define HAMMER2_LOOKUP_ALWAYS 0x00000800 /* resolve data */ 491 #define HAMMER2_LOOKUP_UNUSED1000 0x00001000 492 493 /* 494 * Flags passed to hammer2_chain_modify() and hammer2_chain_resize() 495 * 496 * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT 497 * blocks in the INITIAL-create state. 498 */ 499 #define HAMMER2_MODIFY_OPTDATA 0x00000002 /* data can be NULL */ 500 #define HAMMER2_MODIFY_NO_MODIFY_TID 0x00000004 501 #define HAMMER2_MODIFY_UNUSED0008 0x00000008 502 503 /* 504 * Flags passed to hammer2_chain_lock() 505 * 506 * NOTE: RDONLY is set to optimize cluster operations when *no* modifications 507 * will be made to either the cluster being locked or any underlying 508 * cluster. It allows the cluster to lock and access data for a subset 509 * of available nodes instead of all available nodes. 
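 *
 * As a hedged sketch (not code from this header) of how the LOOKUP_* flags
 * above combine with the lookup API declared later in this header, a key
 * range is typically scanned with a lookup/next loop:
 *
 *	chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
 *				     &error, HAMMER2_LOOKUP_SHARED);
 *	while (chain) {
 *		... examine chain ...
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end,
 *					   &error, HAMMER2_LOOKUP_SHARED);
 *	}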
 *
 * NOTE: NONBLOCK is only used for hammer2_chain_repparent() and getparent(),
 *	 other functions (e.g. hammer2_chain_lookup(), etc) can't handle its
 *	 operation.
 */
#define HAMMER2_RESOLVE_NEVER		1
#define HAMMER2_RESOLVE_MAYBE		2
#define HAMMER2_RESOLVE_ALWAYS		3
#define HAMMER2_RESOLVE_MASK		0x0F

#define HAMMER2_RESOLVE_SHARED		0x10	/* request shared lock */
#define HAMMER2_RESOLVE_LOCKAGAIN	0x20	/* another shared lock */
#define HAMMER2_RESOLVE_UNUSED40	0x40
#define HAMMER2_RESOLVE_NONBLOCK	0x80	/* non-blocking */

/*
 * Flags passed to hammer2_chain_delete()
 */
#define HAMMER2_DELETE_PERMANENT	0x0001

/*
 * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
 * or hammer2_chain_create().
 */
#define HAMMER2_INSERT_PFSROOT		0x0004
#define HAMMER2_INSERT_SAMEPARENT	0x0008

/*
 * Flags passed to hammer2_chain_delete_duplicate()
 */
#define HAMMER2_DELDUP_RECORE		0x0001

/*
 * Cluster different types of storage together for allocations
 */
#define HAMMER2_FREECACHE_INODE		0
#define HAMMER2_FREECACHE_INDIR		1
#define HAMMER2_FREECACHE_DATA		2
#define HAMMER2_FREECACHE_UNUSED3	3
#define HAMMER2_FREECACHE_TYPES		4

/*
 * hammer2_freemap_alloc() block preference
 */
#define HAMMER2_OFF_NOPREF		((hammer2_off_t)-1)

/*
 * BMAP read-ahead maximum parameters
 */
#define HAMMER2_BMAP_COUNT		16	/* max bmap read-ahead */
#define HAMMER2_BMAP_BYTES		(HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)

/*
 * hammer2_freemap_adjust()
 */
#define HAMMER2_FREEMAP_DORECOVER	1
#define HAMMER2_FREEMAP_DOMAYFREE	2
#define HAMMER2_FREEMAP_DOREALFREE	3

/*
 * HAMMER2 cluster - A set of chains representing the same entity.
 *
 * hammer2_cluster typically represents a temporary set of representative
 * chains.  The one exception is that a hammer2_cluster is embedded in
 * hammer2_inode.  This embedded cluster is ONLY used to track the
 * representative chains and cannot be directly locked.
 *
 * A cluster is usually temporary (and thus per-thread) for locking purposes,
 * allowing us to embed the asynchronous storage required for cluster
 * operations in the cluster itself and adjust the state and status without
 * having to worry too much about SMP issues.
 *
 * The exception is the cluster embedded in the hammer2_inode structure.
 * This is used to cache the cluster state on an inode-by-inode basis.
 * Individual hammer2_chain structures not incorporated into clusters might
 * also stick around to cache miscellaneous elements.
 *
 * Because the cluster is a 'working copy' and is usually subject to cluster
 * quorum rules, it is quite possible for us to end up with an insufficient
 * number of live chains to execute an operation.  If an insufficient number
 * of chains remain in a working copy, the operation may have to be
 * downgraded, retried, or stalled until the requisite number of chains is
 * available, or it may simply error out, depending on the mount type.
 *
 * A cluster's focus is set when it is locked.  The focus can only be set
 * to a chain still part of the synchronized set.
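 *
 * A small illustrative sketch (not code from this header): after a cluster
 * lock, callers generally test the rolled-up error before dereferencing
 * the focus:
 *
 *	if (cluster->error == 0 && cluster->focus != NULL) {
 *		chain = cluster->focus;
 *		... operate on the focus chain ...
 *	}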
596 */ 597 #define HAMMER2_XOPFIFO 16 598 #define HAMMER2_XOPFIFO_MASK (HAMMER2_XOPFIFO - 1) 599 #define HAMMER2_XOPGROUPS 32 600 #define HAMMER2_XOPGROUPS_MASK (HAMMER2_XOPGROUPS - 1) 601 602 #define HAMMER2_MAXCLUSTER 8 603 #define HAMMER2_XOPMASK_CLUSTER (uint64_t)((1LLU << HAMMER2_MAXCLUSTER) - 1) 604 #define HAMMER2_XOPMASK_VOP (uint64_t)0x0000000080000000LLU 605 #define HAMMER2_XOPMASK_FIFOW (uint64_t)0x0000000040000000LLU 606 #define HAMMER2_XOPMASK_WAIT (uint64_t)0x0000000020000000LLU 607 #define HAMMER2_XOPMASK_FEED (uint64_t)0x0000000100000000LLU 608 609 #define HAMMER2_XOPMASK_ALLDONE (HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER) 610 611 #define HAMMER2_SPECTHREADS 1 /* sync */ 612 613 struct hammer2_cluster_item { 614 hammer2_chain_t *chain; 615 int error; 616 uint32_t flags; 617 }; 618 619 typedef struct hammer2_cluster_item hammer2_cluster_item_t; 620 621 /* 622 * INVALID - Invalid for focus, i.e. not part of synchronized set. 623 * Once set, this bit is sticky across operations. 624 * 625 * FEMOD - Indicates that front-end modifying operations can 626 * mess with this entry and MODSYNC will copy also 627 * effect it. 628 */ 629 #define HAMMER2_CITEM_INVALID 0x00000001 630 #define HAMMER2_CITEM_FEMOD 0x00000002 631 #define HAMMER2_CITEM_NULL 0x00000004 632 633 struct hammer2_cluster { 634 int refs; /* track for deallocation */ 635 int ddflag; 636 struct hammer2_pfs *pmp; 637 uint32_t flags; 638 int nchains; 639 int error; /* error code valid on lock */ 640 int focus_index; 641 hammer2_chain_t *focus; /* current focus (or mod) */ 642 hammer2_cluster_item_t array[HAMMER2_MAXCLUSTER]; 643 }; 644 645 typedef struct hammer2_cluster hammer2_cluster_t; 646 647 /* 648 * WRHARD - Hard mounts can write fully synchronized 649 * RDHARD - Hard mounts can read fully synchronized 650 * UNHARD - Unsynchronized masters present 651 * NOHARD - No masters visible 652 * WRSOFT - Soft mounts can write to at least the SOFT_MASTER 653 * RDSOFT - Soft mounts can read from at least a SOFT_SLAVE 654 * UNSOFT - Unsynchronized slaves present 655 * NOSOFT - No slaves visible 656 * RDSLAVE - slaves are accessible (possibly unsynchronized or remote). 657 * MSYNCED - All masters are fully synchronized 658 * SSYNCED - All known local slaves are fully synchronized to masters 659 * 660 * All available masters are always incorporated. All PFSs belonging to a 661 * cluster (master, slave, copy, whatever) always try to synchronize the 662 * total number of known masters in the PFSs root inode. 663 * 664 * A cluster might have access to many slaves, copies, or caches, but we 665 * have a limited number of cluster slots. Any such elements which are 666 * directly mounted from block device(s) will always be incorporated. Note 667 * that SSYNCED only applies to such elements which are directly mounted, 668 * not to any remote slaves, copies, or caches that could be available. These 669 * bits are used to monitor and drive our synchronization threads. 670 * 671 * When asking the question 'is any data accessible at all', then a simple 672 * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer. If any of 673 * these bits are set the object can be read with certain caveats: 674 * RDHARD - no caveats. RDSOFT - authoritative but might not be synchronized. 675 * and RDSLAVE - not authoritative, has some data but it could be old or 676 * incomplete. 677 * 678 * When both soft and hard mounts are available, data will be read and written 679 * via the soft mount only. 
But all might be in the cluster because 680 * background synchronization threads still need to do their work. 681 */ 682 #define HAMMER2_CLUSTER_INODE 0x00000001 /* embedded in inode struct */ 683 #define HAMMER2_CLUSTER_UNUSED2 0x00000002 684 #define HAMMER2_CLUSTER_LOCKED 0x00000004 /* cluster lks not recursive */ 685 #define HAMMER2_CLUSTER_WRHARD 0x00000100 /* hard-mount can write */ 686 #define HAMMER2_CLUSTER_RDHARD 0x00000200 /* hard-mount can read */ 687 #define HAMMER2_CLUSTER_UNHARD 0x00000400 /* unsynchronized masters */ 688 #define HAMMER2_CLUSTER_NOHARD 0x00000800 /* no masters visible */ 689 #define HAMMER2_CLUSTER_WRSOFT 0x00001000 /* soft-mount can write */ 690 #define HAMMER2_CLUSTER_RDSOFT 0x00002000 /* soft-mount can read */ 691 #define HAMMER2_CLUSTER_UNSOFT 0x00004000 /* unsynchronized slaves */ 692 #define HAMMER2_CLUSTER_NOSOFT 0x00008000 /* no slaves visible */ 693 #define HAMMER2_CLUSTER_MSYNCED 0x00010000 /* all masters synchronized */ 694 #define HAMMER2_CLUSTER_SSYNCED 0x00020000 /* known slaves synchronized */ 695 696 #define HAMMER2_CLUSTER_ANYDATA ( HAMMER2_CLUSTER_RDHARD | \ 697 HAMMER2_CLUSTER_RDSOFT | \ 698 HAMMER2_CLUSTER_RDSLAVE) 699 700 #define HAMMER2_CLUSTER_RDOK ( HAMMER2_CLUSTER_RDHARD | \ 701 HAMMER2_CLUSTER_RDSOFT) 702 703 #define HAMMER2_CLUSTER_WROK ( HAMMER2_CLUSTER_WRHARD | \ 704 HAMMER2_CLUSTER_WRSOFT) 705 706 #define HAMMER2_CLUSTER_ZFLAGS ( HAMMER2_CLUSTER_WRHARD | \ 707 HAMMER2_CLUSTER_RDHARD | \ 708 HAMMER2_CLUSTER_WRSOFT | \ 709 HAMMER2_CLUSTER_RDSOFT | \ 710 HAMMER2_CLUSTER_MSYNCED | \ 711 HAMMER2_CLUSTER_SSYNCED) 712 713 /* 714 * Helper functions (cluster must be locked for flags to be valid). 715 */ 716 static __inline 717 int 718 hammer2_cluster_rdok(hammer2_cluster_t *cluster) 719 { 720 return (cluster->flags & HAMMER2_CLUSTER_RDOK); 721 } 722 723 static __inline 724 int 725 hammer2_cluster_wrok(hammer2_cluster_t *cluster) 726 { 727 return (cluster->flags & HAMMER2_CLUSTER_WROK); 728 } 729 730 RB_HEAD(hammer2_inode_tree, hammer2_inode); /* ip->rbnode */ 731 TAILQ_HEAD(inoq_head, hammer2_inode); /* ip->entry */ 732 TAILQ_HEAD(depq_head, hammer2_depend); /* depend->entry */ 733 734 struct hammer2_depend { 735 TAILQ_ENTRY(hammer2_depend) entry; 736 struct inoq_head sideq; 737 long count; 738 int pass2; 739 int unused01; 740 }; 741 742 typedef struct hammer2_depend hammer2_depend_t; 743 744 /* 745 * A hammer2 inode. 746 * 747 * NOTE: The inode-embedded cluster is never used directly for I/O (since 748 * it may be shared). Instead it will be replicated-in and synchronized 749 * back out if changed. 750 */ 751 struct hammer2_inode { 752 RB_ENTRY(hammer2_inode) rbnode; /* inumber lookup (HL) */ 753 TAILQ_ENTRY(hammer2_inode) entry; /* SYNCQ/SIDEQ */ 754 hammer2_depend_t *depend; /* non-NULL if SIDEQ */ 755 hammer2_depend_t depend_static; /* (in-place allocation) */ 756 hammer2_mtx_t lock; /* inode lock */ 757 hammer2_mtx_t truncate_lock; /* prevent truncates */ 758 struct hammer2_pfs *pmp; /* PFS mount */ 759 struct vnode *vp; 760 struct spinlock cluster_spin; /* update cluster */ 761 hammer2_cluster_t cluster; 762 struct lockf advlock; 763 u_int flags; 764 u_int refs; /* +vpref, +flushref */ 765 uint8_t comp_heuristic; 766 hammer2_inode_meta_t meta; /* copy of meta-data */ 767 hammer2_off_t osize; 768 }; 769 770 typedef struct hammer2_inode hammer2_inode_t; 771 772 /* 773 * MODIFIED - Inode is in a modified state, ip->meta may have changes. 774 * RESIZED - Inode truncated (any) or inode extended beyond 775 * EMBEDDED_BYTES. 
 *
 * SYNCQ	- Inode is included in the current filesystem sync.  The
 *		  DELETING and CREATING flags will be acted upon.
 *
 * SIDEQ	- Inode has likely been disconnected from the vnode topology
 *		  and so is not visible to the vnode-based filesystem syncer
 *		  code, but is dirty and must be included in the next
 *		  filesystem sync.  These inodes are moved to the SYNCQ at
 *		  the time the sync occurs.
 *
 *		  If a vnode is attached, an inode is not placed on this
 *		  queue merely because it has become dirty.
 *
 * DELETING	- Inode is flagged for deletion during the next filesystem
 *		  sync.  That is, the inode's chain is currently connected
 *		  and must be deleted during the current or next fs sync.
 *
 * CREATING	- Inode is flagged for creation during the next filesystem
 *		  sync.  That is, the inode's chain topology exists (so
 *		  kernel buffer flushes can occur), but is currently
 *		  disconnected and must be inserted during the current or
 *		  next fs sync.  If the DELETING flag is also set, the
 *		  topology can be thrown away instead.
 *
 * If an inode that is already part of the current filesystem sync is
 * modified by the frontend, including by buffer flushes, the inode lock
 * code detects the SYNCQ flag and moves the inode to the head of the
 * flush-in-progress, then blocks until the flush has gotten past it.
 */
#define HAMMER2_INODE_MODIFIED		0x0001
#define HAMMER2_INODE_SROOT		0x0002	/* kmalloc special case */
#define HAMMER2_INODE_RENAME_INPROG	0x0004
#define HAMMER2_INODE_ONRBTREE		0x0008
#define HAMMER2_INODE_RESIZED		0x0010	/* requires inode_fsync */
#define HAMMER2_INODE_UNUSED0020	0x0020
#define HAMMER2_INODE_ISUNLINKED	0x0040
#define HAMMER2_INODE_METAGOOD		0x0080	/* inode meta-data good */
#define HAMMER2_INODE_SIDEQ		0x0100	/* on side processing queue */
#define HAMMER2_INODE_NOSIDEQ		0x0200	/* disable sideq operation */
#define HAMMER2_INODE_DIRTYDATA		0x0400	/* interlocks inode flush */
#define HAMMER2_INODE_SYNCQ		0x0800	/* sync interlock, sequenced */
#define HAMMER2_INODE_DELETING		0x1000	/* sync interlock, chain topo */
#define HAMMER2_INODE_CREATING		0x2000	/* sync interlock, chain topo */
#define HAMMER2_INODE_SYNCQ_WAKEUP	0x4000	/* sync interlock wakeup */
#define HAMMER2_INODE_SYNCQ_PASS2	0x8000	/* force retry delay */

#define HAMMER2_INODE_DIRTY		(HAMMER2_INODE_MODIFIED |	\
					 HAMMER2_INODE_DIRTYDATA |	\
					 HAMMER2_INODE_DELETING |	\
					 HAMMER2_INODE_CREATING)

int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
		hammer2_tid_t);

/*
 * Transaction management sub-structure under hammer2_pfs
 */
struct hammer2_trans {
	uint32_t	flags;
	uint32_t	sync_wait;
};

typedef struct hammer2_trans hammer2_trans_t;

#define HAMMER2_TRANS_ISFLUSH		0x80000000	/* flush code */
#define HAMMER2_TRANS_BUFCACHE		0x40000000	/* bio strategy */
#define HAMMER2_TRANS_SIDEQ		0x20000000	/* run sideq */
#define HAMMER2_TRANS_UNUSED10		0x10000000
#define HAMMER2_TRANS_WAITING		0x08000000	/* someone waiting */
#define HAMMER2_TRANS_RESCAN		0x04000000	/* rescan sideq */
#define HAMMER2_TRANS_MASK		0x00FFFFFF	/* count mask */

#define HAMMER2_FREEMAP_HEUR_NRADIX	4	/* pwr 2 PBUFRADIX-MINIORADIX */
#define HAMMER2_FREEMAP_HEUR_TYPES	8
#define HAMMER2_FREEMAP_HEUR_SIZE
(HAMMER2_FREEMAP_HEUR_NRADIX * \ 852 HAMMER2_FREEMAP_HEUR_TYPES) 853 854 #define HAMMER2_DEDUP_HEUR_SIZE (65536 * 4) 855 #define HAMMER2_DEDUP_HEUR_MASK (HAMMER2_DEDUP_HEUR_SIZE - 1) 856 857 #define HAMMER2_FLUSH_TOP 0x0001 858 #define HAMMER2_FLUSH_ALL 0x0002 859 #define HAMMER2_FLUSH_INODE_STOP 0x0004 /* stop at sub-inode */ 860 #define HAMMER2_FLUSH_FSSYNC 0x0008 /* part of filesystem sync */ 861 862 863 /* 864 * Hammer2 support thread element. 865 * 866 * Potentially many support threads can hang off of hammer2, primarily 867 * off the hammer2_pfs structure. Typically: 868 * 869 * td x Nodes A synchronization thread for each node. 870 * td x Nodes x workers Worker threads for frontend operations. 871 * td x 1 Bioq thread for logical buffer writes. 872 * 873 * In addition, the synchronization thread(s) associated with the 874 * super-root PFS (spmp) for a node is responsible for automatic bulkfree 875 * and dedup scans. 876 */ 877 struct hammer2_thread { 878 struct hammer2_pfs *pmp; 879 struct hammer2_dev *hmp; 880 hammer2_xop_list_t xopq; 881 thread_t td; 882 uint32_t flags; 883 int depth; 884 int clindex; /* cluster element index */ 885 int repidx; 886 char *scratch; /* MAXPHYS */ 887 }; 888 889 typedef struct hammer2_thread hammer2_thread_t; 890 891 #define HAMMER2_THREAD_UNMOUNTING 0x0001 /* unmount request */ 892 #define HAMMER2_THREAD_DEV 0x0002 /* related to dev, not pfs */ 893 #define HAMMER2_THREAD_WAITING 0x0004 /* thread in idle tsleep */ 894 #define HAMMER2_THREAD_REMASTER 0x0008 /* remaster request */ 895 #define HAMMER2_THREAD_STOP 0x0010 /* exit request */ 896 #define HAMMER2_THREAD_FREEZE 0x0020 /* force idle */ 897 #define HAMMER2_THREAD_FROZEN 0x0040 /* thread is frozen */ 898 #define HAMMER2_THREAD_XOPQ 0x0080 /* work pending */ 899 #define HAMMER2_THREAD_STOPPED 0x0100 /* thread has stopped */ 900 #define HAMMER2_THREAD_UNFREEZE 0x0200 901 902 #define HAMMER2_THREAD_WAKEUP_MASK (HAMMER2_THREAD_UNMOUNTING | \ 903 HAMMER2_THREAD_REMASTER | \ 904 HAMMER2_THREAD_STOP | \ 905 HAMMER2_THREAD_FREEZE | \ 906 HAMMER2_THREAD_XOPQ) 907 908 /* 909 * Support structure for dedup heuristic. 910 */ 911 struct hammer2_dedup { 912 hammer2_off_t data_off; 913 uint64_t data_crc; 914 uint32_t ticks; 915 uint32_t unused03; 916 }; 917 918 typedef struct hammer2_dedup hammer2_dedup_t; 919 920 /* 921 * hammer2_xop - container for VOP/XOP operation (allocated, not on stack). 922 * 923 * This structure is used to distribute a VOP operation across multiple 924 * nodes. It provides a rendezvous for concurrent node execution and 925 * can be detached from the frontend operation to allow the frontend to 926 * return early. 927 * 928 * This structure also sequences operations on up to three inodes. 
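 *
 * A hedged sketch of the typical frontend pattern (illustrative only;
 * hammer2_xop_start(), hammer2_xop_retire(), and the "some_xop_desc"
 * descriptor are assumed here and are not declared in this excerpt, while
 * the flags for hammer2_xop_alloc() and hammer2_xop_collect() appear
 * further below):
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *	hammer2_xop_start(&xop->head, &some_xop_desc);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);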
929 */ 930 typedef void (*hammer2_xop_func_t)(union hammer2_xop *xop, void *scratch, 931 int clindex); 932 933 struct hammer2_xop_desc { 934 hammer2_xop_func_t storage_func; /* local storage function */ 935 hammer2_xop_func_t dmsg_dispatch; /* dmsg dispatch function */ 936 hammer2_xop_func_t dmsg_process; /* dmsg processing function */ 937 const char *id; 938 }; 939 940 typedef struct hammer2_xop_desc hammer2_xop_desc_t; 941 942 struct hammer2_xop_fifo { 943 TAILQ_ENTRY(hammer2_xop_head) entry; 944 hammer2_chain_t *array[HAMMER2_XOPFIFO]; 945 int errors[HAMMER2_XOPFIFO]; 946 int ri; 947 int wi; 948 int flags; 949 hammer2_thread_t *thr; 950 }; 951 952 typedef struct hammer2_xop_fifo hammer2_xop_fifo_t; 953 954 #define HAMMER2_XOP_FIFO_RUN 0x0001 955 #define HAMMER2_XOP_FIFO_STALL 0x0002 956 957 struct hammer2_xop_head { 958 hammer2_xop_desc_t *desc; 959 hammer2_tid_t mtid; 960 struct hammer2_inode *ip1; 961 struct hammer2_inode *ip2; 962 struct hammer2_inode *ip3; 963 uint64_t run_mask; 964 uint64_t chk_mask; 965 int flags; 966 int state; 967 int error; 968 hammer2_key_t collect_key; 969 char *name1; 970 size_t name1_len; 971 char *name2; 972 size_t name2_len; 973 hammer2_xop_fifo_t collect[HAMMER2_MAXCLUSTER]; 974 hammer2_cluster_t cluster; /* help collections */ 975 hammer2_io_t *focus_dio; 976 }; 977 978 typedef struct hammer2_xop_head hammer2_xop_head_t; 979 980 struct hammer2_xop_ipcluster { 981 hammer2_xop_head_t head; 982 }; 983 984 struct hammer2_xop_strategy { 985 hammer2_xop_head_t head; 986 hammer2_key_t lbase; 987 int finished; 988 hammer2_mtx_t lock; 989 struct bio *bio; 990 }; 991 992 struct hammer2_xop_readdir { 993 hammer2_xop_head_t head; 994 hammer2_key_t lkey; 995 }; 996 997 struct hammer2_xop_nresolve { 998 hammer2_xop_head_t head; 999 hammer2_key_t lhc; /* if name is NULL used lhc */ 1000 }; 1001 1002 struct hammer2_xop_unlink { 1003 hammer2_xop_head_t head; 1004 int isdir; 1005 int dopermanent; 1006 }; 1007 1008 #define H2DOPERM_PERMANENT 0x01 1009 #define H2DOPERM_FORCE 0x02 1010 #define H2DOPERM_IGNINO 0x04 1011 1012 struct hammer2_xop_nrename { 1013 hammer2_xop_head_t head; 1014 hammer2_tid_t lhc; 1015 int ip_key; 1016 }; 1017 1018 struct hammer2_xop_scanlhc { 1019 hammer2_xop_head_t head; 1020 hammer2_key_t lhc; 1021 }; 1022 1023 struct hammer2_xop_scanall { 1024 hammer2_xop_head_t head; 1025 hammer2_key_t key_beg; /* inclusive */ 1026 hammer2_key_t key_end; /* inclusive */ 1027 int resolve_flags; 1028 int lookup_flags; 1029 }; 1030 1031 struct hammer2_xop_lookup { 1032 hammer2_xop_head_t head; 1033 hammer2_key_t lhc; 1034 }; 1035 1036 struct hammer2_xop_mkdirent { 1037 hammer2_xop_head_t head; 1038 hammer2_dirent_head_t dirent; 1039 hammer2_key_t lhc; 1040 }; 1041 1042 struct hammer2_xop_create { 1043 hammer2_xop_head_t head; 1044 hammer2_inode_meta_t meta; /* initial metadata */ 1045 hammer2_key_t lhc; 1046 int flags; 1047 }; 1048 1049 struct hammer2_xop_destroy { 1050 hammer2_xop_head_t head; 1051 }; 1052 1053 struct hammer2_xop_fsync { 1054 hammer2_xop_head_t head; 1055 hammer2_inode_meta_t meta; 1056 hammer2_off_t osize; 1057 u_int ipflags; 1058 int clear_directdata; 1059 }; 1060 1061 struct hammer2_xop_unlinkall { 1062 hammer2_xop_head_t head; 1063 hammer2_key_t key_beg; 1064 hammer2_key_t key_end; 1065 }; 1066 1067 struct hammer2_xop_connect { 1068 hammer2_xop_head_t head; 1069 hammer2_key_t lhc; 1070 }; 1071 1072 struct hammer2_xop_flush { 1073 hammer2_xop_head_t head; 1074 }; 1075 1076 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t; 1077 typedef 
struct hammer2_xop_nresolve hammer2_xop_nresolve_t; 1078 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t; 1079 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t; 1080 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t; 1081 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t; 1082 typedef struct hammer2_xop_mkdirent hammer2_xop_mkdirent_t; 1083 typedef struct hammer2_xop_create hammer2_xop_create_t; 1084 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t; 1085 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t; 1086 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t; 1087 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t; 1088 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t; 1089 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t; 1090 typedef struct hammer2_xop_connect hammer2_xop_connect_t; 1091 typedef struct hammer2_xop_flush hammer2_xop_flush_t; 1092 1093 union hammer2_xop { 1094 hammer2_xop_head_t head; 1095 hammer2_xop_ipcluster_t xop_ipcluster; 1096 hammer2_xop_readdir_t xop_readdir; 1097 hammer2_xop_nresolve_t xop_nresolve; 1098 hammer2_xop_unlink_t xop_unlink; 1099 hammer2_xop_nrename_t xop_nrename; 1100 hammer2_xop_strategy_t xop_strategy; 1101 hammer2_xop_mkdirent_t xop_mkdirent; 1102 hammer2_xop_create_t xop_create; 1103 hammer2_xop_destroy_t xop_destroy; 1104 hammer2_xop_fsync_t xop_fsync; 1105 hammer2_xop_unlinkall_t xop_unlinkall; 1106 hammer2_xop_scanlhc_t xop_scanlhc; 1107 hammer2_xop_scanall_t xop_scanall; 1108 hammer2_xop_lookup_t xop_lookup; 1109 hammer2_xop_flush_t xop_flush; 1110 hammer2_xop_connect_t xop_connect; 1111 }; 1112 1113 typedef union hammer2_xop hammer2_xop_t; 1114 1115 /* 1116 * hammer2_xop_group - Manage XOP support threads. 1117 */ 1118 struct hammer2_xop_group { 1119 hammer2_thread_t thrs[HAMMER2_MAXCLUSTER]; 1120 }; 1121 1122 typedef struct hammer2_xop_group hammer2_xop_group_t; 1123 1124 /* 1125 * flags to hammer2_xop_collect() 1126 */ 1127 #define HAMMER2_XOP_COLLECT_NOWAIT 0x00000001 1128 #define HAMMER2_XOP_COLLECT_WAITALL 0x00000002 1129 1130 /* 1131 * flags to hammer2_xop_alloc() 1132 * 1133 * MODIFYING - This is a modifying transaction, allocate a mtid. 1134 * RECURSE - Recurse top-level inode (for root flushes) 1135 */ 1136 #define HAMMER2_XOP_MODIFYING 0x00000001 1137 #define HAMMER2_XOP_STRATEGY 0x00000002 1138 #define HAMMER2_XOP_INODE_STOP 0x00000004 1139 #define HAMMER2_XOP_VOLHDR 0x00000008 1140 #define HAMMER2_XOP_FSSYNC 0x00000010 1141 #define HAMMER2_XOP_IROOT 0x00000020 1142 1143 /* 1144 * Global (per partition) management structure, represents a hard block 1145 * device. Typically referenced by hammer2_chain structures when applicable. 1146 * Typically not used for network-managed elements. 1147 * 1148 * Note that a single hammer2_dev can be indirectly tied to multiple system 1149 * mount points. There is no direct relationship. System mounts are 1150 * per-cluster-id, not per-block-device, and a single hard mount might contain 1151 * many PFSs and those PFSs might combine together in various ways to form 1152 * the set of available clusters. 
 */
struct hammer2_dev {
	struct vnode	*devvp;		/* device vnode */
	int		ronly;		/* read-only mount */
	int		mount_count;	/* number of actively mounted PFSs */
	TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */

	struct malloc_type *mchain;
	int		nipstacks;
	int		maxipstacks;
	kdmsg_iocom_t	iocom;		/* volume-level dmsg interface */
	struct spinlock	io_spin;	/* iotree, iolruq access */
	struct hammer2_io_tree iotree;
	int		iofree_count;
	int		freemap_relaxed;
	hammer2_chain_t vchain;		/* anchor chain (topology) */
	hammer2_chain_t fchain;		/* anchor chain (freemap) */
	struct spinlock	list_spin;
	struct hammer2_pfs *spmp;	/* super-root pmp for transactions */
	struct lock	vollk;		/* lockmgr lock */
	struct lock	bulklk;		/* bulkfree operation lock */
	struct lock	bflock;		/* bulk-free manual function lock */
	hammer2_off_t	heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
	hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
	int		volhdrno;	/* last volhdrno written */
	uint32_t	hflags;		/* HMNT2 flags applicable to device */
	hammer2_off_t	free_reserved;	/* nominal free reserved */
	hammer2_thread_t bfthr;		/* bulk-free thread */
	char		devrepname[64];	/* for kprintf */
	hammer2_ioc_bulkfree_t bflast;	/* stats for last bulkfree run */
	hammer2_volume_data_t voldata;
	hammer2_volume_data_t volsync;	/* synchronized voldata */
};

typedef struct hammer2_dev hammer2_dev_t;

/*
 * Helper functions (chain must be locked for chain->error to be valid).
 */
static __inline
int
hammer2_chain_rdok(hammer2_chain_t *chain)
{
	return (chain->error == 0);
}

static __inline
int
hammer2_chain_wrok(hammer2_chain_t *chain)
{
	return (chain->error == 0 && chain->hmp->ronly == 0);
}

/*
 * Per-cluster management structure.  This structure will be tied to a
 * system mount point if the system is mounting the PFS, but is also used
 * to manage clusters encountered during the super-root scan or received
 * via LNK_SPANs that might not be mounted.
 *
 * This structure is also used to represent the super-root that hangs off
 * of a hard mount point.  The super-root is not really a cluster element.
 * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
 * this than to special-case super-root manipulation in the hammer2_chain*
 * code as being only hammer2_dev-related.
 *
 * pfs_mode and pfs_nmasters are rollup fields which critically describe
 * how elements of the cluster act on the cluster.  pfs_mode is only
 * applicable when a PFS is mounted by the system.  pfs_nmasters is our best
 * guess as to how many masters have been configured for a cluster and is
 * always applicable.  pfs_types[] is an array with a 1:1 correspondence to
 * the iroot cluster and describes the PFS types of the nodes making up the
 * cluster.
 *
 * WARNING! Portions of this structure have deferred initialization.  In
 *	    particular, if not mounted there will be no wthread.
 *	    Unmounted network PFSs will also be missing iroot, and numerous
 *	    other fields will not be initialized prior to mount.
 *
 *	    Synchronization threads are chain-specific and only applicable
 *	    to local hard PFS entries.
A hammer2_pfs structure may contain 1233 * more than one when multiple hard PFSs are present on the local 1234 * machine which require synchronization monitoring. Most PFSs 1235 * (such as snapshots) are 1xMASTER PFSs which do not need a 1236 * synchronization thread. 1237 * 1238 * WARNING! The chains making up pfs->iroot's cluster are accounted for in 1239 * hammer2_dev->mount_count when the pfs is associated with a mount 1240 * point. 1241 */ 1242 struct hammer2_pfs { 1243 struct mount *mp; 1244 TAILQ_ENTRY(hammer2_pfs) mntentry; /* hammer2_pfslist */ 1245 uuid_t pfs_clid; 1246 hammer2_dev_t *spmp_hmp; /* only if super-root pmp */ 1247 hammer2_dev_t *force_local; /* only if 'local' mount */ 1248 hammer2_inode_t *iroot; /* PFS root inode */ 1249 uint8_t pfs_types[HAMMER2_MAXCLUSTER]; 1250 char *pfs_names[HAMMER2_MAXCLUSTER]; 1251 hammer2_dev_t *pfs_hmps[HAMMER2_MAXCLUSTER]; 1252 hammer2_blockset_t pfs_iroot_blocksets[HAMMER2_MAXCLUSTER]; 1253 hammer2_trans_t trans; 1254 struct lock lock; /* PFS lock for certain ops */ 1255 struct lock lock_nlink; /* rename and nlink lock */ 1256 struct netexport export; /* nfs export */ 1257 int unused00; 1258 int ronly; /* read-only mount */ 1259 int hflags; /* pfs-specific mount flags */ 1260 struct malloc_type *minode; 1261 struct malloc_type *mmsg; 1262 struct spinlock inum_spin; /* inumber lookup */ 1263 struct hammer2_inode_tree inum_tree; /* (not applicable to spmp) */ 1264 long inum_count; /* #of inodes in inum_tree */ 1265 struct spinlock lru_spin; /* inumber lookup */ 1266 struct hammer2_chain_list lru_list; /* basis for LRU tests */ 1267 int lru_count; /* #of chains on LRU */ 1268 int flags; 1269 hammer2_tid_t modify_tid; /* modify transaction id */ 1270 hammer2_tid_t inode_tid; /* inode allocator */ 1271 uint8_t pfs_nmasters; /* total masters */ 1272 uint8_t pfs_mode; /* operating mode PFSMODE */ 1273 uint8_t unused01; 1274 uint8_t unused02; 1275 int free_ticks; /* free_* calculations */ 1276 long inmem_inodes; 1277 hammer2_off_t free_reserved; 1278 hammer2_off_t free_nominal; 1279 uint32_t inmem_dirty_chains; 1280 int count_lwinprog; /* logical write in prog */ 1281 struct spinlock list_spin; 1282 struct inoq_head syncq; /* SYNCQ flagged inodes */ 1283 struct depq_head depq; /* SIDEQ flagged inodes */ 1284 long sideq_count; /* total inodes on depq */ 1285 hammer2_thread_t sync_thrs[HAMMER2_MAXCLUSTER]; 1286 uint32_t cluster_flags; /* cached cluster flags */ 1287 int has_xop_threads; 1288 struct spinlock xop_spin; /* xop sequencer */ 1289 hammer2_xop_group_t xop_groups[HAMMER2_XOPGROUPS]; 1290 }; 1291 1292 typedef struct hammer2_pfs hammer2_pfs_t; 1293 1294 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs); 1295 1296 #define HAMMER2_PMPF_SPMP 0x00000001 1297 1298 /* 1299 * NOTE: The LRU list contains at least all the chains with refs == 0 1300 * that can be recycled, and may contain additional chains which 1301 * cannot. 
1302 */ 1303 #define HAMMER2_LRU_LIMIT 4096 1304 1305 #define HAMMER2_DIRTYCHAIN_WAITING 0x80000000 1306 #define HAMMER2_DIRTYCHAIN_MASK 0x7FFFFFFF 1307 1308 #define HAMMER2_LWINPROG_WAITING 0x80000000 1309 #define HAMMER2_LWINPROG_WAITING0 0x40000000 1310 #define HAMMER2_LWINPROG_MASK 0x3FFFFFFF 1311 1312 /* 1313 * hammer2_cluster_check 1314 */ 1315 #define HAMMER2_CHECK_NULL 0x00000001 1316 1317 /* 1318 * Misc 1319 */ 1320 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) 1321 #define VTOI(vp) ((hammer2_inode_t *)(vp)->v_data) 1322 #endif 1323 1324 #if defined(_KERNEL) 1325 1326 MALLOC_DECLARE(M_HAMMER2); 1327 1328 #define ITOV(ip) ((ip)->vp) 1329 1330 /* 1331 * Currently locked chains retain the locked buffer cache buffer for 1332 * indirect blocks, and indirect blocks can be one of two sizes. The 1333 * device buffer has to match the case to avoid deadlocking recursive 1334 * chains that might otherwise try to access different offsets within 1335 * the same device buffer. 1336 */ 1337 static __inline 1338 int 1339 hammer2_devblkradix(int radix) 1340 { 1341 #if 0 1342 if (radix <= HAMMER2_LBUFRADIX) { 1343 return (HAMMER2_LBUFRADIX); 1344 } else { 1345 return (HAMMER2_PBUFRADIX); 1346 } 1347 #endif 1348 return (HAMMER2_PBUFRADIX); 1349 } 1350 1351 /* 1352 * XXX almost time to remove this. DIO uses PBUFSIZE exclusively now. 1353 */ 1354 static __inline 1355 size_t 1356 hammer2_devblksize(size_t bytes) 1357 { 1358 #if 0 1359 if (bytes <= HAMMER2_LBUFSIZE) { 1360 return(HAMMER2_LBUFSIZE); 1361 } else { 1362 KKASSERT(bytes <= HAMMER2_PBUFSIZE && 1363 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1)); 1364 return (HAMMER2_PBUFSIZE); 1365 } 1366 #endif 1367 return (HAMMER2_PBUFSIZE); 1368 } 1369 1370 1371 static __inline 1372 hammer2_pfs_t * 1373 MPTOPMP(struct mount *mp) 1374 { 1375 return ((hammer2_pfs_t *)mp->mnt_data); 1376 } 1377 1378 #define HAMMER2_DEDUP_FRAG (HAMMER2_PBUFSIZE / 64) 1379 #define HAMMER2_DEDUP_FRAGRADIX (HAMMER2_PBUFRADIX - 6) 1380 1381 static __inline 1382 uint64_t 1383 hammer2_dedup_mask(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes) 1384 { 1385 int bbeg; 1386 int bits; 1387 uint64_t mask; 1388 1389 bbeg = (int)((data_off & ~HAMMER2_OFF_MASK_RADIX) - dio->pbase) >> 1390 HAMMER2_DEDUP_FRAGRADIX; 1391 bits = (int)((bytes + (HAMMER2_DEDUP_FRAG - 1)) >> 1392 HAMMER2_DEDUP_FRAGRADIX); 1393 mask = ((uint64_t)1 << bbeg) - 1; 1394 if (bbeg + bits == 64) 1395 mask = (uint64_t)-1; 1396 else 1397 mask = ((uint64_t)1 << (bbeg + bits)) - 1; 1398 1399 mask &= ~(((uint64_t)1 << bbeg) - 1); 1400 1401 return mask; 1402 } 1403 1404 static __inline 1405 int 1406 hammer2_error_to_errno(int error) 1407 { 1408 if (error) { 1409 if (error & HAMMER2_ERROR_EIO) 1410 error = EIO; 1411 else if (error & HAMMER2_ERROR_CHECK) 1412 error = EDOM; 1413 else if (error & HAMMER2_ERROR_ABORTED) 1414 error = EINTR; 1415 else if (error & HAMMER2_ERROR_BADBREF) 1416 error = EIO; 1417 else if (error & HAMMER2_ERROR_ENOSPC) 1418 error = ENOSPC; 1419 else if (error & HAMMER2_ERROR_ENOENT) 1420 error = ENOENT; 1421 else if (error & HAMMER2_ERROR_ENOTEMPTY) 1422 error = ENOTEMPTY; 1423 else if (error & HAMMER2_ERROR_EAGAIN) 1424 error = EAGAIN; 1425 else if (error & HAMMER2_ERROR_ENOTDIR) 1426 error = ENOTDIR; 1427 else if (error & HAMMER2_ERROR_EISDIR) 1428 error = EISDIR; 1429 else if (error & HAMMER2_ERROR_EINPROGRESS) 1430 error = EINPROGRESS; 1431 else if (error & HAMMER2_ERROR_EEXIST) 1432 error = EEXIST; 1433 else 1434 error = EDOM; 1435 } 1436 return error; 1437 } 1438 1439 static __inline 1440 
int 1441 hammer2_errno_to_error(int error) 1442 { 1443 switch(error) { 1444 case 0: 1445 return 0; 1446 case EIO: 1447 return HAMMER2_ERROR_EIO; 1448 case EINVAL: 1449 default: 1450 return HAMMER2_ERROR_EINVAL; 1451 } 1452 } 1453 1454 1455 extern struct vop_ops hammer2_vnode_vops; 1456 extern struct vop_ops hammer2_spec_vops; 1457 extern struct vop_ops hammer2_fifo_vops; 1458 extern struct hammer2_pfslist hammer2_pfslist; 1459 extern struct lock hammer2_mntlk; 1460 1461 1462 extern int hammer2_debug; 1463 extern long hammer2_debug_inode; 1464 extern int hammer2_cluster_meta_read; 1465 extern int hammer2_cluster_data_read; 1466 extern int hammer2_cluster_write; 1467 extern int hammer2_dedup_enable; 1468 extern int hammer2_always_compress; 1469 extern int hammer2_inval_enable; 1470 extern int hammer2_flush_pipe; 1471 extern int hammer2_dio_count; 1472 extern int hammer2_dio_limit; 1473 extern int hammer2_bulkfree_tps; 1474 extern int hammer2_worker_rmask; 1475 extern long hammer2_chain_allocs; 1476 extern long hammer2_chain_frees; 1477 extern long hammer2_limit_dirty_chains; 1478 extern long hammer2_limit_dirty_inodes; 1479 extern long hammer2_count_modified_chains; 1480 extern long hammer2_iod_invals; 1481 extern long hammer2_iod_file_read; 1482 extern long hammer2_iod_meta_read; 1483 extern long hammer2_iod_indr_read; 1484 extern long hammer2_iod_fmap_read; 1485 extern long hammer2_iod_volu_read; 1486 extern long hammer2_iod_file_write; 1487 extern long hammer2_iod_file_wembed; 1488 extern long hammer2_iod_file_wzero; 1489 extern long hammer2_iod_file_wdedup; 1490 extern long hammer2_iod_meta_write; 1491 extern long hammer2_iod_indr_write; 1492 extern long hammer2_iod_fmap_write; 1493 extern long hammer2_iod_volu_write; 1494 1495 extern long hammer2_process_xxhash64; 1496 extern long hammer2_process_icrc32; 1497 1498 extern struct objcache *cache_buffer_read; 1499 extern struct objcache *cache_buffer_write; 1500 extern struct objcache *cache_xops; 1501 1502 /* 1503 * hammer2_subr.c 1504 */ 1505 #define hammer2_icrc32(buf, size) iscsi_crc32((buf), (size)) 1506 #define hammer2_icrc32c(buf, size, crc) iscsi_crc32_ext((buf), (size), (crc)) 1507 1508 int hammer2_signal_check(time_t *timep); 1509 const char *hammer2_error_str(int error); 1510 const char *hammer2_bref_type_str(hammer2_blockref_t *bref); 1511 1512 void hammer2_inode_delayed_sideq(hammer2_inode_t *ip); 1513 void hammer2_inode_lock(hammer2_inode_t *ip, int how); 1514 void hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2, 1515 hammer2_inode_t *ip3, hammer2_inode_t *ip4); 1516 void hammer2_inode_unlock(hammer2_inode_t *ip); 1517 void hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2); 1518 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how); 1519 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip, 1520 int clindex, hammer2_chain_t **parentp, int how); 1521 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip); 1522 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, 1523 hammer2_mtx_state_t ostate); 1524 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip); 1525 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int); 1526 1527 void hammer2_dev_exlock(hammer2_dev_t *hmp); 1528 void hammer2_dev_shlock(hammer2_dev_t *hmp); 1529 void hammer2_dev_unlock(hammer2_dev_t *hmp); 1530 1531 int hammer2_get_dtype(uint8_t type); 1532 int hammer2_get_vtype(uint8_t type); 1533 uint8_t hammer2_get_obj_type(enum vtype vtype); 1534 void 
void hammer2_dev_exlock(hammer2_dev_t *hmp);
void hammer2_dev_shlock(hammer2_dev_t *hmp);
void hammer2_dev_unlock(hammer2_dev_t *hmp);

int hammer2_get_dtype(uint8_t type);
int hammer2_get_vtype(uint8_t type);
uint8_t hammer2_get_obj_type(enum vtype vtype);
void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
uint64_t hammer2_timespec_to_time(const struct timespec *ts);
uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);
void hammer2_trans_manage_init(hammer2_pfs_t *pmp);

hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
int hammer2_getradix(size_t bytes);

int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
			hammer2_key_t *lbasep, hammer2_key_t *leofp);
int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
void hammer2_update_time(uint64_t *timep);
void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);

/*
 * hammer2_inode.c
 */
struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
			hammer2_tid_t inum);
hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp,
			hammer2_xop_head_t *xop, hammer2_tid_t inum, int idx);
void hammer2_inode_free(hammer2_inode_t *ip);
void hammer2_inode_ref(hammer2_inode_t *ip);
void hammer2_inode_drop(hammer2_inode_t *ip);
void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
			hammer2_cluster_t *cluster);
void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			int idx);
void hammer2_inode_modify(hammer2_inode_t *ip);
void hammer2_inode_run_sideq(hammer2_pfs_t *pmp, int doall);

hammer2_inode_t *hammer2_inode_create_normal(hammer2_inode_t *pip,
			struct vattr *vap, struct ucred *cred,
			hammer2_key_t inum, int *errorp);
hammer2_inode_t *hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			const uint8_t *name, size_t name_len,
			int *errorp);
int hammer2_inode_chain_ins(hammer2_inode_t *ip);
int hammer2_inode_chain_des(hammer2_inode_t *ip);
int hammer2_inode_chain_sync(hammer2_inode_t *ip);
int hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags);
int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
int hammer2_dirent_create(hammer2_inode_t *dip, const char *name,
			size_t name_len, hammer2_key_t inum, uint8_t type);

/*
 * hammer2_chain.c
 */
void hammer2_voldata_lock(hammer2_dev_t *hmp);
void hammer2_voldata_unlock(hammer2_dev_t *hmp);
void hammer2_voldata_modify(hammer2_dev_t *hmp);
hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
			hammer2_pfs_t *pmp,
			hammer2_blockref_t *bref);
void hammer2_chain_core_init(hammer2_chain_t *chain);
void hammer2_chain_ref(hammer2_chain_t *chain);
void hammer2_chain_ref_hold(hammer2_chain_t *chain);
void hammer2_chain_drop(hammer2_chain_t *chain);
void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
void hammer2_chain_unhold(hammer2_chain_t *chain);
void hammer2_chain_rehold(hammer2_chain_t *chain);
int hammer2_chain_lock(hammer2_chain_t *chain, int how);
void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
void hammer2_chain_load_data(hammer2_chain_t *chain);
const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);

int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
			int clindex, int flags,
			hammer2_chain_t **parentp,
			hammer2_chain_t **chainp);
int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
			hammer2_off_t dedup_off, int flags);
int hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
			hammer2_tid_t mtid, int flags);
int hammer2_chain_resize(hammer2_chain_t *chain,
			hammer2_tid_t mtid, hammer2_off_t dedup_off,
			int nradix, int flags);
void hammer2_chain_unlock(hammer2_chain_t *chain);
void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
void hammer2_chain_wait(hammer2_chain_t *chain);
hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
			hammer2_blockref_t *bref, int how);
hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
void hammer2_chain_lookup_done(hammer2_chain_t *parent);
hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t *chain, int flags);
hammer2_chain_t *hammer2_chain_repparent(hammer2_chain_t **chainp, int flags);
hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int *errorp, int flags);
hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
			hammer2_chain_t *chain,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int *errorp, int flags);
int hammer2_chain_scan(hammer2_chain_t *parent,
			hammer2_chain_t **chainp,
			hammer2_blockref_t *bref,
			int *firstp, int flags);

int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_dev_t *hmp, hammer2_pfs_t *pmp,
			int methods, hammer2_key_t key, int keybits,
			int type, size_t bytes, hammer2_tid_t mtid,
			hammer2_off_t dedup_off, int flags);
void hammer2_chain_rename(hammer2_chain_t **parentp,
			hammer2_chain_t *chain,
			hammer2_tid_t mtid, int flags);
int hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
			hammer2_tid_t mtid, int flags);
int hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
			hammer2_chain_t *chain);
void hammer2_chain_setflush(hammer2_chain_t *chain);
void hammer2_chain_countbrefs(hammer2_chain_t *chain,
			hammer2_blockref_t *base, int count);
hammer2_chain_t *hammer2_chain_bulksnap(hammer2_dev_t *hmp);
void hammer2_chain_bulkdrop(hammer2_chain_t *copy);

void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
int hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
			size_t name_len);

void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);

void hammer2_base_delete(hammer2_chain_t *parent,
			hammer2_blockref_t *base, int count,
			hammer2_chain_t *chain,
			hammer2_blockref_t *obref);
void hammer2_base_insert(hammer2_chain_t *parent,
			hammer2_blockref_t *base, int count,
			hammer2_chain_t *chain,
			hammer2_blockref_t *elm);

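/*
 * Illustrative sketch (editor's example, not code from the tree):
 * hammer2_chain_lookup() and hammer2_chain_next() are used as an
 * iteration pair.  The lookup returns a locked, referenced chain and
 * updates *key_nextp, which is fed back into the next call.  The parent
 * is typically obtained via hammer2_chain_lookup_init() and released
 * with hammer2_chain_lookup_done().  HAMMER2_LOOKUP_SHARED is assumed
 * here purely for illustration.
 */
#if 0
static void
hammer2_example_scan_range(hammer2_chain_t *parent,
			hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int error;

	chain = hammer2_chain_lookup(&parent, &key_next,
				     key_beg, key_end,
				     &error, HAMMER2_LOOKUP_SHARED);
	while (chain) {
		/* ... inspect chain->bref / chain->data here ... */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, key_end,
					   &error, HAMMER2_LOOKUP_SHARED);
	}
}
#endif
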
/*
 * hammer2_flush.c
 */
int hammer2_flush(hammer2_chain_t *chain, int istop);
void hammer2_delayed_flush(hammer2_chain_t *chain);

/*
 * hammer2_trans.c
 */
void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
void hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags);
void hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags);
hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
void hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags);
hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
void hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
			const char *data);

/*
 * hammer2_ioctl.c
 */
int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
			int fflag, struct ucred *cred);

/*
 * hammer2_io.c
 */
void hammer2_io_inval(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes);
void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
void hammer2_io_bkvasync(hammer2_io_t *dio);
void hammer2_io_dedup_set(hammer2_dev_t *hmp, hammer2_blockref_t *bref);
void hammer2_io_dedup_delete(hammer2_dev_t *hmp, uint8_t btype,
			hammer2_off_t data_off, u_int bytes);
void hammer2_io_dedup_assert(hammer2_dev_t *hmp, hammer2_off_t data_off,
			u_int bytes);
void hammer2_io_callback(struct bio *bio);
int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
			hammer2_io_t **diop);
int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
			hammer2_io_t **diop);
int _hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
			hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void hammer2_io_setdirty(hammer2_io_t *dio);

hammer2_io_t *_hammer2_io_getblk(hammer2_dev_t *hmp, int btype, off_t lbase,
			int lsize, int op HAMMER2_IO_DEBUG_ARGS);
hammer2_io_t *_hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase,
			int lsize HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_putblk(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
int _hammer2_io_bwrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_bawrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_bdwrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_brelse(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_bqrelse(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
void _hammer2_io_ref(hammer2_io_t *dio HAMMER2_IO_DEBUG_ARGS);

#ifndef HAMMER2_IO_DEBUG

#define hammer2_io_getblk(hmp, btype, lbase, lsize, op)			\
	_hammer2_io_getblk((hmp), (btype), (lbase), (lsize), (op))
#define hammer2_io_getquick(hmp, lbase, lsize)				\
	_hammer2_io_getquick((hmp), (lbase), (lsize))
#define hammer2_io_putblk(diop)						\
	_hammer2_io_putblk(diop)
#define hammer2_io_bwrite(diop)						\
	_hammer2_io_bwrite((diop))
#define hammer2_io_bawrite(diop)					\
	_hammer2_io_bawrite((diop))
#define hammer2_io_bdwrite(diop)					\
	_hammer2_io_bdwrite((diop))
#define hammer2_io_brelse(diop)						\
	_hammer2_io_brelse((diop))
#define hammer2_io_bqrelse(diop)					\
	_hammer2_io_bqrelse((diop))
#define hammer2_io_ref(dio)						\
	_hammer2_io_ref((dio))

#define hammer2_io_bread(hmp, btype, lbase, lsize, diop)		\
	_hammer2_io_bread((hmp), (btype), (lbase), (lsize), (diop))

#else

#define hammer2_io_getblk(hmp, btype, lbase, lsize, op)			\
	_hammer2_io_getblk((hmp), (btype), (lbase), (lsize), (op),	\
			   __FILE__, __LINE__)

#define hammer2_io_getquick(hmp, lbase, lsize)				\
	_hammer2_io_getquick((hmp), (lbase), (lsize), __FILE__, __LINE__)

#define hammer2_io_putblk(diop)						\
	_hammer2_io_putblk(diop, __FILE__, __LINE__)

#define hammer2_io_bwrite(diop)						\
	_hammer2_io_bwrite((diop), __FILE__, __LINE__)
#define hammer2_io_bawrite(diop)					\
	_hammer2_io_bawrite((diop), __FILE__, __LINE__)
#define hammer2_io_bdwrite(diop)					\
	_hammer2_io_bdwrite((diop), __FILE__, __LINE__)
#define hammer2_io_brelse(diop)						\
	_hammer2_io_brelse((diop), __FILE__, __LINE__)
#define hammer2_io_bqrelse(diop)					\
	_hammer2_io_bqrelse((diop), __FILE__, __LINE__)
#define hammer2_io_ref(dio)						\
	_hammer2_io_ref((dio), __FILE__, __LINE__)

#define hammer2_io_bread(hmp, btype, lbase, lsize, diop)		\
	_hammer2_io_bread((hmp), (btype), (lbase), (lsize), (diop),	\
			  __FILE__, __LINE__)

#endif

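/*
 * Illustrative sketch (editor's example, not code from the tree): the
 * hammer2_io_* wrappers above follow a bread/release pattern against the
 * device backing store.  The HAMMER2_BREF_TYPE_DATA btype and the direct
 * use of the returned error value are assumptions for illustration only.
 */
#if 0
static int
hammer2_example_read_devblk(hammer2_dev_t *hmp, off_t lbase, int lsize)
{
	hammer2_io_t *dio;
	char *data;
	int error;

	error = hammer2_io_bread(hmp, HAMMER2_BREF_TYPE_DATA,
				 lbase, lsize, &dio);
	if (error == 0) {
		data = hammer2_io_data(dio, lbase);
		/* ... consume up to lsize bytes at data ... */
		hammer2_io_bqrelse(&dio);
	}
	return (error);
}
#endif
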
/*
 * hammer2_thread.c
 */
void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
void hammer2_thr_signal2(hammer2_thread_t *thr,
			uint32_t pflags, uint32_t nflags);
void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
int hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo);
void hammer2_thr_create(hammer2_thread_t *thr,
			hammer2_pfs_t *pmp, hammer2_dev_t *hmp,
			const char *id, int clindex, int repidx,
			void (*func)(void *arg));
void hammer2_thr_delete(hammer2_thread_t *thr);
void hammer2_thr_remaster(hammer2_thread_t *thr);
void hammer2_thr_freeze_async(hammer2_thread_t *thr);
void hammer2_thr_freeze(hammer2_thread_t *thr);
void hammer2_thr_unfreeze(hammer2_thread_t *thr);
int hammer2_thr_break(hammer2_thread_t *thr);
void hammer2_primary_xops_thread(void *arg);

/*
 * hammer2_thread.c (XOP API)
 */
void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
void hammer2_xop_setname(hammer2_xop_head_t *xop,
			const char *name, size_t name_len);
void hammer2_xop_setname2(hammer2_xop_head_t *xop,
			const char *name, size_t name_len);
size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
void hammer2_xop_reinit(hammer2_xop_head_t *xop);
void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc);
void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
			int notidx);
int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
void hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask);
int hammer2_xop_active(hammer2_xop_head_t *xop);
int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
			int clindex, int error);

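/*
 * Illustrative sketch (editor's example, not code from the tree): the
 * front-end side of a XOP allocates the operation against an inode,
 * parameterizes it, starts it via its descriptor, collects the result,
 * and retires it.  The hammer2_xop_nresolve_t type, the zero flags, and
 * the HAMMER2_XOPMASK_VOP retire mask are assumptions for illustration;
 * the caller is normally expected to hold the inode lock.
 */
#if 0
static int
hammer2_example_nresolve(hammer2_inode_t *dip,
			const char *name, size_t name_len)
{
	hammer2_xop_nresolve_t *xop;
	int error;

	xop = hammer2_xop_alloc(dip, 0);
	hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	/* ... on success use xop->head.cluster.focus ... */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	return (hammer2_error_to_errno(error));
}
#endif
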
/*
 * hammer2_synchro.c
 */
void hammer2_primary_sync_thread(void *arg);

/*
 * XOP backends in hammer2_xops.c, primarily for VNOPS.  Other XOP backends
 * may be integrated into other source files.
 */
void hammer2_xop_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_delete(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_mkdirent(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_create_det(hammer2_xop_t *xop,
			void *scratch, int clindex);
void hammer2_xop_inode_create_ins(hammer2_xop_t *xop,
			void *scratch, int clindex);
void hammer2_xop_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_xop_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_xop_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_xop_strategy_write(hammer2_xop_t *xop, void *scratch, int clindex);

void hammer2_dmsg_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_mkdirent(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_dmsg_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_dmsg_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_dmsg_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_dmsg_strategy_write(hammer2_xop_t *xop, void *scratch,
			int clindex);

void hammer2_rmsg_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_mkdirent(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_rmsg_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_rmsg_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
			int clindex);
void hammer2_rmsg_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
void hammer2_rmsg_strategy_write(hammer2_xop_t *xop, void *scratch,
			int clindex);

extern hammer2_xop_desc_t hammer2_ipcluster_desc;
extern hammer2_xop_desc_t hammer2_readdir_desc;
extern hammer2_xop_desc_t hammer2_nresolve_desc;
extern hammer2_xop_desc_t hammer2_unlink_desc;
extern hammer2_xop_desc_t hammer2_nrename_desc;
extern hammer2_xop_desc_t hammer2_scanlhc_desc;
extern hammer2_xop_desc_t hammer2_scanall_desc;
extern hammer2_xop_desc_t hammer2_lookup_desc;
extern hammer2_xop_desc_t hammer2_delete_desc;
extern hammer2_xop_desc_t hammer2_inode_mkdirent_desc;
extern hammer2_xop_desc_t hammer2_inode_create_desc;
extern hammer2_xop_desc_t hammer2_inode_create_det_desc;
extern hammer2_xop_desc_t hammer2_inode_create_ins_desc;
extern hammer2_xop_desc_t hammer2_inode_destroy_desc;
extern hammer2_xop_desc_t hammer2_inode_chain_sync_desc;
extern hammer2_xop_desc_t hammer2_inode_unlinkall_desc;
extern hammer2_xop_desc_t hammer2_inode_connect_desc;
extern hammer2_xop_desc_t hammer2_inode_flush_desc;
extern hammer2_xop_desc_t hammer2_strategy_read_desc;
extern hammer2_xop_desc_t hammer2_strategy_write_desc;

/*
 * hammer2_msgops.c
 */
int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);

/*
 * hammer2_vfsops.c
 */
void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx,
			u_int flags);
int hammer2_vfs_sync(struct mount *mp, int waitflags);
int hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor);
int hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred);

hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
			const hammer2_inode_data_t *ripdata,
			hammer2_tid_t modify_tid,
			hammer2_dev_t *force_local);
void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
			ino_t ino, struct vnode **vpp);

void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);

/*
 * hammer2_freemap.c
 */
int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
void hammer2_freemap_adjust(hammer2_dev_t *hmp,
			hammer2_blockref_t *bref, int how);

/*
 * hammer2_cluster.c
 */
uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
			hammer2_blockref_t *bref);
void hammer2_cluster_ref(hammer2_cluster_t *cluster);
void hammer2_cluster_drop(hammer2_cluster_t *cluster);
void hammer2_cluster_unhold(hammer2_cluster_t *cluster);
void hammer2_cluster_rehold(hammer2_cluster_t *cluster);
void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
			int flags);
void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
void hammer2_cluster_unlock(hammer2_cluster_t *cluster);

void hammer2_bulkfree_init(hammer2_dev_t *hmp);
void hammer2_bulkfree_uninit(hammer2_dev_t *hmp);
int hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
			struct hammer2_ioc_bulkfree *bfi);
void hammer2_dummy_xop_from_chain(hammer2_xop_head_t *xop,
			hammer2_chain_t *chain);

/*
 * hammer2_iocom.c
 */
void hammer2_iocom_init(hammer2_dev_t *hmp);
void hammer2_iocom_uninit(hammer2_dev_t *hmp);
void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);

/*
 * hammer2_strategy.c
 */
int hammer2_vop_strategy(struct vop_strategy_args *ap);
int hammer2_vop_bmap(struct vop_bmap_args *ap);
void hammer2_write_thread(void *arg);
void hammer2_bioq_sync(hammer2_pfs_t *pmp);
void hammer2_dedup_clear(hammer2_dev_t *hmp);

/*
 * More complex inlines.
 *
 * hammer2_xop_gdata() returns the focus chain's media data, referencing
 * the backing DIO (if any) so the buffer cannot be ripped out from under
 * the caller.  hammer2_xop_pdata() releases that reference.
 */

#define hammer2_xop_gdata(xop)	_hammer2_xop_gdata((xop), __FILE__, __LINE__)

static __inline
const hammer2_media_data_t *
_hammer2_xop_gdata(hammer2_xop_head_t *xop, const char *file, int line)
{
	hammer2_chain_t *focus;
	const void *data;

	focus = xop->cluster.focus;
	if (focus->dio) {
		lockmgr(&focus->diolk, LK_SHARED);
		/* re-check focus->dio under the diolk interlock */
		if ((xop->focus_dio = focus->dio) != NULL) {
			_hammer2_io_ref(xop->focus_dio HAMMER2_IO_DEBUG_CALL);
			hammer2_io_bkvasync(xop->focus_dio);
		}
		data = focus->data;
		lockmgr(&focus->diolk, LK_RELEASE);
	} else {
		data = focus->data;
	}

	return data;
}

#define hammer2_xop_pdata(xop)	_hammer2_xop_pdata((xop), __FILE__, __LINE__)

static __inline
void
_hammer2_xop_pdata(hammer2_xop_head_t *xop, const char *file, int line)
{
	if (xop->focus_dio)
		_hammer2_io_putblk(&xop->focus_dio HAMMER2_IO_DEBUG_CALL);
}

#endif /* !_KERNEL */
#endif /* !_VFS_HAMMER2_HAMMER2_H_ */