1 /* 2 * Copyright (c) 2011-2014 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 /* 37 * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES 38 * 39 * This header file contains structures used internally by the HAMMER2 40 * implementation. 
See hammer2_disk.h for on-disk structures. 41 * 42 * There is an in-memory representation of all on-media data structure. 43 * Basically everything is represented by a hammer2_chain structure 44 * in-memory and other higher-level structures map to chains. 45 * 46 * A great deal of data is accessed simply via its buffer cache buffer, 47 * which is mapped for the duration of the chain's lock. However, because 48 * chains may represent blocks smaller than the 16KB minimum we impose 49 * on buffer cache buffers, we cannot hold related buffer cache buffers 50 * locked for smaller blocks. In these situations we kmalloc() a copy 51 * of the block. 52 * 53 * When modifications are made to a chain a new filesystem block must be 54 * allocated. Multiple modifications do not necessarily allocate new 55 * blocks. However, when a flush occurs a flush synchronization point 56 * is created and any new modifications made after this point will allocate 57 * a new block even if the chain is already in a modified state. 58 * 59 * The in-memory representation may remain cached (for example in order to 60 * placemark clustering locks) even after the related data has been 61 * detached. 62 * 63 * CORE SHARING 64 * 65 * In order to support concurrent flushes a flush synchronization point 66 * is created represented by a transaction id. Among other things, 67 * operations may move filesystem objects from one part of the topology 68 * to another (for example, if you rename a file or when indirect blocks 69 * are created or destroyed, and a few other things). When this occurs 70 * across a flush synchronization point the flusher needs to be able to 71 * recurse down BOTH the 'before' version of the topology and the 'after' 72 * version. 73 * 74 * To facilitate this modifications to chains do what is called a 75 * DELETE-DUPLICATE operation. Chains are not actually moved in-memory. 
76 * Instead the chain we wish to move is deleted and a new chain is created 77 * at the target location in the topology. ANY SUBCHAINS PLACED UNDER THE 78 * CHAIN BEING MOVED HAVE TO EXIST IN BOTH PLACES. To make this work 79 * all sub-chains are managed by the hammer2_chain_core structure. This 80 * structure can be multi-homed, meaning that it can have more than one 81 * chain as its parent. When a chain is delete-duplicated the chain's core 82 * becomes shared under both the old and new chain. 83 * 84 * STALE CHAINS 85 * 86 * When a chain is delete-duplicated the old chain typically becomes stale. 87 * This is detected via the HAMMER2_CHAIN_DUPLICATED flag in chain->flags. 88 * To avoid executing live filesystem operations on stale chains, the inode 89 * locking code will follow stale chains via core->ownerq until it finds 90 * the live chain. The lock prevents ripups by other threads. Lookups 91 * must properly order locking operations to prevent other threads from 92 * racing the lookup operation and will also follow stale chains when 93 * required. 
94 */ 95 96 #ifndef _VFS_HAMMER2_HAMMER2_H_ 97 #define _VFS_HAMMER2_HAMMER2_H_ 98 99 #include <sys/param.h> 100 #include <sys/types.h> 101 #include <sys/kernel.h> 102 #include <sys/conf.h> 103 #include <sys/systm.h> 104 #include <sys/tree.h> 105 #include <sys/malloc.h> 106 #include <sys/mount.h> 107 #include <sys/vnode.h> 108 #include <sys/proc.h> 109 #include <sys/mountctl.h> 110 #include <sys/priv.h> 111 #include <sys/stat.h> 112 #include <sys/thread.h> 113 #include <sys/globaldata.h> 114 #include <sys/lockf.h> 115 #include <sys/buf.h> 116 #include <sys/queue.h> 117 #include <sys/limits.h> 118 #include <sys/signal2.h> 119 #include <sys/dmsg.h> 120 #include <sys/mutex.h> 121 #include <sys/kern_syscall.h> 122 123 #include <sys/buf2.h> 124 #include <sys/mutex2.h> 125 126 #include "hammer2_disk.h" 127 #include "hammer2_mount.h" 128 #include "hammer2_ioctl.h" 129 #include "hammer2_ccms.h" 130 131 struct hammer2_chain; 132 struct hammer2_cluster; 133 struct hammer2_inode; 134 struct hammer2_mount; 135 struct hammer2_pfsmount; 136 struct hammer2_span; 137 struct hammer2_state; 138 struct hammer2_msg; 139 140 /* 141 * The xid tracks internal transactional updates. 142 * 143 * XXX fix-me, really needs to be 64-bits 144 */ 145 typedef uint32_t hammer2_xid_t; 146 147 #define HAMMER2_XID_MIN 0x00000000U 148 #define HAMMER2_XID_MAX 0x7FFFFFFFU 149 150 /* 151 * The chain structure tracks a portion of the media topology from the 152 * root (volume) down. Chains represent volumes, inodes, indirect blocks, 153 * data blocks, and freemap nodes and leafs. 154 * 155 * The chain structure can be multi-homed and its topological recursion 156 * (chain->core) can be shared amongst several chains. Chain structures 157 * are topologically stable once placed in the in-memory topology (they 158 * don't move around). 
Modifications which cross flush synchronization 159 * boundaries, renames, resizing, or any move of the chain to elsewhere 160 * in the topology is accomplished via the DELETE-DUPLICATE mechanism. 161 * 162 * Deletions and delete-duplicates: 163 * 164 * Any movement of chains within the topology utilizes a delete-duplicate 165 * operation instead of a simple rename. That is, the chain must be 166 * deleted from its original location and then duplicated to the new 167 * location. A new chain structure is allocated while the old is 168 * deleted. Deleted chains are removed from the above chain_core's 169 * rbtree but remain linked via the shadow topology for flush 170 * synchronization purposes. 171 * 172 * delete_bmap is allocated and a bit set if the chain was originally 173 * loaded via the blockmap. 174 * 175 * Flush synchronization: 176 * 177 * Flushes must synchronize chains up through the root. To do this 178 * the in-memory topology would normally have to be frozen during the 179 * flush. To avoid freezing the topology and to allow concurrent 180 * foreground / flush activity, any new modifications made while a 181 * flush is in progress retain the original chain in a shadow topology 182 * that is only visible to the flush code. Only one flush can be 183 * running at a time so the shadow hierarchy can be implemented with 184 * just a few link fields in our in-memory data structures. 185 * 186 * Advantages: 187 * 188 * (1) Fully coherent snapshots can be taken without requiring 189 * a pre-flush, resulting in extremely fast (sub-millisecond) 190 * snapshots. 191 * 192 * (2) Multiple synchronization points can be in-flight at the same 193 * time, representing multiple snapshots or flushes. 194 * 195 * (3) The algorithms needed to keep track of everything are actually 196 * not that complex.
197 * 198 * Special Considerations: 199 * 200 * A chain is ref-counted on a per-chain basis, but the chain's lock 201 * is associated with the shared chain_core and is not per-chain. 202 * 203 * The power-of-2 nature of the media radix tree ensures that there 204 * will be no overlaps which straddle edges. 205 */ 206 RB_HEAD(hammer2_chain_tree, hammer2_chain); 207 TAILQ_HEAD(h2_flush_deferral_list, hammer2_chain); 208 TAILQ_HEAD(h2_core_list, hammer2_chain); 209 210 #define CHAIN_CORE_DELETE_BMAP_ENTRIES \ 211 (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t)) 212 213 struct hammer2_chain_core { 214 int good; 215 struct ccms_cst cst; 216 struct h2_core_list ownerq; /* all chains sharing this core */ 217 struct hammer2_chain_tree rbtree; /* live chains */ 218 struct hammer2_chain_tree dbtree; /* bmapped deletions */ 219 struct h2_core_list dbq; /* other deletions */ 220 int live_zero; /* blockref array opt */ 221 u_int sharecnt; 222 u_int flags; 223 u_int live_count; /* live (not deleted) chains in tree */ 224 u_int chain_count; /* live + deleted chains under core */ 225 int generation; /* generation number (inserts only) */ 226 }; 227 228 typedef struct hammer2_chain_core hammer2_chain_core_t; 229 230 #define HAMMER2_CORE_UNUSED0001 0x0001 231 #define HAMMER2_CORE_COUNTEDBREFS 0x0002 232 233 /* 234 * H2 is a copy-on-write filesystem. In order to allow chains to allocate 235 * smaller blocks (down to 64-bytes), but improve performance and make 236 * clustered I/O possible using larger block sizes, the kernel buffer cache 237 * is abstracted via the hammer2_io structure. 
238 */ 239 RB_HEAD(hammer2_io_tree, hammer2_io); 240 241 struct hammer2_io { 242 RB_ENTRY(hammer2_io) rbnode; /* indexed by device offset */ 243 struct spinlock spin; 244 struct hammer2_mount *hmp; 245 struct buf *bp; 246 struct bio *bio; 247 off_t pbase; 248 int psize; 249 void (*callback)(struct hammer2_io *dio, 250 struct hammer2_cluster *cluster, 251 struct hammer2_chain *chain, 252 void *arg1, off_t arg2); 253 struct hammer2_cluster *arg_l; /* INPROG I/O only */ 254 struct hammer2_chain *arg_c; /* INPROG I/O only */ 255 void *arg_p; /* INPROG I/O only */ 256 off_t arg_o; /* INPROG I/O only */ 257 int refs; 258 int act; /* activity */ 259 }; 260 261 typedef struct hammer2_io hammer2_io_t; 262 263 /* 264 * Primary chain structure keeps track of the topology in-memory. 265 */ 266 struct hammer2_chain { 267 TAILQ_ENTRY(hammer2_chain) core_entry; /* contemporary chains */ 268 RB_ENTRY(hammer2_chain) rbnode; /* live chain(s) */ 269 TAILQ_ENTRY(hammer2_chain) db_entry; /* non bmapped deletions */ 270 hammer2_blockref_t bref; 271 hammer2_chain_core_t *core; 272 hammer2_chain_core_t *above; 273 struct hammer2_state *state; /* if active cache msg */ 274 struct hammer2_mount *hmp; 275 struct hammer2_pfsmount *pmp; /* (pfs-cluster pmp or spmp) */ 276 277 hammer2_xid_t modify_xid; /* flush filter */ 278 hammer2_xid_t delete_xid; /* flush filter */ 279 hammer2_xid_t update_xlo; /* flush propagation */ 280 hammer2_xid_t update_xhi; /* setsubmod propagation */ 281 hammer2_key_t data_count; /* delta's to apply */ 282 hammer2_key_t inode_count; /* delta's to apply */ 283 hammer2_io_t *dio; /* physical data buffer */ 284 u_int bytes; /* physical data size */ 285 u_int flags; 286 u_int refs; 287 u_int lockcnt; 288 hammer2_media_data_t *data; /* data pointer shortcut */ 289 TAILQ_ENTRY(hammer2_chain) flush_node; /* flush deferral list */ 290 291 int inode_reason; 292 }; 293 294 typedef struct hammer2_chain hammer2_chain_t; 295 296 int hammer2_chain_cmp(hammer2_chain_t *chain1, 
hammer2_chain_t *chain2); 297 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp); 298 299 /* 300 * Special notes on flags: 301 * 302 * INITIAL - This flag allows a chain to be created and for storage to 303 * be allocated without having to immediately instantiate the 304 * related buffer. The data is assumed to be all-zeros. It 305 * is primarily used for indirect blocks. 306 * 307 * MODIFIED- The chain's media data has been modified. 308 */ 309 #define HAMMER2_CHAIN_MODIFIED 0x00000001 /* dirty chain data */ 310 #define HAMMER2_CHAIN_ALLOCATED 0x00000002 /* kmalloc'd chain */ 311 #define HAMMER2_CHAIN_FLUSH_TEMPORARY 0x00000004 312 #define HAMMER2_CHAIN_FORCECOW 0x00000008 /* force copy-on-wr */ 313 #define HAMMER2_CHAIN_DELETED 0x00000010 /* deleted chain */ 314 #define HAMMER2_CHAIN_INITIAL 0x00000020 /* initial create */ 315 #define HAMMER2_CHAIN_FLUSH_CREATE 0x00000040 /* needs flush blkadd */ 316 #define HAMMER2_CHAIN_FLUSH_DELETE 0x00000080 /* needs flush blkdel */ 317 #define HAMMER2_CHAIN_IOFLUSH 0x00000100 /* bawrite on put */ 318 #define HAMMER2_CHAIN_DEFERRED 0x00000200 /* on a deferral list */ 319 #define HAMMER2_CHAIN_UNLINKED 0x00000400 /* delete on reclaim */ 320 #define HAMMER2_CHAIN_VOLUMESYNC 0x00000800 /* needs volume sync */ 321 #define HAMMER2_CHAIN_ONDBQ 0x00001000 /* !bmapped deletes */ 322 #define HAMMER2_CHAIN_MOUNTED 0x00002000 /* PFS is mounted */ 323 #define HAMMER2_CHAIN_ONRBTREE 0x00004000 /* on parent RB tree */ 324 #define HAMMER2_CHAIN_SNAPSHOT 0x00008000 /* snapshot special */ 325 #define HAMMER2_CHAIN_EMBEDDED 0x00010000 /* embedded data */ 326 #define HAMMER2_CHAIN_RELEASE 0x00020000 /* don't keep around */ 327 #define HAMMER2_CHAIN_BMAPPED 0x00040000 /* in parent blkmap */ 328 #define HAMMER2_CHAIN_ONDBTREE 0x00080000 /* bmapped deletes */ 329 #define HAMMER2_CHAIN_DUPLICATED 0x00100000 /* fwd delete-dup */ 330 #define HAMMER2_CHAIN_PFSROOT 0x00200000 /* in pfs->cluster */ 331 #define 
HAMMER2_CHAIN_PFSBOUNDARY 0x00400000 /* super->pfs inode */ 332 333 /* 334 * Flags passed to hammer2_chain_lookup() and hammer2_chain_next() 335 * 336 * NOTE: MATCHIND allows an indirect block / freemap node to be returned 337 * when the passed key range matches the radix. Remember that key_end 338 * is inclusive (e.g. {0x000,0xFFF}, not {0x000,0x1000}). 339 */ 340 #define HAMMER2_LOOKUP_NOLOCK 0x00000001 /* ref only */ 341 #define HAMMER2_LOOKUP_NODATA 0x00000002 /* data left NULL */ 342 #define HAMMER2_LOOKUP_SHARED 0x00000100 343 #define HAMMER2_LOOKUP_MATCHIND 0x00000200 /* return all chains */ 344 #define HAMMER2_LOOKUP_UNUSED0400 0x00000400 345 #define HAMMER2_LOOKUP_ALWAYS 0x00000800 /* resolve data */ 346 347 /* 348 * Flags passed to hammer2_chain_modify() and hammer2_chain_resize() 349 * 350 * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT 351 * blocks in the INITIAL-create state. 352 */ 353 #define HAMMER2_MODIFY_OPTDATA 0x00000002 /* data can be NULL */ 354 #define HAMMER2_MODIFY_NO_MODIFY_TID 0x00000004 355 #define HAMMER2_MODIFY_ASSERTNOCOPY 0x00000008 /* assert no del-dup */ 356 #define HAMMER2_MODIFY_NOREALLOC 0x00000010 357 #define HAMMER2_MODIFY_INPLACE 0x00000020 /* don't del-dup */ 358 359 /* 360 * Flags passed to hammer2_chain_lock() 361 */ 362 #define HAMMER2_RESOLVE_NEVER 1 363 #define HAMMER2_RESOLVE_MAYBE 2 364 #define HAMMER2_RESOLVE_ALWAYS 3 365 #define HAMMER2_RESOLVE_MASK 0x0F 366 367 #define HAMMER2_RESOLVE_SHARED 0x10 /* request shared lock */ 368 #define HAMMER2_RESOLVE_NOREF 0x20 /* already ref'd on lock */ 369 370 /* 371 * Flags passed to hammer2_chain_delete() 372 */ 373 #define HAMMER2_DELETE_UNUSED0001 0x0001 374 375 /* 376 * Flags passed to hammer2_chain_delete_duplicate() 377 */ 378 #define HAMMER2_DELDUP_RECORE 0x0001 379 380 /* 381 * Cluster different types of storage together for allocations 382 */ 383 #define HAMMER2_FREECACHE_INODE 0 384 #define HAMMER2_FREECACHE_INDIR 1 385 #define 
HAMMER2_FREECACHE_DATA 2 386 #define HAMMER2_FREECACHE_UNUSED3 3 387 #define HAMMER2_FREECACHE_TYPES 4 388 389 /* 390 * hammer2_freemap_alloc() block preference 391 */ 392 #define HAMMER2_OFF_NOPREF ((hammer2_off_t)-1) 393 394 /* 395 * BMAP read-ahead maximum parameters 396 */ 397 #define HAMMER2_BMAP_COUNT 16 /* max bmap read-ahead */ 398 #define HAMMER2_BMAP_BYTES (HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT) 399 400 /* 401 * Misc 402 */ 403 #define HAMMER2_FLUSH_DEPTH_LIMIT 10 /* stack recursion limit */ 404 405 /* 406 * hammer2_freemap_adjust() 407 */ 408 #define HAMMER2_FREEMAP_DORECOVER 1 409 #define HAMMER2_FREEMAP_DOMAYFREE 2 410 #define HAMMER2_FREEMAP_DOREALFREE 3 411 412 /* 413 * HAMMER2 cluster - A set of chains representing the same entity. 414 * 415 * The hammer2_pfsmount structure embeds a hammer2_cluster. All other 416 * hammer2_cluster use cases use temporary allocations. 417 * 418 * The cluster API mimics the chain API. Except as used in the pfsmount, 419 * the cluster structure is a temporary 'working copy' of a set of chains 420 * representing targets compatible with the operation. However, for 421 * performance reasons the cluster API does not necessarily issue concurrent 422 * requests to the underlying chain API for all compatible chains all the 423 * time. This may sometimes necessitate revisiting parent cluster nodes 424 * to 'flesh out' (validate more chains). 425 * 426 * If an insufficient number of chains remain in a working copy, the operation 427 * may have to be downgraded, retried, or stall until the requisit number 428 * of chains are available. 
429 */ 430 #define HAMMER2_MAXCLUSTER 8 431 432 struct hammer2_cluster { 433 int status; /* operational status */ 434 int refs; /* track for deallocation */ 435 struct hammer2_pfsmount *pmp; 436 uint32_t flags; 437 int nchains; 438 hammer2_chain_t *focus; /* current focus (or mod) */ 439 hammer2_chain_t *array[HAMMER2_MAXCLUSTER]; 440 char missed[HAMMER2_MAXCLUSTER]; 441 int cache_index[HAMMER2_MAXCLUSTER]; 442 }; 443 444 typedef struct hammer2_cluster hammer2_cluster_t; 445 446 #define HAMMER2_CLUSTER_INODE 0x00000001 /* embedded in inode */ 447 #define HAMMER2_CLUSTER_NOSYNC 0x00000002 /* not in sync (cumulative) */ 448 449 450 RB_HEAD(hammer2_inode_tree, hammer2_inode); 451 452 /* 453 * A hammer2 inode. 454 * 455 * NOTE: The inode's attribute CST which is also used to lock the inode 456 * is embedded in the chain (chain.cst) and aliased w/ attr_cst. 457 */ 458 struct hammer2_inode { 459 RB_ENTRY(hammer2_inode) rbnode; /* inumber lookup (HL) */ 460 ccms_cst_t topo_cst; /* directory topology cst */ 461 struct hammer2_pfsmount *pmp; /* PFS mount */ 462 struct hammer2_inode *pip; /* parent inode */ 463 struct vnode *vp; 464 hammer2_cluster_t cluster; 465 struct lockf advlock; 466 hammer2_tid_t inum; 467 u_int flags; 468 u_int refs; /* +vpref, +flushref */ 469 uint8_t comp_heuristic; 470 hammer2_off_t size; 471 uint64_t mtime; 472 }; 473 474 typedef struct hammer2_inode hammer2_inode_t; 475 476 #define HAMMER2_INODE_MODIFIED 0x0001 477 #define HAMMER2_INODE_SROOT 0x0002 /* kmalloc special case */ 478 #define HAMMER2_INODE_RENAME_INPROG 0x0004 479 #define HAMMER2_INODE_ONRBTREE 0x0008 480 #define HAMMER2_INODE_RESIZED 0x0010 481 #define HAMMER2_INODE_MTIME 0x0020 482 483 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2); 484 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp, 485 hammer2_tid_t); 486 487 /* 488 * inode-unlink side-structure 489 */ 490 struct hammer2_inode_unlink { 491 TAILQ_ENTRY(hammer2_inode_unlink) entry; 492 
hammer2_inode_t *ip; 493 }; 494 TAILQ_HEAD(hammer2_unlk_list, hammer2_inode_unlink); 495 496 typedef struct hammer2_inode_unlink hammer2_inode_unlink_t; 497 498 /* 499 * A hammer2 transaction and flush sequencing structure. 500 * 501 * This global structure is tied into hammer2_mount and is used 502 * to sequence modifying operations and flushes. 503 * 504 * (a) Any modifying operations with sync_tid >= flush_tid will stall until 505 * all modifying operating with sync_tid < flush_tid complete. 506 * 507 * The flush related to flush_tid stalls until all modifying operations 508 * with sync_tid < flush_tid complete. 509 * 510 * (b) Once unstalled, modifying operations with sync_tid > flush_tid are 511 * allowed to run. All modifications cause modify/duplicate operations 512 * to occur on the related chains. Note that most INDIRECT blocks will 513 * be unaffected because the modifications just overload the RBTREE 514 * structurally instead of actually modifying the indirect blocks. 515 * 516 * (c) The actual flush unstalls and RUNS CONCURRENTLY with (b), but only 517 * utilizes the chain structures with sync_tid <= flush_tid. The 518 * flush will modify related indirect blocks and inodes in-place 519 * (rather than duplicate) since the adjustments are compatible with 520 * (b)'s RBTREE overloading 521 * 522 * SPECIAL NOTE: Inode modifications have to also propagate along any 523 * modify/duplicate chains. File writes detect the flush 524 * and force out the conflicting buffer cache buffer(s) 525 * before reusing them. 526 * 527 * (d) Snapshots can be made instantly but must be flushed and disconnected 528 * from their duplicative source before they can be mounted. This is 529 * because while H2's on-media structure supports forks, its in-memory 530 * structure only supports very simple forking for background flushing 531 * purposes. 532 * 533 * TODO: Flush merging. 
When fsync() is called on multiple discrete files 534 * concurrently there is no reason to stall the second fsync. 535 * The final flush that reaches to root can cover both fsync()s. 536 * 537 * The chains typically terminate as they fly onto the disk. The flush 538 * ultimately reaches the volume header. 539 */ 540 struct hammer2_trans { 541 TAILQ_ENTRY(hammer2_trans) entry; 542 struct hammer2_pfsmount *pmp; 543 hammer2_xid_t sync_xid; 544 hammer2_tid_t inode_tid; /* inode number assignment */ 545 thread_t td; /* pointer */ 546 int flags; 547 int blocked; 548 uint8_t inodes_created; 549 uint8_t dummy[7]; 550 }; 551 552 typedef struct hammer2_trans hammer2_trans_t; 553 554 #define HAMMER2_TRANS_ISFLUSH 0x0001 /* formal flush */ 555 #define HAMMER2_TRANS_CONCURRENT 0x0002 /* concurrent w/flush */ 556 #define HAMMER2_TRANS_BUFCACHE 0x0004 /* from bioq strategy write */ 557 #define HAMMER2_TRANS_NEWINODE 0x0008 /* caller allocating inode */ 558 #define HAMMER2_TRANS_UNUSED0010 0x0010 559 #define HAMMER2_TRANS_PREFLUSH 0x0020 /* preflush state */ 560 561 #define HAMMER2_FREEMAP_HEUR_NRADIX 4 /* pwr 2 PBUFRADIX-MINIORADIX */ 562 #define HAMMER2_FREEMAP_HEUR_TYPES 8 563 #define HAMMER2_FREEMAP_HEUR (HAMMER2_FREEMAP_HEUR_NRADIX * \ 564 HAMMER2_FREEMAP_HEUR_TYPES) 565 566 #define HAMMER2_CLUSTER_COPY_NOCHAINS 0x0001 /* do not copy or ref chains */ 567 #define HAMMER2_CLUSTER_COPY_NOREF 0x0002 /* do not ref chains or cl */ 568 569 /* 570 * Transaction Rendezvous 571 */ 572 TAILQ_HEAD(hammer2_trans_queue, hammer2_trans); 573 574 struct hammer2_trans_manage { 575 hammer2_xid_t flush_xid; /* last flush transaction */ 576 hammer2_xid_t alloc_xid; 577 struct lock translk; /* lockmgr lock */ 578 struct hammer2_trans_queue transq; /* modifying transactions */ 579 int flushcnt; /* track flush trans */ 580 }; 581 582 typedef struct hammer2_trans_manage hammer2_trans_manage_t; 583 584 /* 585 * Global (per device) mount structure for device (aka vp->v_mount->hmp) 586 */ 587 struct 
hammer2_mount { 588 struct vnode *devvp; /* device vnode */ 589 int ronly; /* read-only mount */ 590 int pmp_count; /* PFS mounts backed by us */ 591 TAILQ_ENTRY(hammer2_mount) mntentry; /* hammer2_mntlist */ 592 593 struct malloc_type *mchain; 594 int nipstacks; 595 int maxipstacks; 596 struct spinlock io_spin; /* iotree access */ 597 struct hammer2_io_tree iotree; 598 int iofree_count; 599 hammer2_chain_t vchain; /* anchor chain (topology) */ 600 hammer2_chain_t fchain; /* anchor chain (freemap) */ 601 struct hammer2_pfsmount *spmp; /* super-root pmp for transactions */ 602 struct lock vollk; /* lockmgr lock */ 603 hammer2_off_t heur_freemap[HAMMER2_FREEMAP_HEUR]; 604 int volhdrno; /* last volhdrno written */ 605 hammer2_volume_data_t voldata; 606 hammer2_volume_data_t volsync; /* synchronized voldata */ 607 }; 608 609 typedef struct hammer2_mount hammer2_mount_t; 610 611 /* 612 * HAMMER2 PFS mount point structure (aka vp->v_mount->mnt_data). 613 * This has a 1:1 correspondence to struct mount (note that the 614 * hammer2_mount structure has a N:1 correspondence). 615 * 616 * This structure represents a cluster mount and not necessarily a 617 * PFS under a specific device mount (HMP). The distinction is important 618 * because the elements backing a cluster mount can change on the fly. 619 * 620 * Usually the first element under the cluster represents the original 621 * user-requested mount that bootstraps the whole mess. In significant 622 * setups the original is usually just a read-only media image (or 623 * representitive file) that simply contains a bootstrap volume header 624 * listing the configuration. 
625 */ 626 struct hammer2_pfsmount { 627 struct mount *mp; 628 TAILQ_ENTRY(hammer2_pfsmount) mntentry; /* hammer2_pfslist */ 629 uuid_t pfs_clid; 630 uuid_t pfs_fsid; 631 hammer2_mount_t *spmp_hmp; /* (spmp only) */ 632 hammer2_inode_t *iroot; /* PFS root inode */ 633 hammer2_inode_t *ihidden; /* PFS hidden directory */ 634 struct lock lock; /* PFS lock for certain ops */ 635 hammer2_off_t inode_count; /* copy of inode_count */ 636 ccms_domain_t ccms_dom; 637 struct netexport export; /* nfs export */ 638 int ronly; /* read-only mount */ 639 struct malloc_type *minode; 640 struct malloc_type *mmsg; 641 kdmsg_iocom_t iocom; 642 struct spinlock inum_spin; /* inumber lookup */ 643 struct hammer2_inode_tree inum_tree; /* (not applicable to spmp) */ 644 hammer2_tid_t alloc_tid; 645 hammer2_tid_t flush_tid; 646 hammer2_tid_t inode_tid; 647 long inmem_inodes; 648 long inmem_dirty_chains; 649 int count_lwinprog; /* logical write in prog */ 650 struct spinlock unlinkq_spin; 651 struct hammer2_unlk_list unlinkq; 652 thread_t wthread_td; /* write thread td */ 653 struct bio_queue_head wthread_bioq; /* logical buffer bioq */ 654 struct mtx wthread_mtx; /* interlock */ 655 int wthread_destroy;/* termination sequencing */ 656 }; 657 658 typedef struct hammer2_pfsmount hammer2_pfsmount_t; 659 660 #define HAMMER2_DIRTYCHAIN_WAITING 0x80000000 661 #define HAMMER2_DIRTYCHAIN_MASK 0x7FFFFFFF 662 663 #define HAMMER2_LWINPROG_WAITING 0x80000000 664 #define HAMMER2_LWINPROG_MASK 0x7FFFFFFF 665 666 #if defined(_KERNEL) 667 668 MALLOC_DECLARE(M_HAMMER2); 669 670 #define VTOI(vp) ((hammer2_inode_t *)(vp)->v_data) 671 #define ITOV(ip) ((ip)->vp) 672 673 /* 674 * Currently locked chains retain the locked buffer cache buffer for 675 * indirect blocks, and indirect blocks can be one of two sizes. The 676 * device buffer has to match the case to avoid deadlocking recursive 677 * chains that might otherwise try to access different offsets within 678 * the same device buffer. 
679 */ 680 static __inline 681 int 682 hammer2_devblkradix(int radix) 683 { 684 if (radix <= HAMMER2_LBUFRADIX) { 685 return (HAMMER2_LBUFRADIX); 686 } else { 687 return (HAMMER2_PBUFRADIX); 688 } 689 } 690 691 static __inline 692 size_t 693 hammer2_devblksize(size_t bytes) 694 { 695 if (bytes <= HAMMER2_LBUFSIZE) { 696 return(HAMMER2_LBUFSIZE); 697 } else { 698 KKASSERT(bytes <= HAMMER2_PBUFSIZE && 699 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1)); 700 return (HAMMER2_PBUFSIZE); 701 } 702 } 703 704 705 static __inline 706 hammer2_pfsmount_t * 707 MPTOPMP(struct mount *mp) 708 { 709 return ((hammer2_pfsmount_t *)mp->mnt_data); 710 } 711 712 extern struct vop_ops hammer2_vnode_vops; 713 extern struct vop_ops hammer2_spec_vops; 714 extern struct vop_ops hammer2_fifo_vops; 715 716 extern int hammer2_debug; 717 extern int hammer2_cluster_enable; 718 extern int hammer2_hardlink_enable; 719 extern int hammer2_flush_pipe; 720 extern int hammer2_synchronous_flush; 721 extern int hammer2_dio_count; 722 extern long hammer2_limit_dirty_chains; 723 extern long hammer2_iod_file_read; 724 extern long hammer2_iod_meta_read; 725 extern long hammer2_iod_indr_read; 726 extern long hammer2_iod_fmap_read; 727 extern long hammer2_iod_volu_read; 728 extern long hammer2_iod_file_write; 729 extern long hammer2_iod_meta_write; 730 extern long hammer2_iod_indr_write; 731 extern long hammer2_iod_fmap_write; 732 extern long hammer2_iod_volu_write; 733 extern long hammer2_ioa_file_read; 734 extern long hammer2_ioa_meta_read; 735 extern long hammer2_ioa_indr_read; 736 extern long hammer2_ioa_fmap_read; 737 extern long hammer2_ioa_volu_read; 738 extern long hammer2_ioa_file_write; 739 extern long hammer2_ioa_meta_write; 740 extern long hammer2_ioa_indr_write; 741 extern long hammer2_ioa_fmap_write; 742 extern long hammer2_ioa_volu_write; 743 744 extern struct objcache *cache_buffer_read; 745 extern struct objcache *cache_buffer_write; 746 747 extern int destroy; 748 extern int 
/*
 * NOTE(review): the declarator below is the tail of a declaration whose
 * beginning lies above this portion of the file — left untouched.
 */
	write_thread_wakeup;

/* defined in one of the hammer2 .c files; shared across the module */
extern mtx_t thread_protect;

/*
 * hammer2_subr.c
 *
 * Miscellaneous support: CRC macros, inode/mount lock helpers, media
 * type and time conversions, directory hashing, and block/offset
 * calculations.
 */

/* HAMMER2 check codes use the iSCSI CRC32 implementation */
#define hammer2_icrc32(buf, size)	iscsi_crc32((buf), (size))
#define hammer2_icrc32c(buf, size, crc)	iscsi_crc32_ext((buf), (size), (crc))

/*
 * Inode locking.  The _ex/_sh lock calls return the cluster representing
 * the inode, which is passed back to the matching unlock.
 */
hammer2_cluster_t *hammer2_inode_lock_ex(hammer2_inode_t *ip);
hammer2_cluster_t *hammer2_inode_lock_sh(hammer2_inode_t *ip);
void hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_cluster_t *chain);
void hammer2_inode_unlock_sh(hammer2_inode_t *ip, hammer2_cluster_t *chain);

/*
 * Temporary release/restore and upgrade/downgrade of an inode lock;
 * the returned ccms_state_t is handed back to the paired restore call.
 */
ccms_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, ccms_state_t ostate);
ccms_state_t hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, ccms_state_t ostate);

/* exclusive/shared locking of the mount structure */
void hammer2_mount_exlock(hammer2_mount_t *hmp);
void hammer2_mount_shlock(hammer2_mount_t *hmp);
void hammer2_mount_unlock(hammer2_mount_t *hmp);

/* conversions between HAMMER2 on-media representations and VFS types */
int hammer2_get_dtype(const hammer2_inode_data_t *ipdata);
int hammer2_get_vtype(const hammer2_inode_data_t *ipdata);
u_int8_t hammer2_get_obj_type(enum vtype vtype);
void hammer2_time_to_timespec(u_int64_t xtime, struct timespec *ts);
u_int64_t hammer2_timespec_to_time(const struct timespec *ts);
u_int32_t hammer2_to_unix_xid(const uuid_t *uuid);
void hammer2_guid_to_uuid(uuid_t *uuid, u_int32_t guid);

/* transaction id allocation and one-time transaction manager setup */
hammer2_xid_t hammer2_trans_newxid(hammer2_pfsmount_t *pmp);
void hammer2_trans_manage_init(void);

hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
int hammer2_getradix(size_t bytes);

/* translate a file (uio) offset into logical/physical block parameters */
int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
			hammer2_key_t *lbasep, hammer2_key_t *leofp);
int hammer2_calc_physical(hammer2_inode_t *ip,
			const hammer2_inode_data_t *ipdata,
			hammer2_key_t lbase);
void hammer2_update_time(uint64_t *timep);
void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);

/*
 * hammer2_inode.c
 *
 * In-memory inode management: vnode association, lookup/get/create,
 * reference counting, directory-entry connection, unlink, and
 * hardlink (de)consolidation.
 */
struct vnode *hammer2_igetv(hammer2_inode_t *ip, hammer2_cluster_t *cparent,
			int *errorp);
void hammer2_inode_lock_nlinks(hammer2_inode_t *ip);
void hammer2_inode_unlock_nlinks(hammer2_inode_t *ip);
hammer2_inode_t *hammer2_inode_lookup(hammer2_pfsmount_t *pmp,
			hammer2_tid_t inum);
hammer2_inode_t *hammer2_inode_get(hammer2_pfsmount_t *pmp,
			hammer2_inode_t *dip, hammer2_cluster_t *cluster);
void hammer2_inode_free(hammer2_inode_t *ip);
void hammer2_inode_ref(hammer2_inode_t *ip);
void hammer2_inode_drop(hammer2_inode_t *ip);
void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
			hammer2_cluster_t *cluster);
void hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp);

hammer2_inode_t *hammer2_inode_create(hammer2_trans_t *trans,
			hammer2_inode_t *dip,
			struct vattr *vap, struct ucred *cred,
			const uint8_t *name, size_t name_len,
			hammer2_cluster_t **clusterp, int *errorp);
int hammer2_inode_connect(hammer2_trans_t *trans,
			hammer2_cluster_t **clusterp, int hlink,
			hammer2_inode_t *dip, hammer2_cluster_t *dcluster,
			const uint8_t *name, size_t name_len,
			hammer2_key_t key);
hammer2_inode_t *hammer2_inode_common_parent(hammer2_inode_t *fdip,
			hammer2_inode_t *tdip);
void hammer2_inode_fsync(hammer2_trans_t *trans, hammer2_inode_t *ip,
			hammer2_cluster_t *cparent);
int hammer2_unlink_file(hammer2_trans_t *trans, hammer2_inode_t *dip,
			const uint8_t *name, size_t name_len, int isdir,
			int *hlinkp, struct nchandle *nch);
int hammer2_hardlink_consolidate(hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_cluster_t **clusterp,
			hammer2_inode_t *cdip, hammer2_cluster_t *cdcluster,
			int nlinks);
/* NOTE(review): deconsolidate still takes raw chains, not clusters */
int hammer2_hardlink_deconsolidate(hammer2_trans_t *trans, hammer2_inode_t *dip,
			hammer2_chain_t **chainp,
			hammer2_chain_t **ochainp);
int hammer2_hardlink_find(hammer2_inode_t *dip, hammer2_cluster_t *cluster);
void hammer2_inode_install_hidden(hammer2_pfsmount_t *pmp);

/*
 * hammer2_chain.c
 *
 * Core in-memory topology (see the file header): chain allocation,
 * ref/lock/unlock, modification and resize, lookup/next/scan iteration,
 * create/duplicate/delete-duplicate, flushing, and maintenance of the
 * blockref arrays embedded in parents.
 */
void hammer2_voldata_lock(hammer2_mount_t *hmp);
void hammer2_voldata_unlock(hammer2_mount_t *hmp);
void hammer2_voldata_modify(hammer2_mount_t *hmp);
hammer2_chain_t *hammer2_chain_alloc(hammer2_mount_t *hmp,
			hammer2_pfsmount_t *pmp,
			hammer2_trans_t *trans,
			hammer2_blockref_t *bref);
void hammer2_chain_core_alloc(hammer2_trans_t *trans, hammer2_chain_t *nchain,
			hammer2_chain_t *ochain);
void hammer2_chain_ref(hammer2_chain_t *chain);
void hammer2_chain_drop(hammer2_chain_t *chain);
int hammer2_chain_lock(hammer2_chain_t *chain, int how);
/* asynchronous media load; func is called with the dio when data arrives */
void hammer2_chain_load_async(hammer2_cluster_t *cluster,
			void (*func)(hammer2_io_t *dio,
				     hammer2_cluster_t *cluster,
				     hammer2_chain_t *chain,
				     void *arg_p, off_t arg_o),
			void *arg_p);
void hammer2_chain_moved(hammer2_chain_t *chain);
void hammer2_chain_modify(hammer2_trans_t *trans,
			hammer2_chain_t **chainp, int flags);
void hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
			hammer2_chain_t *parent,
			hammer2_chain_t **chainp,
			int nradix, int flags);
void hammer2_chain_unlock(hammer2_chain_t *chain);
void hammer2_chain_wait(hammer2_chain_t *chain);
hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
			hammer2_blockref_t *bref);
/*
 * Iteration: lookup_init/lookup_done bracket a lookup/next sequence over
 * the key range [key_beg, key_end].
 */
hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
void hammer2_chain_lookup_done(hammer2_chain_t *parent);
hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int *cache_indexp, int flags, int *ddflagp);
hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
			hammer2_chain_t *chain,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int *cache_indexp, int flags);
hammer2_chain_t *hammer2_chain_scan(hammer2_chain_t *parent,
			hammer2_chain_t *chain,
			int *cache_indexp, int flags);

int hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			hammer2_chain_t **chainp,
			hammer2_pfsmount_t *pmp,
			hammer2_key_t key, int keybits,
			int type, size_t bytes);
void hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			hammer2_chain_t **chainp,
			hammer2_blockref_t *bref, int snapshot,
			int duplicate_reason);
int hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_chain_t **chainp,
			hammer2_ioc_pfs_t *pfs);
void hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain,
			int flags);
/* DELETE-DUPLICATE operation described in the file header comment */
void hammer2_chain_delete_duplicate(hammer2_trans_t *trans,
			hammer2_chain_t **chainp, int flags);
void hammer2_flush(hammer2_trans_t *trans, hammer2_chain_t **chainp);
void hammer2_chain_commit(hammer2_trans_t *trans, hammer2_chain_t *chain);
void hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain);
void hammer2_chain_countbrefs(hammer2_chain_t *chain,
			hammer2_blockref_t *base, int count);

/* per-PFS memory accounting/backpressure (wait/inc/wakeup) */
void hammer2_pfs_memory_wait(hammer2_pfsmount_t *pmp);
void hammer2_pfs_memory_inc(hammer2_pfsmount_t *pmp);
void hammer2_pfs_memory_wakeup(hammer2_pfsmount_t *pmp);

/* find/delete/insert within a parent's blockref array (base[count]) */
int hammer2_base_find(hammer2_chain_t *chain,
			hammer2_blockref_t *base, int count,
			int *cache_indexp, hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int delete_filter);
void hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *chain,
			hammer2_blockref_t *base, int count,
			int *cache_indexp, hammer2_chain_t *child);
void hammer2_base_insert(hammer2_trans_t *trans, hammer2_chain_t *chain,
			hammer2_blockref_t *base, int count,
			int *cache_indexp, hammer2_chain_t *child);
/* hammer2_chain.c (continued) */
void hammer2_chain_refactor(hammer2_chain_t **chainp);

/*
 * hammer2_trans.c
 *
 * Transaction setup/teardown.  Per the file header, transactions
 * establish flush synchronization points identified by transaction id.
 */
void hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp,
			int flags);
void hammer2_trans_spmp(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp);
void hammer2_trans_done(hammer2_trans_t *trans);

/*
 * hammer2_ioctl.c
 */
int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
			int fflag, struct ucred *cred);

/*
 * hammer2_io.c
 *
 * Logical media I/O layer (hammer2_io_t, "dio").  Naming follows the
 * kernel buffer-cache verbs: bread/bwrite/bawrite/bdwrite/brelse/bqrelse.
 */
hammer2_io_t *hammer2_io_getblk(hammer2_mount_t *hmp, off_t lbase,
			int lsize, int *ownerp);
void hammer2_io_putblk(hammer2_io_t **diop);
void hammer2_io_cleanup(hammer2_mount_t *hmp, struct hammer2_io_tree *tree);
char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
int hammer2_io_new(hammer2_mount_t *hmp, off_t lbase, int lsize,
			hammer2_io_t **diop);
int hammer2_io_newnz(hammer2_mount_t *hmp, off_t lbase, int lsize,
			hammer2_io_t **diop);
int hammer2_io_newq(hammer2_mount_t *hmp, off_t lbase, int lsize,
			hammer2_io_t **diop);
int hammer2_io_bread(hammer2_mount_t *hmp, off_t lbase, int lsize,
			hammer2_io_t **diop);
/* callback-based read; signature matches hammer2_chain_load_async's func */
void hammer2_io_breadcb(hammer2_mount_t *hmp, off_t lbase, int lsize,
			void (*callback)(hammer2_io_t *dio,
					 hammer2_cluster_t *arg_l,
					 hammer2_chain_t *arg_c,
					 void *arg_p, off_t arg_o),
			hammer2_cluster_t *arg_l,
			hammer2_chain_t *arg_c,
			void *arg_p, off_t arg_o);
void hammer2_io_bawrite(hammer2_io_t **diop);
void hammer2_io_bdwrite(hammer2_io_t **diop);
int hammer2_io_bwrite(hammer2_io_t **diop);
int hammer2_io_isdirty(hammer2_io_t *dio);
void hammer2_io_setdirty(hammer2_io_t *dio);
void hammer2_io_setinval(hammer2_io_t *dio, u_int bytes);
void hammer2_io_brelse(hammer2_io_t **diop);
void hammer2_io_bqrelse(hammer2_io_t **diop);

/*
 * hammer2_msgops.c
 *
 * kdmsg (cluster messaging) receive handlers.
 */
int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);

/*
 * hammer2_vfsops.c
 */
void hammer2_clusterctl_wakeup(kdmsg_iocom_t *iocom);
void hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index);
void hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp);
void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
void hammer2_bioq_sync(hammer2_pfsmount_t *pmp);
int hammer2_vfs_sync(struct mount *mp, int waitflags);
void hammer2_lwinprog_ref(hammer2_pfsmount_t *pmp);
void hammer2_lwinprog_drop(hammer2_pfsmount_t *pmp);
void hammer2_lwinprog_wait(hammer2_pfsmount_t *pmp);

/*
 * hammer2_freemap.c
 *
 * Media block allocation and freemap adjustment.
 */
int hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
			size_t bytes);
void hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
			hammer2_blockref_t *bref, int how);

/*
 * hammer2_cluster.c
 *
 * Cluster-level API.  Most entry points parallel the hammer2_chain_*
 * API above (lookup/next/scan, create/duplicate/delete, lock/ref/drop),
 * operating on a hammer2_cluster_t rather than a single chain.
 */
u_int hammer2_cluster_bytes(hammer2_cluster_t *cluster);
uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
/* read-only vs writable views of the cluster's media data */
const hammer2_media_data_t *hammer2_cluster_data(hammer2_cluster_t *cluster);
hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
int hammer2_cluster_modified(hammer2_cluster_t *cluster);
int hammer2_cluster_unlinked(hammer2_cluster_t *cluster);
int hammer2_cluster_duplicated(hammer2_cluster_t *cluster);
void hammer2_cluster_set_chainflags(hammer2_cluster_t *cluster, uint32_t flags);
void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
void hammer2_cluster_setsubmod(hammer2_trans_t *trans,
			hammer2_cluster_t *cluster);
hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfsmount_t *pmp,
			hammer2_trans_t *trans,
			hammer2_blockref_t *bref);
void hammer2_cluster_core_alloc(hammer2_trans_t *trans,
			hammer2_cluster_t *ncluster,
			hammer2_cluster_t *ocluster);
void hammer2_cluster_ref(hammer2_cluster_t *cluster);
void hammer2_cluster_drop(hammer2_cluster_t *cluster);
void hammer2_cluster_wait(hammer2_cluster_t *cluster);
int hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
void hammer2_cluster_replace(hammer2_cluster_t *dst, hammer2_cluster_t *src);
void hammer2_cluster_replace_locked(hammer2_cluster_t *dst,
			hammer2_cluster_t *src);
hammer2_cluster_t *hammer2_cluster_copy(hammer2_cluster_t *ocluster,
			int with_chains);
void hammer2_cluster_refactor(hammer2_cluster_t *cluster);
void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
void hammer2_cluster_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
			hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
			int nradix, int flags);
/* modify an inode cluster and return a pointer to its writable inode data */
hammer2_inode_data_t *hammer2_cluster_modify_ip(hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			int flags);
void hammer2_cluster_modify(hammer2_trans_t *trans, hammer2_cluster_t *cluster,
			int flags);
void hammer2_cluster_modsync(hammer2_cluster_t *cluster);
hammer2_cluster_t *hammer2_cluster_lookup_init(hammer2_cluster_t *cparent,
			int flags);
void hammer2_cluster_lookup_done(hammer2_cluster_t *cparent);
hammer2_cluster_t *hammer2_cluster_lookup(hammer2_cluster_t *cparent,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int flags, int *ddflagp);
hammer2_cluster_t *hammer2_cluster_next(hammer2_cluster_t *cparent,
			hammer2_cluster_t *cluster,
			hammer2_key_t *key_nextp,
			hammer2_key_t key_beg, hammer2_key_t key_end,
			int flags);
hammer2_cluster_t *hammer2_cluster_scan(hammer2_cluster_t *cparent,
			hammer2_cluster_t *cluster, int flags);
int hammer2_cluster_create(hammer2_trans_t *trans, hammer2_cluster_t *cparent,
			hammer2_cluster_t **clusterp,
			hammer2_key_t key, int keybits,
			int type, size_t bytes);
void hammer2_cluster_duplicate(hammer2_trans_t *trans,
			hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
			hammer2_blockref_t *bref,
			int snapshot, int duplicate_reason);
void hammer2_cluster_delete_duplicate(hammer2_trans_t *trans,
			hammer2_cluster_t *cluster, int flags);
void hammer2_cluster_delete(hammer2_trans_t *trans, hammer2_cluster_t *cluster,
			int flags);
int hammer2_cluster_snapshot(hammer2_trans_t *trans,
			hammer2_cluster_t *ocluster, hammer2_ioc_pfs_t *pfs);

#endif /* !_KERNEL */
#endif /* !_VFS_HAMMER2_HAMMER2_H_ */