1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 #ifndef VFS_HAMMER_HAMMER_H_ 36 #define VFS_HAMMER_HAMMER_H_ 37 38 /* 39 * This header file contains structures used internally by the HAMMERFS 40 * implementation. See hammer_disk.h for on-disk structures. 41 */ 42 43 #include <sys/param.h> 44 #ifdef _KERNEL 45 #include <sys/kernel.h> 46 #include <sys/systm.h> 47 #endif 48 #include <sys/conf.h> 49 #include <sys/tree.h> 50 #include <sys/malloc.h> 51 #include <sys/mount.h> 52 #include <sys/vnode.h> 53 #include <sys/proc.h> 54 #include <sys/priv.h> 55 #include <sys/dirent.h> 56 #include <sys/stat.h> 57 #include <sys/fcntl.h> 58 #include <sys/lockf.h> 59 #include <sys/file.h> 60 #include <sys/event.h> 61 #include <sys/buf.h> 62 #include <sys/queue.h> 63 #include <sys/ktr.h> 64 #include <sys/limits.h> 65 #include <sys/sysctl.h> 66 #include <vm/swap_pager.h> 67 #include <vm/vm_extern.h> 68 69 #include "hammer_disk.h" 70 #include "hammer_mount.h" 71 #include "hammer_ioctl.h" 72 73 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) 74 75 MALLOC_DECLARE(M_HAMMER); 76 77 /* 78 * Kernel trace 79 */ 80 #if !defined(KTR_HAMMER) 81 #define KTR_HAMMER KTR_ALL 82 #endif 83 /* KTR_INFO_MASTER_EXTERN(hammer); */ 84 85 /* 86 * Misc structures 87 */ 88 struct hammer_mount; 89 struct hammer_inode; 90 struct hammer_volume; 91 struct hammer_buffer; 92 struct hammer_node; 93 struct hammer_undo; 94 struct hammer_reserve; 95 struct hammer_io; 96 97 /* 98 * Key structure used for custom RB tree inode lookups. This prototypes 99 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info). 
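 *
 * Illustrative lookup sketch (not part of the original comment; the
 * surrounding variables are assumptions).  An obj_asof of 0 selects the
 * current version rather than a snapshot:
 *
 *	struct hammer_inode_info iinfo;
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);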
100 */ 101 typedef struct hammer_inode_info { 102 int64_t obj_id; /* (key) object identifier */ 103 hammer_tid_t obj_asof; /* (key) snapshot transid or 0 */ 104 uint32_t obj_localization; /* (key) pseudo-fs id for upper 16 bits */ 105 union { 106 struct hammer_btree_leaf_elm *leaf; 107 } u; 108 } *hammer_inode_info_t; 109 110 typedef enum hammer_transaction_type { 111 HAMMER_TRANS_RO, 112 HAMMER_TRANS_STD, 113 HAMMER_TRANS_FLS 114 } hammer_transaction_type_t; 115 116 /* 117 * HAMMER Transaction tracking 118 */ 119 struct hammer_transaction { 120 hammer_transaction_type_t type; 121 struct hammer_mount *hmp; 122 hammer_tid_t tid; 123 uint64_t time; 124 uint32_t time32; 125 int sync_lock_refs; 126 int flags; 127 struct hammer_volume *rootvol; 128 }; 129 130 typedef struct hammer_transaction *hammer_transaction_t; 131 132 #define HAMMER_TRANSF_NEWINODE 0x0001 133 #define HAMMER_TRANSF_CRCDOM 0x0004 /* EDOM on CRC error, less critical */ 134 135 /* 136 * HAMMER locks 137 */ 138 struct hammer_lock { 139 volatile u_int refs; /* active references */ 140 volatile u_int lockval; /* lock count and control bits */ 141 struct thread *lowner; /* owner if exclusively held */ 142 struct thread *rowner; /* owner if exclusively held */ 143 }; 144 145 #define HAMMER_REFS_LOCKED 0x40000000 /* transition check */ 146 #define HAMMER_REFS_WANTED 0x20000000 /* transition check */ 147 #define HAMMER_REFS_CHECK 0x10000000 /* transition check */ 148 149 #define HAMMER_REFS_FLAGS (HAMMER_REFS_LOCKED | \ 150 HAMMER_REFS_WANTED | \ 151 HAMMER_REFS_CHECK) 152 153 #define HAMMER_LOCKF_EXCLUSIVE 0x40000000 154 #define HAMMER_LOCKF_WANTED 0x20000000 155 156 static __inline int 157 hammer_notlocked(struct hammer_lock *lock) 158 { 159 return(lock->lockval == 0); 160 } 161 162 static __inline int 163 hammer_islocked(struct hammer_lock *lock) 164 { 165 return(lock->lockval != 0); 166 } 167 168 /* 169 * Returns the number of refs on the object. 170 */ 171 static __inline int 172 hammer_isactive(struct hammer_lock *lock) 173 { 174 return(lock->refs & ~HAMMER_REFS_FLAGS); 175 } 176 177 static __inline int 178 hammer_oneref(struct hammer_lock *lock) 179 { 180 return((lock->refs & ~HAMMER_REFS_FLAGS) == 1); 181 } 182 183 static __inline int 184 hammer_norefs(struct hammer_lock *lock) 185 { 186 return((lock->refs & ~HAMMER_REFS_FLAGS) == 0); 187 } 188 189 static __inline int 190 hammer_norefsorlock(struct hammer_lock *lock) 191 { 192 return(lock->refs == 0); 193 } 194 195 static __inline int 196 hammer_refsorlock(struct hammer_lock *lock) 197 { 198 return(lock->refs != 0); 199 } 200 201 /* 202 * Return if we specifically own the lock exclusively. 
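 *
 * Illustrative use (a sketch, not part of the original comment): code
 * about to modify state protected by an exclusive lock can assert
 * ownership first, e.g.
 *
 *	KKASSERT(hammer_lock_excl_owned(&node->lock, curthread));
 *
 * Shared holders are not tracked per-thread, so the test is only
 * meaningful for exclusively held locks.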
203 */ 204 static __inline int 205 hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td) 206 { 207 if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) && 208 lock->lowner == td) { 209 return(1); 210 } 211 return(0); 212 } 213 214 /* 215 * Flush state, used by various structures 216 */ 217 typedef enum hammer_inode_state { 218 HAMMER_FST_IDLE, 219 HAMMER_FST_SETUP, 220 HAMMER_FST_FLUSH 221 } hammer_inode_state_t; 222 223 /* 224 * Pseudo-filesystem extended data tracking 225 */ 226 struct hammer_pseudofs_inmem; 227 RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem); 228 RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, 229 hammer_pfs_rb_compare, uint32_t); 230 231 struct hammer_pseudofs_inmem { 232 RB_ENTRY(hammer_pseudofs_inmem) rb_node; 233 struct hammer_lock lock; 234 uint32_t localization; 235 hammer_tid_t create_tid; 236 int flags; 237 udev_t fsid_udev; 238 struct hammer_pseudofs_data pfsd; 239 }; 240 241 typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t; 242 243 /* 244 * Cache object ids. A fixed number of objid cache structures are 245 * created to reserve object id's for newly created files in multiples 246 * of 100,000, localized to a particular directory, and recycled as 247 * needed. This allows parallel create operations in different 248 * directories to retain fairly localized object ids which in turn 249 * improves reblocking performance and layout. 250 */ 251 #define OBJID_CACHE_SIZE 2048 252 #define OBJID_CACHE_BULK_BITS 10 /* 10 bits (1024) */ 253 #define OBJID_CACHE_BULK (32 * 32) /* two level (1024) */ 254 #define OBJID_CACHE_BULK_MASK (OBJID_CACHE_BULK - 1) 255 #define OBJID_CACHE_BULK_MASK64 ((uint64_t)(OBJID_CACHE_BULK - 1)) 256 257 typedef struct hammer_objid_cache { 258 TAILQ_ENTRY(hammer_objid_cache) entry; 259 struct hammer_inode *dip; 260 hammer_tid_t base_tid; 261 int count; 262 uint32_t bm0; 263 uint32_t bm1[32]; 264 } *hammer_objid_cache_t; 265 266 /* 267 * Associate an inode with a B-Tree node to cache search start positions 268 */ 269 typedef struct hammer_node_cache { 270 TAILQ_ENTRY(hammer_node_cache) entry; 271 struct hammer_node *node; 272 struct hammer_inode *ip; 273 } *hammer_node_cache_t; 274 275 TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache); 276 277 /* 278 * Live dedup cache 279 */ 280 struct hammer_dedup_cache; 281 RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache); 282 RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry, 283 hammer_dedup_crc_rb_compare, hammer_crc_t); 284 285 RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache); 286 RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry, 287 hammer_dedup_off_rb_compare, hammer_off_t); 288 289 #define DEDUP_CACHE_SIZE 4096 /* XXX make it a dynamic tunable */ 290 291 typedef struct hammer_dedup_cache { 292 RB_ENTRY(hammer_dedup_cache) crc_entry; 293 RB_ENTRY(hammer_dedup_cache) off_entry; 294 TAILQ_ENTRY(hammer_dedup_cache) lru_entry; 295 struct hammer_mount *hmp; 296 int64_t obj_id; 297 uint32_t localization; 298 off_t file_offset; 299 int bytes; 300 hammer_off_t data_offset; 301 hammer_crc_t crc; 302 } *hammer_dedup_cache_t; 303 304 /* 305 * Structure used to organize flush groups. Flush groups must be 306 * organized into chunks in order to avoid blowing out the UNDO FIFO. 307 * Without this a 'sync' could end up flushing 50,000 inodes in a single 308 * transaction. 
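 *
 * Rough usage sketch (assumed flow, based on the flusher prototypes
 * declared later in this header): the frontend queues one group and can
 * wait on its sequence number, e.g.
 *
 *	seq = hammer_flusher_async(hmp, flg);
 *	hammer_flusher_wait(hmp, seq);
 *
 * so a 'sync' drains bounded chunks of inodes instead of everything in
 * one transaction.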
 */
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	hammer_ino_rb_compare);

struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group) flush_entry;
	struct hammer_fls_rb_tree flush_tree;
	int	seq;		/* our seq no */
	int	total_count;	/* record load */
	int	running;	/* group is running */
	int	closed;
	int	refs;
};

typedef struct hammer_flush_group *hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);

/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * clear).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	hammer_ino_rb_compare, hammer_inode_info_t);

RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	hammer_redo_rb_compare, hammer_off_t);

struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_record_list, hammer_record);
TAILQ_HEAD(hammer_node_list, hammer_node);

struct hammer_inode {
	RB_ENTRY(hammer_inode) rb_node;
	hammer_inode_state_t flush_state;
	hammer_flush_group_t flush_group;
	RB_ENTRY(hammer_inode) rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode) rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot or 0 */
	uint32_t	obj_localization; /* (key) pseudo-fs id for upper 16 bits */
	struct hammer_mount *hmp;
	hammer_objid_cache_t objid_cache;
	int		flags;
	int		error;		/* flush error */
	int		cursor_ip_refs;	/* sanity */
#if 0
	int		cursor_exclreq_count;
#endif
	int		rsv_recs;
	struct vnode	*vp;
	hammer_pseudofs_inmem_t pfsm;
	struct lockf	advlock;
	struct hammer_lock lock;	/* sync copy interlock */
	off_t		trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int		rec_generation;

	/*
	 * Search initiate cache
	 * cache[0] - this inode
	 * cache[1] - related data, the contents depend on the situation
	 * cache[2] - for dip to cache ip to shortcut B-Tree search
	 * cache[3] - related data copied from dip to a new ip's cache[1]
	 */
	struct hammer_node_cache cache[4];

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
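	 *
	 * For example (illustrative): once the copy is made, a later
	 * front-end truncation only adjusts trunc_off, while
	 * sync_trunc_off continues to describe the truncation the
	 * backend is synchronizing.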
	 */
	int		sync_flags;	/* to-sync flags cache */
	off_t		sync_trunc_off;	/* to-sync truncation */
	off_t		save_trunc_off;	/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data; /* to-sync cache */
	size_t		redo_count;

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t	redo_fifo_start;
	hammer_off_t	redo_fifo_next;
};

typedef struct hammer_inode *hammer_inode_t;

#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 * write-append size changes.  SDIRTY handles write-append size
 * changes.
 *
 * REDO indicates that REDO logging is active, creating a definitive
 * stream of REDO records in the UNDO/REDO log for writes and
 * truncations, including boundary records when/if REDO is turned off.
 * REDO is typically enabled by fsync() and turned off if excessive
 * writes without an fsync() occur.
 *
 * RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 * FIFO (even if REDO is turned off some might still be active) and
 * are still being tracked for this inode.  See hammer_redo.c
 */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
					/* (not including atime/mtime) */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_RECSW	0x0400	/* waiting on data record flush */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */
#define HAMMER_INODE_SLAVEFLUSH	0x08000000 /* being flushed by slave */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY| \
HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS| \ 472 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME| \ 473 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) 474 475 #define HAMMER_INODE_MODMASK_NOXDIRTY \ 476 (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY) 477 478 #define HAMMER_INODE_MODMASK_NOREDO \ 479 (HAMMER_INODE_DDIRTY| \ 480 HAMMER_INODE_XDIRTY| \ 481 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) 482 483 #define HAMMER_FLUSH_SIGNAL 0x0001 484 #define HAMMER_FLUSH_RECURSION 0x0002 485 486 /* 487 * Used by the inode reclaim code to pipeline reclaims and avoid 488 * blowing out kernel memory or letting the flusher get too far 489 * behind. The reclaim wakes up when count reaches 0 or the 490 * timer expires. 491 */ 492 struct hammer_reclaim { 493 TAILQ_ENTRY(hammer_reclaim) entry; 494 int count; 495 }; 496 497 /* 498 * Track who is creating the greatest burden on the 499 * inode cache. 500 */ 501 struct hammer_inostats { 502 pid_t pid; /* track user process */ 503 int ltick; /* last tick */ 504 int count; /* count (degenerates) */ 505 }; 506 507 #define HAMMER_INOSTATS_HSIZE 32 508 #define HAMMER_INOSTATS_HMASK (HAMMER_INOSTATS_HSIZE - 1) 509 510 /* 511 * Structure used to represent an unsynchronized record in-memory. These 512 * records typically represent directory entries. Only non-historical 513 * records are kept in-memory. 514 * 515 * Records are organized as a per-inode RB-Tree. If the inode is not 516 * on disk then neither are any records and the in-memory record tree 517 * represents the entire contents of the inode. If the inode is on disk 518 * then the on-disk B-Tree is scanned in parallel with the in-memory 519 * RB-Tree to synthesize the current state of the file. 520 * 521 * Records are also used to enforce the ordering of directory create/delete 522 * operations. A new inode will not be flushed to disk unless its related 523 * directory entry is also being flushed at the same time. A directory entry 524 * will not be removed unless its related inode is also being removed at the 525 * same time. 526 */ 527 typedef enum hammer_record_type { 528 HAMMER_MEM_RECORD_GENERAL, /* misc record */ 529 HAMMER_MEM_RECORD_INODE, /* inode record */ 530 HAMMER_MEM_RECORD_ADD, /* positive memory cache record */ 531 HAMMER_MEM_RECORD_DEL, /* negative delete-on-disk record */ 532 HAMMER_MEM_RECORD_DATA /* bulk-data record w/on-disk ref */ 533 } hammer_record_type_t; 534 535 struct hammer_record { 536 RB_ENTRY(hammer_record) rb_node; 537 TAILQ_ENTRY(hammer_record) target_entry; 538 hammer_inode_state_t flush_state; 539 hammer_flush_group_t flush_group; 540 hammer_record_type_t type; 541 struct hammer_lock lock; 542 struct hammer_reserve *resv; 543 struct hammer_inode *ip; 544 struct hammer_inode *target_ip; 545 struct hammer_btree_leaf_elm leaf; 546 union hammer_data_ondisk *data; 547 int flags; 548 int gflags; 549 hammer_off_t zone2_offset; /* direct-write only */ 550 }; 551 552 typedef struct hammer_record *hammer_record_t; 553 554 /* 555 * Record flags. Note that FE can only be set by the frontend if the 556 * record has not been interlocked by the backend w/ BE. 
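 *
 * Illustrative sketch (the control flow is an assumption; the flag
 * definitions below are authoritative): a frontend wanting to delete a
 * record the backend has interlocked records the intent instead of
 * setting the FE flag directly, e.g.
 *
 *	if (record->flags & HAMMER_RECF_INTERLOCK_BE)
 *		record->flags |= HAMMER_RECF_WANTED;
 *	else
 *		record->flags |= HAMMER_RECF_DELETED_FE;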
557 */ 558 #define HAMMER_RECF_ALLOCDATA 0x0001 559 #define HAMMER_RECF_ONRBTREE 0x0002 560 #define HAMMER_RECF_DELETED_FE 0x0004 /* deleted (frontend) */ 561 #define HAMMER_RECF_DELETED_BE 0x0008 /* deleted (backend) */ 562 #define HAMMER_RECF_COMMITTED 0x0010 /* committed to the B-Tree */ 563 #define HAMMER_RECF_INTERLOCK_BE 0x0020 /* backend interlock */ 564 #define HAMMER_RECF_WANTED 0x0040 /* wanted by the frontend */ 565 #define HAMMER_RECF_DEDUPED 0x0080 /* will be live-dedup'ed */ 566 #define HAMMER_RECF_CONVERT_DELETE 0x0100 /* special case */ 567 #define HAMMER_RECF_REDO 0x1000 /* REDO was laid down */ 568 569 /* 570 * These flags must be separate to deal with SMP races 571 */ 572 #define HAMMER_RECG_DIRECT_IO 0x0001 /* related direct I/O running*/ 573 #define HAMMER_RECG_DIRECT_WAIT 0x0002 /* related direct I/O running*/ 574 #define HAMMER_RECG_DIRECT_INVAL 0x0004 /* buffer alias invalidation */ 575 /* 576 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags. 577 */ 578 #define HAMMER_CREATE_MODE_UMIRROR 0x0001 579 #define HAMMER_CREATE_MODE_SYS 0x0002 580 581 #define HAMMER_DELETE_ADJUST 0x0001 582 #define HAMMER_DELETE_DESTROY 0x0002 583 584 /* 585 * In-memory structures representing on-disk structures. 586 */ 587 RB_HEAD(hammer_vol_rb_tree, hammer_volume); 588 RB_HEAD(hammer_buf_rb_tree, hammer_buffer); 589 RB_HEAD(hammer_nod_rb_tree, hammer_node); 590 RB_HEAD(hammer_und_rb_tree, hammer_undo); 591 RB_HEAD(hammer_res_rb_tree, hammer_reserve); 592 RB_HEAD(hammer_mod_rb_tree, hammer_io); 593 594 RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node, 595 hammer_vol_rb_compare, int32_t); 596 RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node, 597 hammer_buf_rb_compare, hammer_off_t); 598 RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node, 599 hammer_nod_rb_compare, hammer_off_t); 600 RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node, 601 hammer_und_rb_compare, hammer_off_t); 602 RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node, 603 hammer_res_rb_compare, hammer_off_t); 604 RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node, 605 hammer_mod_rb_compare, hammer_off_t); 606 607 /* 608 * IO management - embedded at the head of various in-memory structures 609 * 610 * VOLUME - hammer_volume containing meta-data 611 * META_BUFFER - hammer_buffer containing meta-data 612 * UNDO_BUFFER - hammer_buffer containing undo-data 613 * DATA_BUFFER - hammer_buffer containing pure-data 614 * DUMMY - hammer_buffer not containing valid data 615 * 616 * Dirty volume headers and dirty meta-data buffers are locked until the 617 * flusher can sequence them out. Dirty pure-data buffers can be written. 618 * Clean buffers can be passively released. 619 */ 620 typedef enum hammer_io_type { 621 HAMMER_STRUCTURE_VOLUME, 622 HAMMER_STRUCTURE_META_BUFFER, 623 HAMMER_STRUCTURE_UNDO_BUFFER, 624 HAMMER_STRUCTURE_DATA_BUFFER, 625 HAMMER_STRUCTURE_DUMMY 626 } hammer_io_type_t; 627 628 /* 629 * XXX: struct hammer_io can't directly embed LIST_ENTRY() at offset 0, 630 * since a list head in struct buf expects a struct called worklist for 631 * list entries. HAMMER needs to define and use struct worklist. 
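 *
 * Illustrative consequence (a sketch; how the worklist is hooked into
 * the buffer cache buffer is an assumption): because every embedding is
 * at offset 0, the pointers convert by simple casts, e.g.
 *
 *	hammer_io_t io = (hammer_io_t)LIST_FIRST(&bp->b_dep);
 *	hammer_buffer_t buffer = HAMMER_ITOB(io);
 *
 * with HAMMER_ITOB()/HAMMER_ITOV() defined further below.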
632 */ 633 struct worklist { 634 LIST_ENTRY(worklist) node; 635 }; 636 637 TAILQ_HEAD(hammer_io_list, hammer_io); 638 typedef struct hammer_io_list *hammer_io_list_t; 639 640 struct hammer_io { 641 struct worklist worklist; /* must be at offset 0 */ 642 struct hammer_lock lock; 643 enum hammer_io_type type; 644 struct hammer_mount *hmp; 645 struct hammer_volume *volume; 646 RB_ENTRY(hammer_io) rb_node; /* if modified */ 647 TAILQ_ENTRY(hammer_io) iorun_entry; /* iorun_list */ 648 struct hammer_mod_rb_tree *mod_root; 649 struct buf *bp; 650 int64_t offset; /* volume offset */ 651 int bytes; /* buffer cache buffer size */ 652 int modify_refs; 653 654 /* 655 * These can be modified at any time by the backend while holding 656 * io_token, due to bio_done and hammer_io_complete() callbacks. 657 */ 658 u_int running : 1; /* bp write IO in progress */ 659 u_int waiting : 1; /* someone is waiting on us */ 660 u_int ioerror : 1; /* abort on io-error */ 661 u_int unusedA : 29; 662 663 /* 664 * These can only be modified by the frontend while holding 665 * fs_token, or by the backend while holding the io interlocked 666 * with no references (which will block the frontend when it 667 * tries to reference it). 668 * 669 * WARNING! SMP RACES will create havoc if the callbacks ever tried 670 * to modify any of these outside the above restrictions. 671 */ 672 u_int modified : 1; /* bp's data was modified */ 673 u_int released : 1; /* bp released (w/ B_LOCKED set) */ 674 u_int waitdep : 1; /* flush waits for dependancies */ 675 u_int recovered : 1; /* has recovery ref */ 676 u_int waitmod : 1; /* waiting for modify_refs */ 677 u_int reclaim : 1; /* reclaim requested */ 678 u_int gencrc : 1; /* crc needs to be generated */ 679 u_int unusedB : 25; 680 }; 681 682 typedef struct hammer_io *hammer_io_t; 683 684 #define HAMMER_CLUSTER_SIZE (64 * 1024) 685 #if HAMMER_CLUSTER_SIZE > MAXBSIZE 686 #undef HAMMER_CLUSTER_SIZE 687 #define HAMMER_CLUSTER_SIZE MAXBSIZE 688 #endif 689 690 /* 691 * In-memory volume representing on-disk buffer 692 */ 693 struct hammer_volume { 694 struct hammer_io io; /* must be at offset 0 */ 695 RB_ENTRY(hammer_volume) rb_node; 696 struct hammer_volume_ondisk *ondisk; 697 int32_t vol_no; 698 hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */ 699 char *vol_name; 700 struct vnode *devvp; 701 int vol_flags; 702 }; 703 704 typedef struct hammer_volume *hammer_volume_t; 705 706 #define HAMMER_ITOV(iop) ((hammer_volume_t)(iop)) 707 708 /* 709 * In-memory buffer representing an on-disk buffer. 710 */ 711 struct hammer_buffer { 712 struct hammer_io io; /* must be at offset 0 */ 713 RB_ENTRY(hammer_buffer) rb_node; 714 void *ondisk; 715 hammer_off_t zoneX_offset; 716 hammer_off_t zone2_offset; 717 struct hammer_reserve *resv; 718 struct hammer_node_list node_list; 719 }; 720 721 typedef struct hammer_buffer *hammer_buffer_t; 722 723 #define HAMMER_ITOB(iop) ((hammer_buffer_t)(iop)) 724 725 /* 726 * In-memory B-Tree node, representing an on-disk B-Tree node. 727 * 728 * This is a hang-on structure which is backed by a hammer_buffer, 729 * and used for fine-grained locking of B-Tree nodes in order to 730 * properly control lock ordering. 
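 *
 * Typical access pattern (an illustrative sketch using the prototypes
 * declared later in this header; locking and error handling elided):
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... inspect node->ondisk ...
 *		hammer_rel_node(node);
 *	}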
731 */ 732 struct hammer_node { 733 struct hammer_lock lock; /* node-by-node lock */ 734 TAILQ_ENTRY(hammer_node) entry; /* per-buffer linkage */ 735 RB_ENTRY(hammer_node) rb_node; /* per-mount linkage */ 736 hammer_off_t node_offset; /* full offset spec */ 737 struct hammer_mount *hmp; 738 struct hammer_buffer *buffer; /* backing buffer */ 739 hammer_node_ondisk_t ondisk; /* ptr to on-disk structure */ 740 TAILQ_HEAD(, hammer_cursor) cursor_list; /* deadlock recovery */ 741 struct hammer_node_cache_list cache_list; /* passive caches */ 742 int flags; 743 #if 0 744 int cursor_exclreq_count; 745 #endif 746 }; 747 748 #define HAMMER_NODE_DELETED 0x0001 749 #define HAMMER_NODE_FLUSH 0x0002 750 #define HAMMER_NODE_CRCGOOD 0x0004 751 #define HAMMER_NODE_NEEDSCRC 0x0008 752 #define HAMMER_NODE_NEEDSMIRROR 0x0010 753 #define HAMMER_NODE_CRCBAD 0x0020 754 #define HAMMER_NODE_NONLINEAR 0x0040 /* linear heuristic */ 755 756 #define HAMMER_NODE_CRCANY (HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD) 757 758 typedef struct hammer_node *hammer_node_t; 759 760 /* 761 * List of locked nodes. This structure is used to lock potentially large 762 * numbers of nodes as an aid for complex B-Tree operations. 763 */ 764 struct hammer_node_lock; 765 TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock); 766 767 struct hammer_node_lock { 768 TAILQ_ENTRY(hammer_node_lock) entry; 769 struct hammer_node_lock_list list; 770 struct hammer_node_lock *parent; 771 hammer_node_t node; 772 hammer_node_ondisk_t copy; /* copy of on-disk data */ 773 int index; /* index of this node in parent */ 774 int count; /* count children */ 775 int flags; 776 }; 777 778 typedef struct hammer_node_lock *hammer_node_lock_t; 779 780 #define HAMMER_NODE_LOCK_UPDATED 0x0001 781 #define HAMMER_NODE_LOCK_LCACHE 0x0002 782 783 /* 784 * The reserve structure prevents the blockmap from allocating 785 * out of a reserved big-block. Such reservations are used by 786 * the direct-write mechanism. 787 * 788 * The structure is also used to hold off on reallocations of 789 * big-blocks from the freemap until flush dependancies have 790 * been dealt with. 791 */ 792 struct hammer_reserve { 793 RB_ENTRY(hammer_reserve) rb_node; 794 TAILQ_ENTRY(hammer_reserve) delay_entry; 795 int flg_no; 796 int flags; 797 int refs; 798 int zone; 799 int append_off; 800 int32_t bytes_free; 801 hammer_off_t zone_offset; 802 }; 803 804 typedef struct hammer_reserve *hammer_reserve_t; 805 806 #define HAMMER_RESF_ONDELAY 0x0001 807 #define HAMMER_RESF_LAYER2FREE 0x0002 808 809 #include "hammer_cursor.h" 810 811 /* 812 * The undo structure tracks recent undos to avoid laying down duplicate 813 * undos within a flush group, saving us a significant amount of overhead. 814 * 815 * This is strictly a heuristic. 
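 *
 * Illustrative use (the return convention shown is an assumption for the
 * sketch): before laying down an undo the caller asks whether the range
 * was already covered within the current flush group, e.g.
 *
 *	if (hammer_enter_undo_history(hmp, zone_offset, len) == EALREADY)
 *		return(0);
 *
 * i.e. the duplicate undo is simply skipped.  hammer_enter_undo_history()
 * is declared later in this header.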
816 */ 817 #define HAMMER_MAX_UNDOS 1024 818 #define HAMMER_MAX_FLUSHERS 4 819 820 struct hammer_undo { 821 RB_ENTRY(hammer_undo) rb_node; 822 TAILQ_ENTRY(hammer_undo) lru_entry; 823 hammer_off_t offset; 824 int bytes; 825 }; 826 827 typedef struct hammer_undo *hammer_undo_t; 828 829 struct hammer_flusher_info; 830 TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info); 831 832 struct hammer_flusher { 833 int signal; /* flusher thread sequencer */ 834 int done; /* last completed flush group */ 835 int next; /* next unallocated flg seqno */ 836 int group_lock; /* lock sequencing of the next flush */ 837 int exiting; /* request master exit */ 838 thread_t td; /* master flusher thread */ 839 hammer_tid_t tid; /* last flushed transaction id */ 840 int finalize_want; /* serialize finalization */ 841 struct hammer_lock finalize_lock; /* serialize finalization */ 842 struct hammer_transaction trans; /* shared transaction */ 843 struct hammer_flusher_info_list run_list; 844 struct hammer_flusher_info_list ready_list; 845 }; 846 847 #define HAMMER_FLUSH_UNDOS_RELAXED 0 848 #define HAMMER_FLUSH_UNDOS_FORCED 1 849 #define HAMMER_FLUSH_UNDOS_AUTO 2 850 /* 851 * Internal hammer mount data structure 852 */ 853 struct hammer_mount { 854 struct mount *mp; 855 struct hammer_ino_rb_tree rb_inos_root; 856 struct hammer_redo_rb_tree rb_redo_root; 857 struct hammer_vol_rb_tree rb_vols_root; 858 struct hammer_nod_rb_tree rb_nods_root; 859 struct hammer_und_rb_tree rb_undo_root; 860 struct hammer_res_rb_tree rb_resv_root; 861 struct hammer_buf_rb_tree rb_bufs_root; 862 struct hammer_pfs_rb_tree rb_pfsm_root; 863 864 struct hammer_dedup_crc_rb_tree rb_dedup_crc_root; 865 struct hammer_dedup_off_rb_tree rb_dedup_off_root; 866 867 struct hammer_volume *rootvol; 868 struct hammer_base_elm root_btree_beg; 869 struct hammer_base_elm root_btree_end; 870 871 struct malloc_type *m_misc; 872 struct malloc_type *m_inodes; 873 874 int flags; /* HAMMER_MOUNT_xxx flags */ 875 int hflags; 876 int ronly; 877 int nvolumes; 878 int master_id; /* -1 or 0-15 for mirroring */ 879 int version; /* hammer filesystem version to use */ 880 int rsv_inodes; /* reserved space due to dirty inodes */ 881 int64_t rsv_databytes; /* reserved space due to record data */ 882 int rsv_recs; /* reserved space due to dirty records */ 883 int rsv_fromdelay; /* big-blocks reserved due to flush delay */ 884 int undo_rec_limit; /* based on size of undo area */ 885 886 int volume_to_remove; /* volume that is currently being removed */ 887 888 int count_inodes; /* total number of inodes */ 889 int count_iqueued; /* inodes queued to flusher */ 890 int count_reclaims; /* inodes pending reclaim by flusher */ 891 892 struct hammer_flusher flusher; 893 894 u_int check_interrupt; 895 u_int check_yield; 896 uuid_t fsid; 897 struct hammer_mod_rb_tree volu_root; /* dirty undo buffers */ 898 struct hammer_mod_rb_tree undo_root; /* dirty undo buffers */ 899 struct hammer_mod_rb_tree data_root; /* dirty data buffers */ 900 struct hammer_mod_rb_tree meta_root; /* dirty meta bufs */ 901 struct hammer_mod_rb_tree lose_root; /* loose buffers */ 902 long locked_dirty_space; /* meta/volu count */ 903 long io_running_space; /* io_token */ 904 int objid_cache_count; 905 int dedup_cache_count; 906 int error; /* critical I/O error */ 907 struct krate krate; /* rate limited kprintf */ 908 struct krate kdiag; /* rate limited kprintf */ 909 hammer_tid_t asof; /* snapshot mount */ 910 hammer_tid_t next_tid; 911 hammer_tid_t flush_tid1; /* flusher tid sequencing */ 912 
	hammer_tid_t	flush_tid2;	/* flusher tid sequencing */
	int64_t		copy_stat_freebigblocks; /* number of free big-blocks */
	uint32_t	undo_seqno;	/* UNDO/REDO FIFO seqno */
	uint32_t	recover_stage2_seqno; /* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset; /* REDO recovery offset */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo undos[HAMMER_MAX_UNDOS];
	int		undo_alloc;
	TAILQ_HEAD(, hammer_undo) undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list flush_group_list;
	hammer_flush_group_t fill_flush_group;
	hammer_flush_group_t next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token fs_token;	/* high level */
	struct lwkt_token io_token;	/* low level (IO callback) */

	struct hammer_inostats inostats[HAMMER_INOSTATS_HSIZE];
	uint64_t volume_map[4];	/* 256-bit bitfield */
};

typedef struct hammer_mount *hammer_mount_t;

#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010

#define HAMMER_VOLUME_NUMBER_ADD(hmp, vol)			\
	(hmp)->volume_map[(vol)->vol_no >> 6] |=		\
	((uint64_t)1 << ((vol)->vol_no & ((1 << 6) - 1)))

#define HAMMER_VOLUME_NUMBER_DEL(hmp, vol)			\
	(hmp)->volume_map[(vol)->vol_no >> 6] &=		\
	~((uint64_t)1 << ((vol)->vol_no & ((1 << 6) - 1)))

#define HAMMER_VOLUME_NUMBER_IS_SET(hmp, n)			\
	(((hmp)->volume_map[(n) >> 6] &				\
	((uint64_t)1 << ((n) & ((1 << 6) - 1)))) != 0)

#define HAMMER_VOLUME_NUMBER_FOREACH(hmp, n)			\
	for (n = 0; n < HAMMER_MAX_VOLUMES; n++)		\
		if (HAMMER_VOLUME_NUMBER_IS_SET(hmp, n))

/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of balance.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)

#endif	/* _KERNEL || _KERNEL_STRUCTURES */

#if defined(_KERNEL)
/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
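 *
 * Illustrative use (a sketch; whether a given caller fails with ENOSPC
 * directly is an assumption): operations bail out when free space falls
 * below their slop level, e.g.
 *
 *	if (hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE))
 *		return (ENOSPC);
 *
 * hammer_checkspace() itself is defined as an inline near the end of
 * this header; the slop levels follow.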
983 */ 984 #define HAMMER_CHKSPC_REBLOCK 25 985 #define HAMMER_CHKSPC_MIRROR 20 986 #define HAMMER_CHKSPC_WRITE 20 987 #define HAMMER_CHKSPC_CREATE 20 988 #define HAMMER_CHKSPC_REMOVE 10 989 #define HAMMER_CHKSPC_EMERGENCY 0 990 991 extern struct vop_ops hammer_vnode_vops; 992 extern struct vop_ops hammer_spec_vops; 993 extern struct vop_ops hammer_fifo_vops; 994 995 extern int hammer_debug_io; 996 extern int hammer_debug_general; 997 extern int hammer_debug_inode; 998 extern int hammer_debug_locks; 999 extern int hammer_debug_btree; 1000 extern int hammer_debug_tid; 1001 extern int hammer_debug_recover; 1002 extern int hammer_debug_critical; 1003 extern int hammer_cluster_enable; 1004 extern int hammer_live_dedup; 1005 extern int hammer_tdmux_ticks; 1006 extern int hammer_count_fsyncs; 1007 extern int hammer_count_inodes; 1008 extern int hammer_count_iqueued; 1009 extern int hammer_count_reclaims; 1010 extern int hammer_count_records; 1011 extern int hammer_count_record_datas; 1012 extern int hammer_count_volumes; 1013 extern int hammer_count_buffers; 1014 extern int hammer_count_nodes; 1015 extern int64_t hammer_count_extra_space_used; 1016 extern int64_t hammer_stats_btree_lookups; 1017 extern int64_t hammer_stats_btree_searches; 1018 extern int64_t hammer_stats_btree_inserts; 1019 extern int64_t hammer_stats_btree_deletes; 1020 extern int64_t hammer_stats_btree_elements; 1021 extern int64_t hammer_stats_btree_splits; 1022 extern int64_t hammer_stats_btree_iterations; 1023 extern int64_t hammer_stats_btree_root_iterations; 1024 extern int64_t hammer_stats_record_iterations; 1025 extern int64_t hammer_stats_file_read; 1026 extern int64_t hammer_stats_file_write; 1027 extern int64_t hammer_stats_file_iopsr; 1028 extern int64_t hammer_stats_file_iopsw; 1029 extern int64_t hammer_stats_disk_read; 1030 extern int64_t hammer_stats_disk_write; 1031 extern int64_t hammer_stats_inode_flushes; 1032 extern int64_t hammer_stats_commits; 1033 extern int64_t hammer_stats_undo; 1034 extern int64_t hammer_stats_redo; 1035 extern long hammer_count_dirtybufspace; 1036 extern int hammer_count_refedbufs; 1037 extern int hammer_count_reservations; 1038 extern long hammer_count_io_running_read; 1039 extern long hammer_count_io_running_write; 1040 extern int hammer_count_io_locked; 1041 extern long hammer_limit_dirtybufspace; 1042 extern int hammer_limit_recs; 1043 extern int hammer_limit_inode_recs; 1044 extern int hammer_limit_reclaims; 1045 extern int hammer_live_dedup_cache_size; 1046 extern int hammer_limit_redo; 1047 extern int hammer_verify_zone; 1048 extern int hammer_verify_data; 1049 extern int hammer_double_buffer; 1050 extern int hammer_btree_full_undo; 1051 extern int hammer_yield_check; 1052 extern int hammer_fsync_mode; 1053 extern int hammer_autoflush; 1054 extern int64_t hammer_contention_count; 1055 1056 extern int64_t hammer_live_dedup_vnode_bcmps; 1057 extern int64_t hammer_live_dedup_device_bcmps; 1058 extern int64_t hammer_live_dedup_findblk_failures; 1059 extern int64_t hammer_live_dedup_bmap_saves; 1060 1061 void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip, 1062 int error, const char *msg); 1063 int hammer_vop_inactive(struct vop_inactive_args *); 1064 int hammer_vop_reclaim(struct vop_reclaim_args *); 1065 int hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp); 1066 struct hammer_inode *hammer_get_inode(hammer_transaction_t trans, 1067 hammer_inode_t dip, int64_t obj_id, 1068 hammer_tid_t asof, uint32_t localization, 1069 int flags, int *errorp); 1070 struct 
hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans, 1071 hammer_inode_t dip, int64_t obj_id, 1072 hammer_tid_t asof, uint32_t localization, 1073 int flags, int *errorp); 1074 struct hammer_inode *hammer_find_inode(hammer_transaction_t trans, 1075 int64_t obj_id, hammer_tid_t asof, 1076 uint32_t localization); 1077 void hammer_scan_inode_snapshots(hammer_mount_t hmp, 1078 hammer_inode_info_t iinfo, 1079 int (*callback)(hammer_inode_t ip, void *data), 1080 void *data); 1081 void hammer_put_inode(struct hammer_inode *ip); 1082 void hammer_put_inode_ref(struct hammer_inode *ip); 1083 void hammer_inode_waitreclaims(hammer_transaction_t trans); 1084 void hammer_inode_dirty(struct hammer_inode *ip); 1085 1086 int hammer_unload_volume(hammer_volume_t volume, void *data); 1087 int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused); 1088 1089 int hammer_unload_buffer(hammer_buffer_t buffer, void *data); 1090 int hammer_install_volume(hammer_mount_t hmp, const char *volname, 1091 struct vnode *devvp, void *data); 1092 int hammer_mountcheck_volumes(hammer_mount_t hmp); 1093 int hammer_get_installed_volumes(hammer_mount_t hmp); 1094 1095 int hammer_mem_add(hammer_record_t record); 1096 int hammer_ip_lookup(hammer_cursor_t cursor); 1097 int hammer_ip_first(hammer_cursor_t cursor); 1098 int hammer_ip_next(hammer_cursor_t cursor); 1099 int hammer_ip_resolve_data(hammer_cursor_t cursor); 1100 int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip, 1101 hammer_tid_t tid); 1102 int hammer_create_at_cursor(hammer_cursor_t cursor, 1103 hammer_btree_leaf_elm_t leaf, void *udata, int mode); 1104 int hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags, 1105 hammer_tid_t delete_tid, uint32_t delete_ts, 1106 int track, int64_t *stat_bytes); 1107 int hammer_ip_check_directory_empty(hammer_transaction_t trans, 1108 hammer_inode_t ip); 1109 int hammer_sync_hmp(hammer_mount_t hmp, int waitfor); 1110 int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor); 1111 1112 hammer_record_t 1113 hammer_alloc_mem_record(hammer_inode_t ip, int data_len); 1114 void hammer_flush_record_done(hammer_record_t record, int error); 1115 void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident); 1116 void hammer_rel_mem_record(hammer_record_t record); 1117 1118 int hammer_cursor_up(hammer_cursor_t cursor); 1119 int hammer_cursor_up_locked(hammer_cursor_t cursor); 1120 int hammer_cursor_down(hammer_cursor_t cursor); 1121 int hammer_cursor_upgrade(hammer_cursor_t cursor); 1122 int hammer_cursor_upgrade_node(hammer_cursor_t cursor); 1123 void hammer_cursor_downgrade(hammer_cursor_t cursor); 1124 int hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2); 1125 void hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2); 1126 int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, 1127 int index); 1128 void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident); 1129 int hammer_lock_ex_try(struct hammer_lock *lock); 1130 void hammer_lock_sh(struct hammer_lock *lock); 1131 int hammer_lock_sh_try(struct hammer_lock *lock); 1132 int hammer_lock_upgrade(struct hammer_lock *lock, int shcount); 1133 void hammer_lock_downgrade(struct hammer_lock *lock, int shcount); 1134 int hammer_lock_status(struct hammer_lock *lock); 1135 void hammer_unlock(struct hammer_lock *lock); 1136 void hammer_ref(struct hammer_lock *lock); 1137 int hammer_ref_interlock(struct hammer_lock *lock); 1138 int hammer_ref_interlock_true(struct 
hammer_lock *lock); 1139 void hammer_ref_interlock_done(struct hammer_lock *lock); 1140 void hammer_rel(struct hammer_lock *lock); 1141 int hammer_rel_interlock(struct hammer_lock *lock, int locked); 1142 void hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked); 1143 int hammer_get_interlock(struct hammer_lock *lock); 1144 int hammer_try_interlock_norefs(struct hammer_lock *lock); 1145 void hammer_put_interlock(struct hammer_lock *lock, int error); 1146 1147 void hammer_sync_lock_ex(hammer_transaction_t trans); 1148 void hammer_sync_lock_sh(hammer_transaction_t trans); 1149 int hammer_sync_lock_sh_try(hammer_transaction_t trans); 1150 void hammer_sync_unlock(hammer_transaction_t trans); 1151 1152 uint32_t hammer_to_unix_xid(uuid_t *uuid); 1153 void hammer_guid_to_uuid(uuid_t *uuid, uint32_t guid); 1154 void hammer_time_to_timespec(uint64_t xtime, struct timespec *ts); 1155 uint64_t hammer_timespec_to_time(struct timespec *ts); 1156 int hammer_str_to_tid(const char *str, int *ispfsp, 1157 hammer_tid_t *tidp, uint32_t *localizationp); 1158 hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip, 1159 int64_t namekey); 1160 void hammer_clear_objid(hammer_inode_t dip); 1161 void hammer_destroy_objid_cache(hammer_mount_t hmp); 1162 1163 int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1, 1164 hammer_dedup_cache_t dc2); 1165 int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1, 1166 hammer_dedup_cache_t dc2); 1167 hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip, 1168 hammer_btree_leaf_elm_t leaf); 1169 hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp, 1170 hammer_crc_t crc); 1171 void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset); 1172 void hammer_destroy_dedup_cache(hammer_mount_t hmp); 1173 void hammer_dump_dedup_cache(hammer_mount_t hmp); 1174 int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes, 1175 void *data); 1176 1177 int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, 1178 int bytes); 1179 void hammer_clear_undo_history(hammer_mount_t hmp); 1180 enum vtype hammer_get_vnode_type(uint8_t obj_type); 1181 int hammer_get_dtype(uint8_t obj_type); 1182 uint8_t hammer_get_obj_type(enum vtype vtype); 1183 int64_t hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len, 1184 uint32_t *max_iterationsp); 1185 int hammer_nohistory(hammer_inode_t ip); 1186 1187 int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor, 1188 hammer_node_cache_t cache, hammer_inode_t ip); 1189 void hammer_normalize_cursor(hammer_cursor_t cursor); 1190 void hammer_done_cursor(hammer_cursor_t cursor); 1191 int hammer_recover_cursor(hammer_cursor_t cursor); 1192 void hammer_unlock_cursor(hammer_cursor_t cursor); 1193 int hammer_lock_cursor(hammer_cursor_t cursor); 1194 hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor); 1195 void hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor); 1196 1197 void hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode); 1198 void hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent, 1199 int index); 1200 void hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, 1201 int index); 1202 void hammer_cursor_moved_element(hammer_node_t oparent, int pindex, 1203 hammer_node_t onode, int oindex, 1204 hammer_node_t nnode, int nindex); 1205 void hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent, 1206 hammer_node_t nparent, int nindex); 1207 void 
hammer_cursor_inserted_element(hammer_node_t node, int index); 1208 void hammer_cursor_deleted_element(hammer_node_t node, int index); 1209 void hammer_cursor_invalidate_cache(hammer_cursor_t cursor); 1210 1211 int hammer_btree_lookup(hammer_cursor_t cursor); 1212 int hammer_btree_first(hammer_cursor_t cursor); 1213 int hammer_btree_last(hammer_cursor_t cursor); 1214 int hammer_btree_extract(hammer_cursor_t cursor, int flags); 1215 int hammer_btree_iterate(hammer_cursor_t cursor); 1216 int hammer_btree_iterate_reverse(hammer_cursor_t cursor); 1217 int hammer_btree_insert(hammer_cursor_t cursor, 1218 hammer_btree_leaf_elm_t elm, int *doprop); 1219 int hammer_btree_delete(hammer_cursor_t cursor, int *ndelete); 1220 void hammer_btree_do_propagation(hammer_cursor_t cursor, 1221 hammer_pseudofs_inmem_t pfsm, 1222 hammer_btree_leaf_elm_t leaf); 1223 int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2); 1224 int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key); 1225 int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid); 1226 int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid); 1227 1228 int btree_set_parent_of_child(hammer_transaction_t trans, 1229 hammer_node_t node, 1230 hammer_btree_elm_t elm); 1231 void hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node); 1232 void hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache, 1233 int depth); 1234 void hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache); 1235 int hammer_btree_lock_children(hammer_cursor_t cursor, int depth, 1236 hammer_node_lock_t parent, 1237 hammer_node_lock_t lcache); 1238 void hammer_btree_lock_copy(hammer_cursor_t cursor, 1239 hammer_node_lock_t parent); 1240 int hammer_btree_sync_copy(hammer_cursor_t cursor, 1241 hammer_node_lock_t parent); 1242 void hammer_btree_unlock_children(hammer_mount_t hmp, 1243 hammer_node_lock_t parent, 1244 hammer_node_lock_t lcache); 1245 int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node); 1246 hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans, 1247 hammer_node_t node, int *parent_indexp, 1248 int *errorp, int try_exclusive); 1249 1250 void hammer_print_btree_node(hammer_node_ondisk_t ondisk); 1251 void hammer_print_btree_elm(hammer_btree_elm_t elm); 1252 1253 void *hammer_bread(hammer_mount_t hmp, hammer_off_t off, 1254 int *errorp, struct hammer_buffer **bufferp); 1255 void *hammer_bnew(hammer_mount_t hmp, hammer_off_t off, 1256 int *errorp, struct hammer_buffer **bufferp); 1257 void *hammer_bread_ext(hammer_mount_t hmp, hammer_off_t off, int bytes, 1258 int *errorp, struct hammer_buffer **bufferp); 1259 void *hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t off, int bytes, 1260 int *errorp, struct hammer_buffer **bufferp); 1261 1262 hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp); 1263 1264 hammer_volume_t hammer_get_volume(hammer_mount_t hmp, 1265 int32_t vol_no, int *errorp); 1266 hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset, 1267 int bytes, int isnew, int *errorp); 1268 void hammer_sync_buffers(hammer_mount_t hmp, 1269 hammer_off_t base_offset, int bytes); 1270 int hammer_del_buffers(hammer_mount_t hmp, 1271 hammer_off_t base_offset, 1272 hammer_off_t zone2_offset, int bytes, 1273 int report_conflicts); 1274 1275 int hammer_ref_volume(hammer_volume_t volume); 1276 int hammer_ref_buffer(hammer_buffer_t buffer); 1277 void hammer_flush_buffer_nodes(hammer_buffer_t buffer); 1278 1279 
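/*
 * Illustrative sketch (not part of the original header): the
 * hammer_bread()/hammer_bnew() family returns a pointer into an on-disk
 * buffer and tracks the backing buffer via *bufferp, which the caller
 * releases when done.  The same buffer pointer may be recycled across
 * consecutive calls, which is why it is passed both in and out.
 *
 *	struct hammer_buffer *buffer = NULL;
 *	int error;
 *	void *data;
 *
 *	data = hammer_bread(hmp, off, &error, &buffer);
 *	if (data) {
 *		... read-only access to the mapped data ...
 *	}
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */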
void hammer_rel_volume(hammer_volume_t volume, int locked); 1280 void hammer_rel_buffer(hammer_buffer_t buffer, int locked); 1281 1282 int hammer_vfs_export(struct mount *mp, int op, 1283 const struct export_args *export); 1284 hammer_node_t hammer_get_node(hammer_transaction_t trans, 1285 hammer_off_t node_offset, int isnew, int *errorp); 1286 void hammer_ref_node(hammer_node_t node); 1287 hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans, 1288 hammer_node_cache_t cache, int *errorp); 1289 void hammer_rel_node(hammer_node_t node); 1290 void hammer_delete_node(hammer_transaction_t trans, 1291 hammer_node_t node); 1292 void hammer_cache_node(hammer_node_cache_t cache, 1293 hammer_node_t node); 1294 void hammer_uncache_node(hammer_node_cache_t cache); 1295 void hammer_flush_node(hammer_node_t node, int locked); 1296 1297 void hammer_dup_buffer(struct hammer_buffer **bufferp, 1298 struct hammer_buffer *buffer); 1299 hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, 1300 hammer_off_t hint, int *errorp); 1301 void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len, 1302 uint16_t rec_type, hammer_off_t *data_offsetp, 1303 struct hammer_buffer **data_bufferp, 1304 hammer_off_t hint, int *errorp); 1305 1306 int hammer_generate_undo(hammer_transaction_t trans, 1307 hammer_off_t zone_offset, void *base, int len); 1308 int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip, 1309 hammer_off_t file_offset, uint32_t flags, 1310 void *base, int len); 1311 void hammer_generate_redo_sync(hammer_transaction_t trans); 1312 void hammer_redo_fifo_start_flush(hammer_inode_t ip); 1313 void hammer_redo_fifo_end_flush(hammer_inode_t ip); 1314 1315 void hammer_format_undo(void *base, uint32_t seqno); 1316 int hammer_upgrade_undo_4(hammer_transaction_t trans); 1317 1318 void hammer_put_volume(struct hammer_volume *volume, int flush); 1319 void hammer_put_buffer(struct hammer_buffer *buffer, int flush); 1320 1321 hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans, 1322 hammer_off_t owner, int *errorp); 1323 void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset, 1324 hammer_off_t owner, int *errorp); 1325 int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp); 1326 hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone, 1327 int bytes, hammer_off_t hint, int *errorp); 1328 hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone, 1329 int bytes, hammer_off_t *zone_offp, int *errorp); 1330 hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, 1331 int bytes, hammer_off_t zone_offset, int *errorp); 1332 void hammer_blockmap_reserve_complete(hammer_mount_t hmp, 1333 hammer_reserve_t resv); 1334 void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv); 1335 void hammer_blockmap_free(hammer_transaction_t trans, 1336 hammer_off_t zone_offset, int bytes); 1337 int hammer_blockmap_dedup(hammer_transaction_t trans, 1338 hammer_off_t zone_offset, int bytes); 1339 int hammer_blockmap_finalize(hammer_transaction_t trans, 1340 hammer_reserve_t resv, 1341 hammer_off_t zone_offset, int bytes); 1342 int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset, 1343 int *curp, int *errorp); 1344 hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp, 1345 hammer_off_t zone_offset, int *errorp); 1346 1347 hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset, 1348 int *errorp); 1349 int64_t hammer_undo_used(hammer_transaction_t 
trans); 1350 int64_t hammer_undo_space(hammer_transaction_t trans); 1351 int64_t hammer_undo_max(hammer_mount_t hmp); 1352 int hammer_undo_reclaim(hammer_io_t io); 1353 1354 void hammer_start_transaction(struct hammer_transaction *trans, 1355 struct hammer_mount *hmp); 1356 void hammer_simple_transaction(struct hammer_transaction *trans, 1357 struct hammer_mount *hmp); 1358 void hammer_start_transaction_fls(struct hammer_transaction *trans, 1359 struct hammer_mount *hmp); 1360 void hammer_done_transaction(struct hammer_transaction *trans); 1361 hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count); 1362 1363 void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags); 1364 void hammer_flush_inode(hammer_inode_t ip, int flags); 1365 void hammer_flush_inode_done(hammer_inode_t ip, int error); 1366 void hammer_wait_inode(hammer_inode_t ip); 1367 1368 int hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap, 1369 struct ucred *cred, struct hammer_inode *dip, 1370 const char *name, int namelen, 1371 hammer_pseudofs_inmem_t pfsm, 1372 struct hammer_inode **ipp); 1373 void hammer_rel_inode(hammer_inode_t ip, int flush); 1374 int hammer_reload_inode(hammer_inode_t ip, void *arg __unused); 1375 int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); 1376 int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); 1377 int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused); 1378 1379 int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip); 1380 void hammer_test_inode(hammer_inode_t dip); 1381 void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp); 1382 int hammer_update_atime_quick(hammer_inode_t ip); 1383 1384 int hammer_ip_add_direntry(struct hammer_transaction *trans, 1385 hammer_inode_t dip, const char *name, int bytes, 1386 hammer_inode_t nip); 1387 int hammer_ip_del_direntry(struct hammer_transaction *trans, 1388 hammer_cursor_t cursor, hammer_inode_t dip, 1389 hammer_inode_t ip); 1390 void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record); 1391 hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, 1392 void *data, int bytes, int *errorp); 1393 int hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size); 1394 int hammer_ip_add_record(struct hammer_transaction *trans, 1395 hammer_record_t record); 1396 int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip, 1397 int64_t ran_beg, int64_t ran_end, int truncating); 1398 int hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, 1399 int *countp); 1400 int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip, 1401 int64_t offset, void *data, int bytes); 1402 int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec); 1403 hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans, 1404 uint32_t localization, int *errorp); 1405 int hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, 1406 hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip); 1407 int hammer_save_pseudofs(hammer_transaction_t trans, 1408 hammer_pseudofs_inmem_t pfsm); 1409 int hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization); 1410 void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm); 1411 int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag, 1412 struct ucred *cred); 1413 1414 void hammer_io_init(hammer_io_t io, hammer_volume_t volume, 1415 enum hammer_io_type type); 1416 int 
hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit); 1417 void hammer_io_advance(struct hammer_io *io); 1418 int hammer_io_new(struct vnode *devvp, struct hammer_io *io); 1419 int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset); 1420 struct buf *hammer_io_release(struct hammer_io *io, int flush); 1421 void hammer_io_flush(struct hammer_io *io, int reclaim); 1422 void hammer_io_wait(struct hammer_io *io); 1423 void hammer_io_waitdep(struct hammer_io *io); 1424 void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush); 1425 int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio, 1426 hammer_btree_leaf_elm_t leaf); 1427 int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio, 1428 hammer_btree_leaf_elm_t leaf); 1429 int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio, 1430 hammer_record_t record); 1431 void hammer_io_direct_wait(hammer_record_t record); 1432 void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf); 1433 void hammer_io_write_interlock(hammer_io_t io); 1434 void hammer_io_done_interlock(hammer_io_t io); 1435 void hammer_io_clear_modify(struct hammer_io *io, int inval); 1436 void hammer_io_clear_modlist(struct hammer_io *io); 1437 void hammer_io_flush_sync(hammer_mount_t hmp); 1438 void hammer_io_clear_error(struct hammer_io *io); 1439 void hammer_io_clear_error_noassert(struct hammer_io *io); 1440 void hammer_io_notmeta(hammer_buffer_t buffer); 1441 void hammer_io_limit_backlog(hammer_mount_t hmp); 1442 1443 void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, 1444 void *base, int len); 1445 void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, 1446 void *base, int len); 1447 void hammer_modify_volume_done(hammer_volume_t volume); 1448 void hammer_modify_buffer_done(hammer_buffer_t buffer); 1449 1450 int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip, 1451 struct hammer_ioc_reblock *reblock); 1452 int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip, 1453 struct hammer_ioc_rebalance *rebal); 1454 int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip, 1455 struct hammer_ioc_prune *prune); 1456 int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip, 1457 struct hammer_ioc_mirror_rw *mirror); 1458 int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip, 1459 struct hammer_ioc_mirror_rw *mirror); 1460 int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1461 struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs); 1462 int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1463 struct hammer_ioc_pseudofs_rw *pfs); 1464 int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1465 struct hammer_ioc_pseudofs_rw *pfs); 1466 int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1467 struct hammer_ioc_pseudofs_rw *pfs); 1468 int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1469 struct hammer_ioc_pseudofs_rw *pfs); 1470 int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1471 struct hammer_ioc_pseudofs_rw *pfs); 1472 int hammer_ioc_iterate_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, 1473 struct hammer_ioc_pfs_iterate *pi); 1474 int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip, 1475 struct hammer_ioc_volume *ioc); 1476 int hammer_ioc_volume_del(hammer_transaction_t trans, 
int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_iterate_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_pfs_iterate *pi);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_dedup *dedup);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int hammer_flusher_async_one(hammer_mount_t hmp);
int hammer_flusher_running(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int hammer_flusher_meta_limit(hammer_mount_t hmp);
int hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int hammer_flusher_haswork(hammer_mount_t hmp);
int hammer_flush_dirty(hammer_mount_t hmp, int max_count);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
                hammer_volume_t root_volume, int final);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
udev_t hammer_fsid_to_udev(uuid_t *uuid);

int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
        return(_hammer_checkspace(hmp, slop, NULL));
}

static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
        hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
        hammer_lock_ex_ident(lock, "hmrlck");
}

static __inline void
hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume)
{
        hammer_modify_volume(trans, volume, NULL, 0);
}

static __inline void
hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer)
{
        hammer_modify_buffer(trans, buffer, NULL, 0);
}
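
/*
 * Illustrative sketch (not part of the original header): the *_noundo
 * variants above pass a NULL base and zero length, marking the volume or
 * buffer modified without generating an undo record.  The slop constant
 * and the surrounding logic are assumptions for illustration only.
 *
 *        error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE);
 *        if (error)
 *                return (error);
 *        hammer_modify_volume_noundo(trans, volume);
 *        ...initialize freshly allocated on-disk data...
 *        hammer_modify_volume_done(volume);
 */
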
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
        hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
        KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
        hammer_modify_buffer(trans, node->buffer,
                node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
                void *base, int len)
{
        hammer_crc_t *crcptr;

        KKASSERT((char *)base >= (char *)node->ondisk &&
                 (char *)base + len <=
                 (char *)node->ondisk + sizeof(*node->ondisk));
        KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

        if (hammer_btree_full_undo) {
                hammer_modify_node_all(trans, node);
        } else {
                hammer_modify_buffer(trans, node->buffer, base, len);
                crcptr = &node->ondisk->crc;
                hammer_modify_buffer(trans, node->buffer,
                        crcptr, sizeof(hammer_crc_t));
                --node->buffer->io.modify_refs; /* only want one ref */
        }
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; it is very expensive to do, and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
        node->flags |= HAMMER_NODE_CRCGOOD;
        if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
                node->flags |= HAMMER_NODE_NEEDSCRC;
                node->buffer->io.gencrc = 1;
                hammer_ref_node(node);
        }
        hammer_modify_buffer_done(node->buffer);
}

static __inline int
hammer_btree_extract_leaf(hammer_cursor_t cursor)
{
        return(hammer_btree_extract(cursor, 0));
}

static __inline int
hammer_btree_extract_data(hammer_cursor_t cursor)
{
        return(hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA));
}

static __inline void
hammer_crc_set_btree(hammer_node_ondisk_t ondisk)
{
        ondisk->crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
}
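
/*
 * Illustrative sketch (not part of the original header): B-Tree node
 * updates follow the same bracketing discipline, with CRC generation
 * deferred as described above.  The element index and assignment are
 * placeholders; hammer_modify_node_field() (defined below) is the
 * shorthand for whole named fields.
 *
 *        hammer_modify_node(trans, node, &node->ondisk->elms[i],
 *                           sizeof(node->ondisk->elms[i]));
 *        node->ondisk->elms[i] = new_elm;
 *        hammer_modify_node_done(node);
 */
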
/*
 * Lookup a blockmap offset.
 */
static __inline hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                int *errorp)
{
#if defined INVARIANTS
        int zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(hammer_is_zone2_mapped_index(zone));
#endif

        /*
         * We can actually skip blockmap verify by default,
         * as normal blockmaps are now direct-mapped onto the freemap
         * and so represent zone-2 addresses.
         */
        if (hammer_verify_zone == 0) {
                *errorp = 0;
                return hammer_xlate_to_zone2(zone_offset);
        }

        return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp);
}

#define hammer_modify_volume_field(trans, vol, field)           \
        hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \
                             sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)            \
        hammer_modify_node(trans, node, &(node)->ondisk->field, \
                           sizeof((node)->ondisk->field))

/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves localization between directory entries and
 * inodes.
 */
static __inline uint32_t
hammer_dir_localization(hammer_inode_t dip)
{
        return(HAMMER_DIR_INODE_LOCALIZATION(&dip->ino_data));
}

static __inline hammer_io_t
hammer_buf_peek_io(struct buf *bp)
{
        return((hammer_io_t)LIST_FIRST(&bp->b_dep));
}

static __inline void
hammer_buf_attach_io(struct buf *bp, hammer_io_t io)
{
        /* struct buf and struct hammer_io are 1:1 */
        KKASSERT(hammer_buf_peek_io(bp) == NULL);
        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
}

#define hkprintf(format, args...) \
        kprintf("HAMMER: "format,## args)
#define hvkprintf(vol, format, args...) \
        kprintf("HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkprintf(hmp, format, args...) \
        kprintf("HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkprintf(format, args...) \
        kprintf("%s: "format, __func__,## args)

#define hkrateprintf(rate, format, args...) \
        krateprintf(rate, "HAMMER: "format,## args)
#define hvkrateprintf(rate, vol, format, args...) \
        krateprintf(rate, "HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkrateprintf(rate, hmp, format, args...) \
        krateprintf(rate, "HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkrateprintf(rate, format, args...) \
        krateprintf(rate, "%s: "format, __func__,## args)

#define hpanic(format, args...) \
        panic("%s: "format, __func__,## args)
#endif  /* _KERNEL */

#endif /* !VFS_HAMMER_HAMMER_H_ */