/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.130 2008/11/13 02:18:43 dillon Exp $
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/stat.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#include <sys/signal2.h>
#include <sys/mplock2.h>
#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

/*
 * Kernel trace
 */
#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
KTR_INFO_MASTER_EXTERN(hammer);

/*
 * Misc structures
 */
struct hammer_mount;

/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs id */
	union {
		struct hammer_btree_leaf_elm *leaf;
	} u;
} *hammer_inode_info_t;

typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;

/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	hammer_tid_t	tid;
	u_int64_t	time;
	u_int32_t	time32;
	int		sync_lock_refs;
	int		flags;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;

#define HAMMER_TRANSF_NEWINODE	0x0001
#define HAMMER_TRANSF_DIDIO	0x0002
#define HAMMER_TRANSF_CRCDOM	0x0004	/* EDOM on CRC error, less critical */

/*
 * HAMMER locks
 */
struct hammer_lock {
	volatile u_int	refs;		/* active references */
	volatile u_int	lockval;	/* lock count and control bits */
	struct thread	*lowner;	/* owner if exclusively held */
	struct thread	*rowner;	/* owner while refs are interlocked */
};

#define HAMMER_REFS_LOCKED	0x40000000	/* transition check */
#define HAMMER_REFS_WANTED	0x20000000	/* transition check */
#define HAMMER_REFS_CHECK	0x10000000	/* transition check */

#define HAMMER_REFS_FLAGS	(HAMMER_REFS_LOCKED | \
				 HAMMER_REFS_WANTED | \
				 HAMMER_REFS_CHECK)

#define HAMMER_LOCKF_EXCLUSIVE	0x40000000
#define HAMMER_LOCKF_WANTED	0x20000000

static __inline int
hammer_notlocked(struct hammer_lock *lock)
{
	return(lock->lockval == 0);
}

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockval != 0);
}

/*
 * Returns the number of refs on the object.
 */
static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs & ~HAMMER_REFS_FLAGS);
}

static __inline int
hammer_oneref(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
}

static __inline int
hammer_norefs(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
}

static __inline int
hammer_norefsorlock(struct hammer_lock *lock)
{
	return(lock->refs == 0);
}

static __inline int
hammer_refsorlock(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

/*
 * Returns non-zero if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
	    lock->lowner == td) {
		return(1);
	}
	return(0);
}
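
/*
 * Illustrative sketch, not part of the original API: the HAMMER_REFS_*
 * bits live in the high bits of 'refs', so a 1->0 ref transition can be
 * interlocked against new references by atomically setting
 * HAMMER_REFS_LOCKED while the count is still 1.  The real protocol is
 * implemented by hammer_rel_interlock() and friends, declared below;
 * this hypothetical helper only demonstrates the bit layout and assumes
 * the atomic_cmpset_int() primitive is available here.
 */
static __inline int
hammer_try_lastref_example(struct hammer_lock *lock)
{
	u_int refs = lock->refs;

	if (refs == 1) {
		/* appears to be the last ref, no transition in progress */
		return(atomic_cmpset_int(&lock->refs, refs,
					 refs | HAMMER_REFS_LOCKED));
	}
	return(0);
}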

/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;

TAILQ_HEAD(hammer_record_list, hammer_record);

/*
 * Pseudo-filesystem extended data tracking
 */
struct hammer_pfs_rb_tree;
struct hammer_pseudofs_inmem;
RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	      hammer_pfs_rb_compare, u_int32_t);

struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem) rb_node;
	struct hammer_lock	lock;
	u_int32_t		localization;
	hammer_tid_t		create_tid;
	int			flags;
	udev_t			fsid_udev;
	struct hammer_pseudofs_data pfsd;
};

typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t;

#define HAMMER_PFSM_DELETED	0x0001

/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object ids for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	2048
#define OBJID_CACHE_BULK_BITS	10		/* 10 bits (1024) */
#define OBJID_CACHE_BULK	(32 * 32)	/* two level (1024) */
#define OBJID_CACHE_BULK_MASK	(OBJID_CACHE_BULK - 1)
#define OBJID_CACHE_BULK_MASK64	((u_int64_t)(OBJID_CACHE_BULK - 1))

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode	*dip;
	hammer_tid_t		base_tid;
	int			count;
	u_int32_t		bm0;
	u_int32_t		bm1[32];
} *hammer_objid_cache_t;
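
/*
 * Illustrative sketch (hypothetical helper, not part of HAMMER): with
 * OBJID_CACHE_BULK == 32 * 32, the reserved range is tracked by a
 * two-level bitmap -- bit i of bm0 summarizes the 32 ids covered by
 * word bm1[i].  Testing whether delta 'n' within the range has been
 * handed out reduces to simple shifts and masks (bit sense assumed for
 * illustration).  The real allocation policy lives in
 * hammer_alloc_objid() in hammer_object.c.
 */
static __inline int
hammer_objid_bulk_used_example(hammer_objid_cache_t cache, u_int32_t n)
{
	KKASSERT(n < OBJID_CACHE_BULK);
	return((cache->bm1[n >> 5] >> (n & 31)) & 1);
}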

/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache) entry;
	struct hammer_node	*node;
	struct hammer_inode	*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);

/*
 * Live dedup cache
 */
struct hammer_dedup_crc_rb_tree;
RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry,
	      hammer_dedup_crc_rb_compare, hammer_crc_t);

struct hammer_dedup_off_rb_tree;
RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry,
	      hammer_dedup_off_rb_compare, hammer_off_t);

#define DEDUP_CACHE_SIZE	4096	/* XXX make it a dynamic tunable */

typedef struct hammer_dedup_cache {
	RB_ENTRY(hammer_dedup_cache) crc_entry;
	RB_ENTRY(hammer_dedup_cache) off_entry;
	TAILQ_ENTRY(hammer_dedup_cache) lru_entry;
	struct hammer_mount *hmp;
	int64_t		obj_id;
	u_int32_t	localization;
	off_t		file_offset;
	int		bytes;
	hammer_off_t	data_offset;
	hammer_crc_t	crc;
} *hammer_dedup_cache_t;
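
/*
 * Illustrative flow, in comment form because the functions involved are
 * only prototyped further below: the cache is indexed both by data CRC
 * and by zone-2 offset.  A write may probe it by CRC, but a hit must be
 * verified against the actual data before being trusted, since CRCs can
 * collide:
 *
 *	dcp = hammer_dedup_cache_lookup(hmp, crc);
 *	if (dcp && hammer_dedup_validate(dcp, zone, bytes, data)) {
 *		(reference dcp->data_offset instead of allocating
 *		 new space for the duplicate data)
 *	}
 *
 * This is a sketch under assumptions; see hammer_dedup.c for the real
 * logic.
 */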

/*
 * Structure used to organize flush groups.  Flush groups must be
 * organized into chunks in order to avoid blowing out the UNDO FIFO.
 * Without this a 'sync' could end up flushing 50,000 inodes in a single
 * transaction.
 */
struct hammer_fls_rb_tree;
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	     hammer_ino_rb_compare);

struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group) flush_entry;
	struct hammer_fls_rb_tree flush_tree;
	int			seq;		/* our seq no */
	int			total_count;	/* record load */
	int			running;	/* group is running */
	int			closed;
	int			refs;
};

typedef struct hammer_flush_group *hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);

/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * clear).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * databases and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_redo_rb_tree;
RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	      hammer_redo_rb_compare, hammer_off_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);

struct hammer_inode {
	RB_ENTRY(hammer_inode) rb_node;
	hammer_inode_state_t flush_state;
	hammer_flush_group_t flush_group;
	RB_ENTRY(hammer_inode) rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode) rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs id */
	struct hammer_mount *hmp;
	hammer_objid_cache_t objid_cache;
	int		flags;
	int		error;		/* flush error */
	int		cursor_ip_refs;	/* sanity */
	int		rsv_recs;
	struct vnode	*vp;
	hammer_pseudofs_inmem_t pfsm;
	struct lockf	advlock;
	struct hammer_lock lock;	/* sync copy interlock */
	off_t		trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int		rec_generation;
	struct hammer_node_cache cache[4];	/* search initiate cache */

	/*
	 * When a demarcation point is created to synchronize an inode
	 * to disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;	/* to-sync flags cache */
	off_t		sync_trunc_off;	/* to-sync truncation */
	off_t		save_trunc_off;	/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;	    /* to-sync cache */
	size_t		redo_count;

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t	redo_fifo_start;
	hammer_off_t	redo_fifo_next;
};

typedef struct hammer_inode *hammer_inode_t;

#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 *	 write-append size changes.  SDIRTY handles write-append size
 *	 changes.
 *
 *	 REDO indicates that REDO logging is active, creating a definitive
 *	 stream of REDO records in the UNDO/REDO log for writes and
 *	 truncations, including boundary records when/if REDO is turned off.
 *	 REDO is typically enabled by fsync() and turned off if excessive
 *	 writes without an fsync() occur.
 *
 *	 RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 *	 FIFO (even if REDO is turned off some might still be active) and
 *	 are still being tracked for this inode.  See hammer_redo.c.
 */
					/* (not including atime/mtime) */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_UNUSED0400	0x0400
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|   \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	    \
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|	    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY	\
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)

#define HAMMER_INODE_MODMASK_NOREDO	\
				(HAMMER_INODE_DDIRTY|			    \
				 HAMMER_INODE_XDIRTY|			    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
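
/*
 * Illustrative predicate (hypothetical helper, not part of HAMMER):
 * "does this inode have anything the flusher would care about?" is
 * simply a test against MODMASK; the real code open-codes this test
 * in several places.
 */
static __inline int
hammer_inode_ismodified_example(hammer_inode_t ip)
{
	return((ip->flags & HAMMER_INODE_MODMASK) != 0);
}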

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002

/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.  The reclaim wakes up when count reaches 0 or the
 * timer expires.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int	count;
};

/*
 * Track who is creating the greatest burden on the
 * inode cache.
 */
struct hammer_inostats {
	pid_t		pid;	/* track user process */
	int		ltick;	/* last tick */
	int		count;	/* count (degenerates) */
};

#define HAMMER_INOSTATS_HSIZE	32
#define HAMMER_INOSTATS_HMASK	(HAMMER_INOSTATS_HSIZE - 1)

/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;

struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_flush_group_t		flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
	int				gflags;
	hammer_off_t			zone2_offset;	/* direct-write only */
};

typedef struct hammer_record *hammer_record_t;

/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_COMMITTED		0x0010	/* committed to the B-Tree */
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_DEDUPED		0x0080	/* will be live-dedup'ed */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
#define HAMMER_RECF_REDO		0x1000	/* REDO was laid down */
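
/*
 * Illustrative sketch (hypothetical helper) of the FE/BE rule stated
 * above: the frontend may only mark a record DELETED_FE while the
 * backend has not interlocked it with INTERLOCK_BE; otherwise it must
 * flag WANTED and retry after the backend releases the record.  See
 * hammer_object.c for the real interlock protocol.
 */
static __inline int
hammer_record_try_delete_fe_example(hammer_record_t record)
{
	if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
		record->flags |= HAMMER_RECF_WANTED;
		return(0);		/* caller must wait and retry */
	}
	record->flags |= HAMMER_RECF_DELETED_FE;
	return(1);
}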

/*
 * These flags must be separate to deal with SMP races
 */
#define HAMMER_RECG_DIRECT_IO		0x0001	/* related direct I/O running*/
#define HAMMER_RECG_DIRECT_WAIT		0x0002	/* waiting on direct I/O */
#define HAMMER_RECG_DIRECT_INVAL	0x0004	/* buffer alias invalidation */

/*
 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
 */
#define HAMMER_CREATE_MODE_UMIRROR	0x0001
#define HAMMER_CREATE_MODE_SYS		0x0002

#define HAMMER_DELETE_ADJUST		0x0001
#define HAMMER_DELETE_DESTROY		0x0002

/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);
RB_HEAD(hammer_mod_rb_tree, hammer_io);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
	      hammer_mod_rb_compare, hammer_off_t);
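
/*
 * Illustrative sketch (hypothetical helper): DragonFly's
 * RB_PROTOTYPE2() additionally generates a value-keyed lookup usable
 * through the RB_LOOKUP() macro, so an in-memory structure can be
 * found directly by its key without building a dummy node.  Example
 * for the buffer tree, which is indexed by zone-X offset:
 */
static __inline struct hammer_buffer *
hammer_buf_lookup_example(struct hammer_buf_rb_tree *root,
			  hammer_off_t zoneX_offset)
{
	return(RB_LOOKUP(hammer_buf_rb_tree, root, zoneX_offset));
}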

/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER,
	HAMMER_STRUCTURE_DUMMY
} hammer_io_type_t;

union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;

struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	struct hammer_volume	*volume;
	RB_ENTRY(hammer_io)	rb_node;	/* if modified */
	TAILQ_ENTRY(hammer_io)	iorun_entry;	/* iorun_list */
	struct hammer_mod_rb_tree *mod_root;
	struct buf		*bp;
	int64_t			offset;		/* zone-2 offset */
	int			bytes;		/* buffer cache buffer size */
	int			modify_refs;

	/*
	 * These can be modified at any time by the backend while holding
	 * io_token, due to bio_done and hammer_io_complete() callbacks.
	 */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		ioerror : 1;	/* abort on io-error */
	u_int		unusedA : 29;

	/*
	 * These can only be modified by the frontend while holding
	 * fs_token, or by the backend while holding the io interlocked
	 * with no references (which will block the frontend when it
	 * tries to reference it).
	 *
	 * WARNING! SMP RACES will create havoc if the callbacks ever tried
	 *	    to modify any of these outside the above restrictions.
	 */
	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
	u_int		unusedB : 24;
};

typedef struct hammer_io *hammer_io_t;

#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)

/*
 * In-memory volume representing the on-disk volume header
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t	nblocks;	/* note: special calculation for statfs */
	int64_t	buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* maximum raw offset for device */
	char	*vol_name;
	struct vnode *devvp;
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;

/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;
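
/*
 * Illustrative downcast (hypothetical helper): because struct hammer_io
 * is embedded at the head of hammer_volume and hammer_buffer, generic
 * IO code can recover the containing structure from a hammer_io_t with
 * a cast, which the hammer_io_structure union further below formalizes.
 * The io->type field says which cast is legal.
 */
static __inline hammer_buffer_t
hammer_io_to_buffer_example(hammer_io_t io)
{
	KKASSERT(io->type == HAMMER_STRUCTURE_META_BUFFER ||
		 io->type == HAMMER_STRUCTURE_UNDO_BUFFER ||
		 io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	return((hammer_buffer_t)io);
}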

/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list;  /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020
#define HAMMER_NODE_NONLINEAR	0x0040		/* linear heuristic */

#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)

typedef struct hammer_node	*hammer_node_t;

/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;
	struct hammer_node_lock_list list;
	struct hammer_node_lock *parent;
	hammer_node_t	node;
	hammer_node_ondisk_t copy;	/* copy of on-disk data */
	int		index;		/* index of this node in parent */
	int		count;		/* count of children */
	int		flags;
};

typedef struct hammer_node_lock *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001
#define HAMMER_NODE_LOCK_LCACHE		0x0002

/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;

/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved bigblock.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flush_group;
	int		flags;
	int		refs;
	int		zone;
	int		append_off;
	int32_t		bytes_free;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
#define HAMMER_RESF_LAYER2FREE	0x0002

#include "hammer_cursor.h"

/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS		1024
#define HAMMER_MAX_FLUSHERS		4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t	offset;
	int		bytes;
};

typedef struct hammer_undo *hammer_undo_t;
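
/*
 * Illustrative flow, in comment form because the function is only
 * prototyped further below: before laying down an undo record the
 * generator consults this history, and a duplicate offset/length pair
 * within the same flush group can be skipped entirely:
 *
 *	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
 *		return(0);	(undo already generated this flush)
 *
 * Return-value semantics assumed for illustration; see hammer_undo.c
 * for the real code.
 */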

struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		act;		/* currently active flush group */
	int		done;		/* set to act when complete */
	int		next;		/* next unallocated flg seqno */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;	/* serialize finalization */
	struct hammer_lock finalize_lock; /* serialize finalization */
	struct hammer_transaction trans;  /* shared transaction */
	struct hammer_flusher_info_list run_list;
	struct hammer_flusher_info_list ready_list;
};

#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2

/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_redo_rb_tree rb_redo_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_pfs_rb_tree rb_pfsm_root;

	struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
	struct hammer_dedup_off_rb_tree rb_dedup_off_root;

	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;

	struct malloc_type	*m_misc;
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	hflags;
	int	ronly;
	int	nvolumes;
	int	volume_iterator;
	int	master_id;	/* -1 or 0-15 - clustering and mirroring */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* bigblocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */
	int	last_newrecords;
	int	count_newrecords;

	int	volume_to_remove; /* volume that is currently being removed */

	int	inode_reclaims;	/* inodes pending reclaim by flusher */
	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;
	u_int	check_yield;
	uuid_t	fsid;
	struct hammer_mod_rb_tree volu_root;	/* dirty volume headers */
	struct hammer_mod_rb_tree undo_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree data_root;	/* dirty data buffers */
	struct hammer_mod_rb_tree meta_root;	/* dirty meta bufs */
	struct hammer_mod_rb_tree lose_root;	/* loose buffers */
	int	locked_dirty_space;	/* meta/volu count */
	int	io_running_space;	/* io_token */
	int	io_running_wakeup;	/* io_token */
	int	objid_cache_count;
	int	dedup_cache_count;
	int	error;			/* critical I/O error */
	struct krate	krate;		/* rate limited kprintf */
	hammer_tid_t	asof;		/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;	/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;	/* flusher tid sequencing */
	int64_t	copy_stat_freebigblocks; /* number of free bigblocks */
	u_int32_t	undo_seqno;	/* UNDO/REDO FIFO seqno */
	u_int32_t	recover_stage2_seqno; /* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset; /* REDO recovery offset */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo undos[HAMMER_MAX_UNDOS];
	int	undo_alloc;
	TAILQ_HEAD(, hammer_undo) undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list flush_group_list;
	hammer_flush_group_t	fill_flush_group;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t	dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token	fs_token;	/* high level */
	struct lwkt_token	io_token;	/* low level (IO callback) */

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
};

typedef struct hammer_mount *hammer_mount_t;
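
/*
 * Illustrative sketch (hypothetical helper): fields documented as
 * "io_token" above, such as the hammer_io running/waiting bits, can be
 * changed at any time by IO completion callbacks, so the frontend
 * samples them while holding the low-level io_token.
 */
static __inline int
hammer_io_sample_running_example(hammer_io_t io)
{
	int running;

	lwkt_gettoken(&io->hmp->io_token);
	running = io->running;
	lwkt_reltoken(&io->hmp->io_token);
	return(running);
}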

#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010

struct hammer_sync_info {
	int error;
	int waitfor;
};

/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of balance.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)


#endif

/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0

#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_debug;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_critical;
extern int hammer_cluster_enable;
extern int hammer_live_dedup;
extern int hammer_count_fsyncs;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaiming;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_count_extra_space_used;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_btree_root_iterations;
extern int64_t hammer_stats_record_iterations;
extern int64_t hammer_stats_file_read;
extern int64_t hammer_stats_file_write;
extern int64_t hammer_stats_file_iopsr;
extern int64_t hammer_stats_file_iopsw;
extern int64_t hammer_stats_disk_read;
extern int64_t hammer_stats_disk_write;
extern int64_t hammer_stats_inode_flushes;
extern int64_t hammer_stats_commits;
extern int64_t hammer_stats_undo;
extern int64_t hammer_stats_redo;
extern int hammer_count_dirtybufspace;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern int hammer_count_io_running_read;
extern int hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern int hammer_limit_dirtybufspace;
extern int hammer_limit_running_io;
extern int hammer_limit_recs;
extern int hammer_limit_inode_recs;
extern int hammer_limit_reclaim;
extern int hammer_live_dedup_cache_size;
extern int hammer_limit_redo;
extern int hammer_bio_count;
extern int hammer_verify_zone;
extern int hammer_verify_data;
extern int hammer_write_mode;
extern int hammer_double_buffer;
extern int hammer_yield_check;
extern int hammer_fsync_mode;
extern int hammer_autoflush;
extern int64_t hammer_contention_count;

extern int64_t hammer_live_dedup_vnode_bcmps;
extern int64_t hammer_live_dedup_device_bcmps;
extern int64_t hammer_live_dedup_findblk_failures;
extern int64_t hammer_live_dedup_bmap_saves;

void	hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
			int error, const char *msg);
int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_find_inode(hammer_transaction_t trans,
			int64_t obj_id, hammer_tid_t asof,
			u_int32_t localization);
void	hammer_scan_inode_snapshots(hammer_mount_t hmp,
			hammer_inode_info_t iinfo,
			int (*callback)(hammer_inode_t ip, void *data),
			void *data);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);
void	hammer_inode_waitreclaims(hammer_transaction_t trans);

int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int	hammer_unload_buffer(hammer_buffer_t buffer, void *data);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname,
			struct vnode *devvp);
int	hammer_mountcheck_volumes(hammer_mount_t hmp);

int	hammer_mem_add(hammer_record_t record);
int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_create_at_cursor(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t leaf, void *udata, int mode);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, u_int32_t delete_ts,
			int track, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);

hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);

int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
void	hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
void	hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
int	hammer_lock_status(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
int	hammer_ref_interlock(struct hammer_lock *lock);
int	hammer_ref_interlock_true(struct hammer_lock *lock);
void	hammer_ref_interlock_done(struct hammer_lock *lock);
void	hammer_rel(struct hammer_lock *lock);
int	hammer_rel_interlock(struct hammer_lock *lock, int locked);
void	hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
int	hammer_get_interlock(struct hammer_lock *lock);
int	hammer_try_interlock_norefs(struct hammer_lock *lock);
void	hammer_put_interlock(struct hammer_lock *lock, int error);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);

u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void	hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts);
u_int64_t hammer_timespec_to_time(struct timespec *ts);
int	hammer_str_to_tid(const char *str, int *ispfsp,
			hammer_tid_t *tidp, u_int32_t *localizationp);
int	hammer_is_atatext(const char *name, int len);
hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
			int64_t namekey);
void	hammer_clear_objid(hammer_inode_t dip);
void	hammer_destroy_objid_cache(hammer_mount_t hmp);

int	hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
int	hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip,
			hammer_btree_leaf_elm_t leaf);
hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp,
			hammer_crc_t crc);
void	hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset);
void	hammer_destroy_dedup_cache(hammer_mount_t hmp);
void	hammer_dump_dedup_cache(hammer_mount_t hmp);
int	hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
			void *data);

int	hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void	hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int	hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			u_int32_t *max_iterationsp);
int	hammer_nohistory(hammer_inode_t ip);

int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
int	hammer_recover_cursor(hammer_cursor_t cursor);
void	hammer_unlock_cursor(hammer_cursor_t cursor);
int	hammer_lock_cursor(hammer_cursor_t cursor);
hammer_cursor_t	hammer_push_cursor(hammer_cursor_t ocursor);
void	hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);

void	hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
void	hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
			int index);
void	hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
			int index);
void	hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
			hammer_node_t onode, int oindex,
			hammer_node_t nnode, int nindex);
void	hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
			hammer_node_t nparent, int nindex);
void	hammer_cursor_inserted_element(hammer_node_t node, int index);
void	hammer_cursor_deleted_element(hammer_node_t node, int index);
void	hammer_cursor_invalidate_cache(hammer_cursor_t cursor);

int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm, int *doprop);
int	hammer_btree_delete(hammer_cursor_t cursor);
void	hammer_btree_do_propagation(hammer_cursor_t cursor,
			hammer_pseudofs_inmem_t pfsm,
			hammer_btree_leaf_elm_t leaf);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);
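
/*
 * Illustrative scan sketch (hypothetical helper): the canonical way to
 * walk a key range is to position the cursor with hammer_btree_first()
 * and then call hammer_btree_iterate() until it returns ENOENT, which
 * here is translated to "success, no more elements".  Assumes the
 * caller set up cursor->key_beg/key_end via hammer_init_cursor().
 */
static __inline int
hammer_btree_scan_example(hammer_cursor_t cursor,
			  int (*func)(hammer_cursor_t cursor, void *arg),
			  void *arg)
{
	int error;

	error = hammer_btree_first(cursor);
	while (error == 0) {
		error = func(cursor, arg);
		if (error)
			break;
		error = hammer_btree_iterate(cursor);
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}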

int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
			hammer_btree_elm_t elm);
void	hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
void	hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
			int depth);
void	hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
int	hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
void	hammer_btree_lock_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
int	hammer_btree_sync_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
void	hammer_btree_unlock_children(hammer_mount_t hmp,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
int	hammer_btree_search_node(hammer_base_elm_t elm,
			hammer_node_ondisk_t node);
hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
			hammer_node_t node, int *parent_indexp,
			int *errorp, int try_exclusive);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);

void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);

hammer_volume_t	hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
			int bytes, int isnew, int *errorp);
void		hammer_sync_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset, int bytes);
int		hammer_del_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes,
			int report_conflicts);

int		hammer_ref_volume(hammer_volume_t volume);
int		hammer_ref_buffer(hammer_buffer_t buffer);
void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void		hammer_rel_volume(hammer_volume_t volume, int locked);
void		hammer_rel_buffer(hammer_buffer_t buffer, int locked);
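
/*
 * Illustrative sketch (hypothetical helper): hammer_bread() returns a
 * pointer into a tracked buffer and records the underlying buffer in
 * *bufferp, which the caller must release.  Passing the same bufferp
 * across consecutive calls allows the buffer to be reused when the
 * offsets fall within the same underlying buffer.
 */
static __inline int
hammer_bread_example(hammer_mount_t hmp, hammer_off_t off)
{
	struct hammer_buffer *buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, off, &error, &buffer);
	if (data != NULL) {
		/* inspect *data while the buffer reference is held */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}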

int	hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t hammer_get_node(hammer_transaction_t trans,
			hammer_off_t node_offset, int isnew, int *errorp);
void	hammer_ref_node(hammer_node_t node);
hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans,
			hammer_node_cache_t cache, int *errorp);
void	hammer_rel_node(hammer_node_t node);
void	hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void	hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void	hammer_uncache_node(hammer_node_cache_t cache);
void	hammer_flush_node(hammer_node_t node, int locked);

void	hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
			hammer_off_t hint, int *errorp);
void	*hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			u_int16_t rec_type, hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp,
			hammer_off_t hint, int *errorp);

int	hammer_generate_undo(hammer_transaction_t trans,
			hammer_off_t zone1_offset, void *base, int len);
int	hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
			hammer_off_t file_offset, u_int32_t flags,
			void *base, int len);
void	hammer_generate_redo_sync(hammer_transaction_t trans);
void	hammer_redo_fifo_start_flush(hammer_inode_t ip);
void	hammer_redo_fifo_end_flush(hammer_inode_t ip);

void	hammer_format_undo(void *base, u_int32_t seqno);
int	hammer_upgrade_undo_4(hammer_transaction_t trans);

void	hammer_put_volume(struct hammer_volume *volume, int flush);
void	hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void	hammer_freemap_free(hammer_transaction_t trans,
			hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int	_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, hammer_off_t hint, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t zone_offset, int *errorp);
void	hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void	hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void	hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int	hammer_blockmap_dedup(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int	hammer_blockmap_finalize(hammer_transaction_t trans,
			hammer_reserve_t resv,
			hammer_off_t bmap_off, int bytes);
int	hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
int64_t hammer_undo_used(hammer_transaction_t trans);
int64_t hammer_undo_space(hammer_transaction_t trans);
int64_t hammer_undo_max(hammer_mount_t hmp);
int	hammer_undo_reclaim(hammer_io_t io);

void	hammer_start_transaction(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void	hammer_simple_transaction(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void	hammer_start_transaction_fls(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void	hammer_done_transaction(struct hammer_transaction *trans);
hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);

void	hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip,
			int flags);
void	hammer_flush_inode(hammer_inode_t ip, int flags);
void	hammer_flush_inode_done(hammer_inode_t ip, int error);
void	hammer_wait_inode(hammer_inode_t ip);

int	hammer_create_inode(struct hammer_transaction *trans,
			struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			const char *name, int namelen,
			hammer_pseudofs_inmem_t pfsm,
			struct hammer_inode **ipp);
void	hammer_rel_inode(hammer_inode_t ip, int flush);
int	hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int	hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int	hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int	hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int	hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void	hammer_test_inode(hammer_inode_t dip);
void	hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);

int	hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, const char *name, int bytes,
			hammer_inode_t nip);
int	hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
void	hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int	hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int	hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int	hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int	hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int	hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int	hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int	hammer_ip_sync_record_cursor(hammer_cursor_t cursor,
			hammer_record_t rec);
hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans,
			u_int32_t localization, int *errorp);
int	hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
			hammer_pseudofs_inmem_t pfsm);
int	hammer_save_pseudofs(hammer_transaction_t trans,
			hammer_pseudofs_inmem_t pfsm);
int	hammer_unload_pseudofs(hammer_transaction_t trans,
			u_int32_t localization);
void	hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int	hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);

void	hammer_io_init(hammer_io_t io, hammer_volume_t volume,
			enum hammer_io_type type);
int	hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit);
void	hammer_io_advance(struct hammer_io *io);
int	hammer_io_new(struct vnode *devvp, struct hammer_io *io);
int	hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(struct hammer_io *io, int flush);
void	hammer_io_flush(struct hammer_io *io, int reclaim);
void	hammer_io_wait(struct hammer_io *io);
void	hammer_io_waitdep(struct hammer_io *io);
void	hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int	hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int	hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
			hammer_record_t record);
void	hammer_io_direct_wait(hammer_record_t record);
void	hammer_io_direct_uncache(hammer_mount_t hmp,
			hammer_btree_leaf_elm_t leaf);
void	hammer_io_write_interlock(hammer_io_t io);
void	hammer_io_done_interlock(hammer_io_t io);
void	hammer_io_clear_modify(struct hammer_io *io, int inval);
void	hammer_io_clear_modlist(struct hammer_io *io);
void	hammer_io_flush_sync(hammer_mount_t hmp);
void	hammer_io_clear_error(struct hammer_io *io);
void	hammer_io_clear_error_noassert(struct hammer_io *io);
void	hammer_io_notmeta(hammer_buffer_t buffer);
void	hammer_io_limit_backlog(hammer_mount_t hmp);
int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int hammer_flusher_async_one(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int hammer_flusher_meta_limit(hammer_mount_t hmp);
int hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int hammer_flusher_haswork(hammer_mount_t hmp);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume, int final);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
void hkprintf(const char *ctl, ...) __printflike(1, 2);
udev_t hammer_fsid_to_udev(uuid_t *uuid);

int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}

#endif

static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
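/*
 * Illustrative only (not part of the original header): operations that
 * will dirty meta-data typically call hammer_checkspace() up front and
 * abort early when the filesystem is nearly full.  A hypothetical
 * sketch; the HAMMER_CHKSPC_CREATE slop constant is assumed to be one
 * of the slop values defined earlier in this header.
 */
#if 0
static int
example_space_precheck(hammer_mount_t hmp)
{
	int error;

	error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE);
	if (error)
		return (error);		/* typically ENOSPC */
	return (0);
}
#endif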
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		 (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, base, len);
	crcptr = &node->ondisk->crc;
	hammer_modify_buffer(trans, node->buffer, crcptr,
			     sizeof(hammer_crc_t));
	--node->buffer->io.modify_refs;	/* only want one ref */
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; it is very expensive to do, and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}

#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))

/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater, and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves localization between directory entries and
 * inodes.
 */
static __inline u_int32_t
hammer_dir_localization(hammer_inode_t dip)
{
	if (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIR_LOCAL_INO)
		return(HAMMER_LOCALIZE_INODE);
	else
		return(HAMMER_LOCALIZE_MISC);
}
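/*
 * Illustrative only (not part of the original header): the node-modify
 * helpers follow the same modify/done bracketing as the buffer and
 * volume variants.  A hypothetical sketch of dropping the last element
 * from a node using the hammer_modify_node_field() macro defined above,
 * roughly following the pattern used by the B-Tree code:
 */
#if 0
static void
example_drop_last_elm(hammer_transaction_t trans, hammer_node_t node)
{
	hammer_modify_node_field(trans, node, count);
	--node->ondisk->count;
	hammer_modify_node_done(node);
}
#endif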