/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef VFS_HAMMER_HAMMER_H_
#define VFS_HAMMER_HAMMER_H_

/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */

#include <sys/param.h>
#ifdef _KERNEL
#include <sys/kernel.h>
#include <sys/systm.h>
#endif
#include <sys/conf.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/file.h>
#include <sys/event.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

/*
 * Kernel trace
 */
#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
/* KTR_INFO_MASTER_EXTERN(hammer); */

/*
 * Misc structures
 */
struct hammer_mount;

/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs */
	union {
		struct hammer_btree_leaf_elm *leaf;
	} u;
} *hammer_inode_info_t;

typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;

/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	hammer_tid_t	tid;
	u_int64_t	time;
	u_int32_t	time32;
	int		sync_lock_refs;
	int		flags;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;

#define HAMMER_TRANSF_NEWINODE	0x0001
#define HAMMER_TRANSF_DIDIO	0x0002
#define HAMMER_TRANSF_CRCDOM	0x0004	/* EDOM on CRC error, less critical */

/*
 * HAMMER locks
 */
struct hammer_lock {
	volatile u_int	refs;		/* active references */
	volatile u_int	lockval;	/* lock count and control bits */
	struct thread	*lowner;	/* owner if exclusively held */
	struct thread	*rowner;	/* owner if exclusively held */
};

#define HAMMER_REFS_LOCKED	0x40000000	/* transition check */
#define HAMMER_REFS_WANTED	0x20000000	/* transition check */
#define HAMMER_REFS_CHECK	0x10000000	/* transition check */

#define HAMMER_REFS_FLAGS	(HAMMER_REFS_LOCKED | \
				 HAMMER_REFS_WANTED | \
				 HAMMER_REFS_CHECK)

#define HAMMER_LOCKF_EXCLUSIVE	0x40000000
#define HAMMER_LOCKF_WANTED	0x20000000

static __inline int
hammer_notlocked(struct hammer_lock *lock)
{
	return(lock->lockval == 0);
}

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockval != 0);
}

/*
 * Returns the number of refs on the object.
 */
static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs & ~HAMMER_REFS_FLAGS);
}

static __inline int
hammer_oneref(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
}

static __inline int
hammer_norefs(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
}

static __inline int
hammer_norefsorlock(struct hammer_lock *lock)
{
	return(lock->refs == 0);
}

static __inline int
hammer_refsorlock(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}
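
#if 0
/*
 * Illustrative sketch only (not part of the original header): a typical
 * ref + exclusive-lock bracket on an embedded hammer_lock, assuming the
 * caller already holds whatever higher-level token the structure needs.
 * The function name is hypothetical; hammer_ref(), hammer_lock_ex(),
 * hammer_unlock() and hammer_rel() are declared later in this header.
 */
static void
example_lock_cycle(struct hammer_lock *lock)
{
	hammer_ref(lock);			/* bump ref count (low bits of refs) */
	hammer_lock_ex(lock);			/* sets HAMMER_LOCKF_EXCLUSIVE */
	KKASSERT(hammer_islocked(lock));
	KKASSERT(hammer_isactive(lock) >= 1);	/* flag bits are masked out */
	hammer_unlock(lock);
	hammer_rel(lock);
}
#endif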
/*
 * Return if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
	    lock->lowner == td) {
		return(1);
	}
	return(0);
}

/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;

TAILQ_HEAD(hammer_record_list, hammer_record);

/*
 * Pseudo-filesystem extended data tracking
 */
struct hammer_pfs_rb_tree;
struct hammer_pseudofs_inmem;
RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	      hammer_pfs_rb_compare, u_int32_t);

struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem)	rb_node;
	struct hammer_lock	lock;
	u_int32_t		localization;
	hammer_tid_t		create_tid;
	int			flags;
	udev_t			fsid_udev;
	struct hammer_pseudofs_data pfsd;
};

typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t;

/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	2048
#define OBJID_CACHE_BULK_BITS	10		/* 10 bits (1024) */
#define OBJID_CACHE_BULK	(32 * 32)	/* two level (1024) */
#define OBJID_CACHE_BULK_MASK	(OBJID_CACHE_BULK - 1)
#define OBJID_CACHE_BULK_MASK64	((u_int64_t)(OBJID_CACHE_BULK - 1))

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode		*dip;
	hammer_tid_t			base_tid;
	int				count;
	u_int32_t			bm0;
	u_int32_t			bm1[32];
} *hammer_objid_cache_t;

/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache)	entry;
	struct hammer_node		*node;
	struct hammer_inode		*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);

/*
 * Live dedup cache
 */
struct hammer_dedup_crc_rb_tree;
RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry,
	      hammer_dedup_crc_rb_compare, hammer_crc_t);

struct hammer_dedup_off_rb_tree;
RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry,
	      hammer_dedup_off_rb_compare, hammer_off_t);

#define DEDUP_CACHE_SIZE	4096	/* XXX make it a dynamic tunable */

typedef struct hammer_dedup_cache {
	RB_ENTRY(hammer_dedup_cache) crc_entry;
	RB_ENTRY(hammer_dedup_cache) off_entry;
	TAILQ_ENTRY(hammer_dedup_cache) lru_entry;
	struct hammer_mount *hmp;
	int64_t		obj_id;
	u_int32_t	localization;
	off_t		file_offset;
	int		bytes;
	hammer_off_t	data_offset;
	hammer_crc_t	crc;
} *hammer_dedup_cache_t;
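
#if 0
/*
 * Illustrative sketch only (not part of the original header): how the
 * live dedup cache is meant to be consulted.  Entries are indexed both
 * by data CRC and by zone-2 data offset; a CRC hit still has to be
 * verified against the actual data before the block can be reused.
 * The function name is hypothetical; hammer_dedup_cache_lookup() and
 * hammer_dedup_validate() are declared later in this header.
 */
static int
example_dedup_probe(hammer_mount_t hmp, hammer_crc_t crc,
		    int zone, int bytes, void *data)
{
	hammer_dedup_cache_t dcp;

	dcp = hammer_dedup_cache_lookup(hmp, crc);
	if (dcp == NULL)
		return(0);
	return(hammer_dedup_validate(dcp, zone, bytes, data));
}
#endif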
/*
 * Structure used to organize flush groups.  Flush groups must be
 * organized into chunks in order to avoid blowing out the UNDO FIFO.
 * Without this a 'sync' could end up flushing 50,000 inodes in a single
 * transaction.
 */
struct hammer_fls_rb_tree;
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	     hammer_ino_rb_compare);

struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group)	flush_entry;
	struct hammer_fls_rb_tree	flush_tree;
	int				seq;		/* our seq no */
	int				total_count;	/* record load */
	int				running;	/* group is running */
	int				closed;
	int				refs;
};

typedef struct hammer_flush_group *hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);

/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * clear).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * databases and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_redo_rb_tree;
RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	      hammer_redo_rb_compare, hammer_off_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);

struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	hammer_flush_group_t	flush_group;
	RB_ENTRY(hammer_inode)	rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode)	rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t			obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	u_int32_t		obj_localization; /* (key) pseudo-fs */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			flags;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
#if 0
	int			cursor_exclreq_count;
#endif
	int			rsv_recs;
	struct vnode		*vp;
	hammer_pseudofs_inmem_t	pfsm;
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	off_t			trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int			rec_generation;

	/*
	 * search initiate cache
	 * cache[0] - this inode
	 * cache[1] - related data, the content depends on situations
	 * cache[2] - for dip to cache ip to shortcut B-Tree search
	 * cache[3] - related data copied from dip to a new ip's cache[1]
	 */
	struct hammer_node_cache cache[4];

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	off_t		save_trunc_off;		/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;  /* to-sync cache */
	size_t		redo_count;

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t	redo_fifo_start;
	hammer_off_t	redo_fifo_next;
};

typedef struct hammer_inode *hammer_inode_t;

#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 *	 write-append size changes.  SDIRTY handles write-append size
 *	 changes.
 *
 *	 REDO indicates that REDO logging is active, creating a definitive
 *	 stream of REDO records in the UNDO/REDO log for writes and
 *	 truncations, including boundary records when/if REDO is turned off.
 *	 REDO is typically enabled by fsync() and turned off if excessive
 *	 writes without an fsync() occur.
 *
 *	 RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 *	 FIFO (even if REDO is turned off some might still be active) and
 *	 still being tracked for this inode.  See hammer_redo.c
 */
					/* (not including atime/mtime) */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_RECSW	0x0400	/* waiting on data record flush */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* Someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */
#define HAMMER_INODE_SLAVEFLUSH	0x08000000 /* being flushed by slave */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|   \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	    \
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|	    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
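
#if 0
/*
 * Illustrative sketch only (not part of the original header): the intended
 * reading of HAMMER_INODE_MODMASK.  An inode with none of these bits set
 * has nothing for the flusher to synchronize.  The function name is
 * hypothetical.
 */
static int
example_inode_is_dirty(hammer_inode_t ip)
{
	return((ip->flags & HAMMER_INODE_MODMASK) != 0);
}
#endif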
#define HAMMER_INODE_MODMASK_NOXDIRTY	\
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)

#define HAMMER_INODE_MODMASK_NOREDO	\
				(HAMMER_INODE_DDIRTY|			    \
				 HAMMER_INODE_XDIRTY|			    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002

/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.  The reclaim wakes up when count reaches 0 or the
 * timer expires.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int	count;
};

/*
 * Track who is creating the greatest burden on the
 * inode cache.
 */
struct hammer_inostats {
	pid_t		pid;	/* track user process */
	int		ltick;	/* last tick */
	int		count;	/* count (degenerates) */
};

#define HAMMER_INOSTATS_HSIZE	32
#define HAMMER_INOSTATS_HMASK	(HAMMER_INOSTATS_HSIZE - 1)

/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;

struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_flush_group_t		flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
	int				gflags;
	hammer_off_t			zone2_offset;	/* direct-write only */
};

typedef struct hammer_record *hammer_record_t;
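
#if 0
/*
 * Illustrative sketch only (not part of the original header): iterating an
 * inode's records.  hammer_ip_first()/hammer_ip_next(), declared later in
 * this header (the cursor type comes from hammer_cursor.h, included below),
 * transparently merge the in-memory record RB-Tree with the on-disk B-Tree,
 * which is how the "current state" described above is synthesized.  The
 * function name is hypothetical.
 */
static int
example_scan_records(hammer_cursor_t cursor)
{
	int error;

	error = hammer_ip_first(cursor);
	while (error == 0) {
		/* cursor now points at the next merged record */
		error = hammer_ip_next(cursor);
	}
	return(error == ENOENT ? 0 : error);
}
#endif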
/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_COMMITTED		0x0010	/* committed to the B-Tree */
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_DEDUPED		0x0080	/* will be live-dedup'ed */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
#define HAMMER_RECF_REDO		0x1000	/* REDO was laid down */

/*
 * These flags must be separate to deal with SMP races
 */
#define HAMMER_RECG_DIRECT_IO		0x0001	/* related direct I/O running*/
#define HAMMER_RECG_DIRECT_WAIT		0x0002	/* related direct I/O running*/
#define HAMMER_RECG_DIRECT_INVAL	0x0004	/* buffer alias invalidation */

/*
 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
 */
#define HAMMER_CREATE_MODE_UMIRROR	0x0001
#define HAMMER_CREATE_MODE_SYS		0x0002

#define HAMMER_DELETE_ADJUST		0x0001
#define HAMMER_DELETE_DESTROY		0x0002

/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);
RB_HEAD(hammer_mod_rb_tree, hammer_io);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
	      hammer_mod_rb_compare, hammer_off_t);

/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * UNDO_BUFFER	- hammer_buffer containing undo-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 * DUMMY	- hammer_buffer not containing valid data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER,
	HAMMER_STRUCTURE_DUMMY
} hammer_io_type_t;

union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;

struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	struct hammer_volume	*volume;
	RB_ENTRY(hammer_io)	rb_node;	/* if modified */
	TAILQ_ENTRY(hammer_io)	iorun_entry;	/* iorun_list */
	struct hammer_mod_rb_tree *mod_root;
	struct buf		*bp;
	int64_t			offset;		/* zone-2 offset */
	int			bytes;		/* buffer cache buffer size */
	int			modify_refs;

	/*
	 * These can be modified at any time by the backend while holding
	 * io_token, due to bio_done and hammer_io_complete() callbacks.
	 */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		ioerror : 1;	/* abort on io-error */
	u_int		unusedA : 29;

	/*
	 * These can only be modified by the frontend while holding
	 * fs_token, or by the backend while holding the io interlocked
	 * with no references (which will block the frontend when it
	 * tries to reference it).
	 *
	 * WARNING! SMP RACES will create havoc if the callbacks ever tried
	 *	    to modify any of these outside the above restrictions.
	 */
	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
	u_int		unusedB : 24;
};

typedef struct hammer_io *hammer_io_t;

#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif

/*
 * In-memory volume representing on-disk buffer
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
	char	*vol_name;
	struct vnode *devvp;
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;

/*
 * In-memory buffer representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;
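
#if 0
/*
 * Illustrative sketch only (not part of the original header): accessing
 * on-disk data through a hammer_buffer.  hammer_bread() and
 * hammer_rel_buffer(), declared later in this header, return a pointer
 * into the buffer backing the given offset and pass the buffer back so
 * the caller can release it when done.  The function name is hypothetical.
 */
static int
example_read_ondisk(hammer_mount_t hmp, hammer_off_t off)
{
	struct hammer_buffer *buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, off, &error, &buffer);
	if (data) {
		/* ... inspect the on-disk bytes at data while held ... */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
#endif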
/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * and used for fine-grained locking of B-Tree nodes in order to
 * properly control lock ordering.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-mount linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list;  /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
#if 0
	int			cursor_exclreq_count;
#endif
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020
#define HAMMER_NODE_NONLINEAR	0x0040		/* linear heuristic */

#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)

typedef struct hammer_node	*hammer_node_t;

/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;
	struct hammer_node_lock_list  list;
	struct hammer_node_lock	*parent;
	hammer_node_t		node;
	hammer_node_ondisk_t	copy;	/* copy of on-disk data */
	int			index;	/* index of this node in parent */
	int			count;	/* count children */
	int			flags;
};

typedef struct hammer_node_lock *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001
#define HAMMER_NODE_LOCK_LCACHE		0x0002

/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;

/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved big-block.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big-blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flush_group;
	int		flags;
	int		refs;
	int		zone;
	int		append_off;
	int32_t		bytes_free;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
#define HAMMER_RESF_LAYER2FREE	0x0002

#include "hammer_cursor.h"

/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS		1024
#define HAMMER_MAX_FLUSHERS		4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t	offset;
	int		bytes;
};

typedef struct hammer_undo *hammer_undo_t;

struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		done;		/* last completed flush group */
	int		next;		/* next unallocated flg seqno */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info_list run_list;
	struct hammer_flusher_info_list ready_list;
};

#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2

/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_redo_rb_tree rb_redo_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_pfs_rb_tree rb_pfsm_root;

	struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
	struct hammer_dedup_off_rb_tree rb_dedup_off_root;

	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;

	struct malloc_type	*m_misc;
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	hflags;
	int	ronly;
	int	nvolumes;
	int	volume_iterator;
	int	master_id;	/* -1 or 0-15 for mirroring */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* big-blocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */
	int	last_newrecords;
	int	count_newrecords;

	int	volume_to_remove; /* volume that is currently being removed */

	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */
	int	count_reclaims;	/* inodes pending reclaim by flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;
	u_int	check_yield;
	uuid_t	fsid;
	struct hammer_mod_rb_tree volu_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree undo_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree data_root;	/* dirty data buffers */
	struct hammer_mod_rb_tree meta_root;	/* dirty meta bufs    */
	struct hammer_mod_rb_tree lose_root;	/* loose buffers      */
	long	locked_dirty_space;		/* meta/volu count    */
	long	io_running_space;		/* io_token */
	int	unused01;
	int	objid_cache_count;
	int	dedup_cache_count;
	int	error;				/* critical I/O error */
	struct krate	krate;			/* rate limited kprintf */
	struct krate	kdiag;			/* rate limited kprintf */
	hammer_tid_t	asof;			/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;		/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;		/* flusher tid sequencing */
	int64_t	copy_stat_freebigblocks;	/* number of free big-blocks */
	u_int32_t	undo_seqno;		/* UNDO/REDO FIFO seqno */
	u_int32_t	recover_stage2_seqno;	/* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset;	/* REDO recovery offset */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS];
	int			undo_alloc;
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list	flush_group_list;
	hammer_flush_group_t	fill_flush_group;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t	dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token	fs_token;	/* high level */
	struct lwkt_token	io_token;	/* low level (IO callback) */

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
	uint64_t volume_map[4];	/* 256 bits bitfield */
};

typedef struct hammer_mount	*hammer_mount_t;

#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010

#define HAMMER_VOLUME_NUMBER_ADD(hmp, vol)			\
	(hmp)->volume_map[(vol)->vol_no >> 6] |=		\
	((uint64_t)1 << ((vol)->vol_no & ((1 << 6) - 1)))

#define HAMMER_VOLUME_NUMBER_DEL(hmp, vol)			\
	(hmp)->volume_map[(vol)->vol_no >> 6] &=		\
	~((uint64_t)1 << ((vol)->vol_no & ((1 << 6) - 1)))

#define HAMMER_VOLUME_NUMBER_IS_SET(hmp, n)			\
	(((hmp)->volume_map[(n) >> 6] &				\
	((uint64_t)1 << ((n) & ((1 << 6) - 1)))) != 0)

#define HAMMER_VOLUME_NUMBER_FOREACH(hmp, n)			\
	for (n = 0; n < HAMMER_MAX_VOLUMES; n++)		\
		if (HAMMER_VOLUME_NUMBER_IS_SET(hmp, n))
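
#if 0
/*
 * Illustrative sketch only (not part of the original header): walking every
 * installed volume with HAMMER_VOLUME_NUMBER_FOREACH().  The macro scans the
 * 256-bit volume_map bitfield, so the body only runs for volume numbers that
 * were registered with HAMMER_VOLUME_NUMBER_ADD().  The function name is
 * hypothetical; hammer_get_volume() and hammer_rel_volume() are declared
 * later in this header.
 */
static void
example_foreach_volume(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	int error;
	int n;

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, n) {
		volume = hammer_get_volume(hmp, n, &error);
		if (volume) {
			/* ... use volume->ondisk ... */
			hammer_rel_volume(volume, 0);
		}
	}
}
#endif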
struct hammer_sync_info {
	int error;
	int waitfor;
};

/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of balance.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)

#endif  /* _KERNEL || _KERNEL_STRUCTURES */

#if defined(_KERNEL)
/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_critical;
extern int hammer_cluster_enable;
extern int hammer_live_dedup;
extern int hammer_tdmux_ticks;
extern int hammer_count_fsyncs;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaims;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_count_extra_space_used;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_btree_root_iterations;
extern int64_t hammer_stats_record_iterations;
extern int64_t hammer_stats_file_read;
extern int64_t hammer_stats_file_write;
extern int64_t hammer_stats_file_iopsr;
extern int64_t hammer_stats_file_iopsw;
extern int64_t hammer_stats_disk_read;
extern int64_t hammer_stats_disk_write;
extern int64_t hammer_stats_inode_flushes;
extern int64_t hammer_stats_commits;
extern int64_t hammer_stats_undo;
extern int64_t hammer_stats_redo;
extern long hammer_count_dirtybufspace;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern long hammer_count_io_running_read;
extern long hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern long hammer_limit_dirtybufspace;
extern int hammer_limit_recs;
extern int hammer_limit_inode_recs;
extern int hammer_limit_reclaims;
extern int hammer_live_dedup_cache_size;
extern int hammer_limit_redo;
extern int hammer_bio_count;
extern int hammer_verify_zone;
extern int hammer_verify_data;
extern int hammer_write_mode;
extern int hammer_double_buffer;
extern int hammer_btree_full_undo;
extern int hammer_yield_check;
extern int hammer_fsync_mode;
extern int hammer_autoflush;
extern int64_t hammer_contention_count;

extern int64_t hammer_live_dedup_vnode_bcmps;
extern int64_t hammer_live_dedup_device_bcmps;
extern int64_t hammer_live_dedup_findblk_failures;
extern int64_t hammer_live_dedup_bmap_saves;

void	hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
			int error, const char *msg);
int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_find_inode(hammer_transaction_t trans,
			int64_t obj_id, hammer_tid_t asof,
			u_int32_t localization);
void	hammer_scan_inode_snapshots(hammer_mount_t hmp,
			hammer_inode_info_t iinfo,
			int (*callback)(hammer_inode_t ip, void *data),
			void *data);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);
void	hammer_inode_waitreclaims(hammer_transaction_t trans);
void	hammer_inode_dirty(struct hammer_inode *ip);

int	hammer_unload_volume(hammer_volume_t volume, void *data);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int	hammer_unload_buffer(hammer_buffer_t buffer, void *data);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname,
			struct vnode *devvp, void *data);
int	hammer_mountcheck_volumes(hammer_mount_t hmp);
int	hammer_get_installed_volumes(hammer_mount_t hmp);

int	hammer_mem_add(hammer_record_t record);
int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_create_at_cursor(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t leaf, void *udata, int mode);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, u_int32_t delete_ts,
			int track, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);

hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);

int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
void	hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
void	hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
int	hammer_lock_status(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
int	hammer_ref_interlock(struct hammer_lock *lock);
int	hammer_ref_interlock_true(struct hammer_lock *lock);
void	hammer_ref_interlock_done(struct hammer_lock *lock);
void	hammer_rel(struct hammer_lock *lock);
int	hammer_rel_interlock(struct hammer_lock *lock, int locked);
void	hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
int	hammer_get_interlock(struct hammer_lock *lock);
int	hammer_try_interlock_norefs(struct hammer_lock *lock);
void	hammer_put_interlock(struct hammer_lock *lock, int error);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);

u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts);
u_int64_t hammer_timespec_to_time(struct timespec *ts);
int	hammer_str_to_tid(const char *str, int *ispfsp,
			hammer_tid_t *tidp, u_int32_t *localizationp);
hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
			int64_t namekey);
void hammer_clear_objid(hammer_inode_t dip);
void hammer_destroy_objid_cache(hammer_mount_t hmp);

int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip,
			hammer_btree_leaf_elm_t leaf);
hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp,
			hammer_crc_t crc);
void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset);
void hammer_destroy_dedup_cache(hammer_mount_t hmp);
void hammer_dump_dedup_cache(hammer_mount_t hmp);
int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
			void *data);

int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			u_int32_t *max_iterationsp);
int	hammer_nohistory(hammer_inode_t ip);

int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
int	hammer_recover_cursor(hammer_cursor_t cursor);
void	hammer_unlock_cursor(hammer_cursor_t cursor);
int	hammer_lock_cursor(hammer_cursor_t cursor);
hammer_cursor_t	hammer_push_cursor(hammer_cursor_t ocursor);
void	hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);

void	hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
void	hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
			int index);
void	hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
			int index);
void	hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
			hammer_node_t onode, int oindex,
			hammer_node_t nnode, int nindex);
void	hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
			hammer_node_t nparent, int nindex);
void	hammer_cursor_inserted_element(hammer_node_t node, int index);
void	hammer_cursor_deleted_element(hammer_node_t node, int index);
void	hammer_cursor_invalidate_cache(hammer_cursor_t cursor);

int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm, int *doprop);
int	hammer_btree_delete(hammer_cursor_t cursor, int *ndelete);
void	hammer_btree_do_propagation(hammer_cursor_t cursor,
			hammer_pseudofs_inmem_t pfsm,
			hammer_btree_leaf_elm_t leaf);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);

int	btree_set_parent_of_child(hammer_transaction_t trans,
			hammer_node_t node,
			hammer_btree_elm_t elm);
void	hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
void	hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
			int depth);
void	hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
int	hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
void	hammer_btree_lock_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
int	hammer_btree_sync_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
void	hammer_btree_unlock_children(hammer_mount_t hmp,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
int	hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
			hammer_node_t node, int *parent_indexp,
			int *errorp, int try_exclusive);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm);

void	*hammer_bread(hammer_mount_t hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(hammer_mount_t hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bread_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);

hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
			int bytes, int isnew, int *errorp);
void		hammer_sync_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset, int bytes);
int		hammer_del_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes,
			int report_conflicts);

int		hammer_ref_volume(hammer_volume_t volume);
int		hammer_ref_buffer(hammer_buffer_t buffer);
void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void		hammer_rel_volume(hammer_volume_t volume, int locked);
void		hammer_rel_buffer(hammer_buffer_t buffer, int locked);

int		hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t	hammer_get_node(hammer_transaction_t trans,
			hammer_off_t node_offset, int isnew, int *errorp);
void		hammer_ref_node(hammer_node_t node);
hammer_node_t	hammer_ref_node_safe(hammer_transaction_t trans,
			hammer_node_cache_t cache, int *errorp);
void		hammer_rel_node(hammer_node_t node);
void		hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void		hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void		hammer_uncache_node(hammer_node_cache_t cache);
void		hammer_flush_node(hammer_node_t node, int locked);

void hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
			hammer_off_t hint, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			u_int16_t rec_type, hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp,
			hammer_off_t hint, int *errorp);

int hammer_generate_undo(hammer_transaction_t trans,
			hammer_off_t zone_offset, void *base, int len);
int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
			hammer_off_t file_offset, u_int32_t flags,
			void *base, int len);
void hammer_generate_redo_sync(hammer_transaction_t trans);
void hammer_redo_fifo_start_flush(hammer_inode_t ip);
void hammer_redo_fifo_end_flush(hammer_inode_t ip);

void hammer_format_undo(void *base, u_int32_t seqno);
int hammer_upgrade_undo_4(hammer_transaction_t trans);

void hammer_put_volume(struct hammer_volume *volume, int flush);
void hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, hammer_off_t hint, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t zone_offset, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t zone_offset, int bytes);
int hammer_blockmap_dedup(hammer_transaction_t trans,
			hammer_off_t zone_offset, int bytes);
int hammer_blockmap_finalize(hammer_transaction_t trans,
			hammer_reserve_t resv,
			hammer_off_t zone_offset, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp,
			hammer_off_t zone_offset, int *errorp);

hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp);
int64_t hammer_undo_used(hammer_transaction_t trans);
int64_t hammer_undo_space(hammer_transaction_t trans);
int64_t hammer_undo_max(hammer_mount_t hmp);
int hammer_undo_reclaim(hammer_io_t io);

void hammer_start_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_simple_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_start_transaction_fls(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_done_transaction(struct hammer_transaction *trans);
hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);

void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip, int error);
void hammer_wait_inode(hammer_inode_t ip);

int  hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			const char *name, int namelen,
			hammer_pseudofs_inmem_t pfsm,
			struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t dip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int hammer_update_atime_quick(hammer_inode_t ip);

int  hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, const char *name, int bytes,
			hammer_inode_t nip);
int  hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans,
			u_int32_t localization, int *errorp);
int  hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_save_pseudofs(hammer_transaction_t trans,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization);
void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int  hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);

void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
			enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit);
void hammer_io_advance(struct hammer_io *io);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io, int reclaim);
void hammer_io_wait(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
			hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io, int inval);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(struct hammer_io *io);
void hammer_io_clear_error_noassert(struct hammer_io *io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_io_limit_backlog(hammer_mount_t hmp);

void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_iterate_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_dedup *dedup);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int hammer_flusher_async_one(hammer_mount_t hmp);
int hammer_flusher_running(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int hammer_flusher_meta_limit(hammer_mount_t hmp);
int hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int hammer_flusher_haswork(hammer_mount_t hmp);
int hammer_flush_dirty(hammer_mount_t hmp, int max_count);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume, int final);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
udev_t hammer_fsid_to_udev(uuid_t *uuid);

int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}

static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}

static __inline void
hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume)
{
	hammer_modify_volume(trans, volume, NULL, 0);
}

static __inline void
hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer)
{
	hammer_modify_buffer(trans, buffer, NULL, 0);
}
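/*
 * Illustrative sketch only, not part of HAMMER: a typical front-end
 * pattern checks free space with hammer_checkspace() before doing work
 * and brackets structure access with hammer_lock_ex()/hammer_unlock()
 * (the latter is prototyped earlier in this header).  The helper name
 * and the slop value of 0 are hypothetical placeholders.
 */
#if 0
static __inline int
hammer_example_guarded_op(hammer_mount_t hmp, struct hammer_lock *lock)
{
	int error;

	error = hammer_checkspace(hmp, 0);	/* placeholder slop value */
	if (error)
		return(error);
	hammer_lock_ex(lock);			/* exclusive, ident "hmrlck" */
	/* ... perform the guarded operation ... */
	hammer_unlock(lock);
	return(0);
}
#endif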
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

	if (hammer_btree_full_undo) {
		hammer_modify_node_all(trans, node);
	} else {
		hammer_modify_buffer(trans, node->buffer, base, len);
		crcptr = &node->ondisk->crc;
		hammer_modify_buffer(trans, node->buffer,
				     crcptr, sizeof(hammer_crc_t));
		--node->buffer->io.modify_refs;	/* only want one ref */
	}
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; it's very expensive to do and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}

/*
 * Lookup a blockmap offset.
 */
static __inline hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
		       int *errorp)
{
#if defined(INVARIANTS)
	int zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
#endif

	/*
	 * We can actually skip blockmap verify by default,
	 * as normal blockmaps are now direct-mapped onto the freemap
	 * and so represent zone-2 addresses.
	 */
	if (hammer_verify_zone == 0) {
		*errorp = 0;
		return hammer_xlate_to_zone2(zone_offset);
	}

	return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp);
}

#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
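/*
 * Illustrative sketch only, not part of HAMMER: a field update on a
 * B-Tree node goes through hammer_modify_node_field() so the modified
 * range (and the node CRC) is covered by undo, and is closed out with
 * hammer_modify_node_done().  The helper name is a hypothetical
 * placeholder; 'count' is the element count field of hammer_node_ondisk.
 */
#if 0
static __inline void
hammer_example_bump_node_count(hammer_transaction_t trans, hammer_node_t node)
{
	hammer_modify_node_field(trans, node, count);	/* undo-log ->count */
	++node->ondisk->count;				/* apply the change */
	hammer_modify_node_done(node);			/* defer CRC regeneration */
}
#endif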
/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves localization between directory entries and
 * inodes.
 */
static __inline u_int32_t
hammer_dir_localization(hammer_inode_t dip)
{
	return(HAMMER_DIR_INODE_LOCALIZATION(&dip->ino_data));
}

#define hkprintf(format, args...)			\
	kprintf("HAMMER: "format,## args)
#define hvkprintf(vol, format, args...)			\
	kprintf("HAMMER(%s) "format, vol->ondisk->vol_name,## args)
#define hmkprintf(hmp, format, args...)			\
	kprintf("HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkprintf(format, args...)			\
	kprintf("%s: "format, __func__,## args)

#define hkrateprintf(rate, format, args...)		\
	krateprintf(rate, "HAMMER: "format,## args)
#define hvkrateprintf(rate, vol, format, args...)	\
	krateprintf(rate, "HAMMER(%s) "format, vol->ondisk->vol_name,## args)
#define hmkrateprintf(rate, hmp, format, args...)	\
	krateprintf(rate, "HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkrateprintf(rate, format, args...)		\
	krateprintf(rate, "%s: "format, __func__,## args)

#define hpanic(format, args...)				\
	panic("%s: "format, __func__,## args)

#endif /* _KERNEL */

#endif /* !VFS_HAMMER_HAMMER_H_ */