1 /*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #ifndef VFS_HAMMER_HAMMER_H_
36 #define VFS_HAMMER_HAMMER_H_
37
38 /*
39 * This header file contains structures used internally by the HAMMERFS
40 * implementation. See hammer_disk.h for on-disk structures.
41 */
42
43 #include <sys/param.h>
44 #ifdef _KERNEL
45 #include <sys/kernel.h>
46 #include <sys/systm.h>
47 #include <sys/uio.h>
48 #endif
49 #include <sys/conf.h>
50 #include <sys/tree.h>
51 #include <sys/malloc.h>
52 #include <sys/mount.h>
53 #include <sys/vnode.h>
54 #include <sys/proc.h>
55 #include <sys/caps.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/fcntl.h>
59 #include <sys/lockf.h>
60 #include <sys/file.h>
61 #include <sys/event.h>
62 #include <sys/buf.h>
63 #include <sys/queue.h>
64 #include <sys/ktr.h>
65 #include <sys/limits.h>
66 #include <sys/sysctl.h>
67 #include <vm/swap_pager.h>
68 #include <vm/vm_extern.h>
69
70 #include "hammer_disk.h"
71 #include "hammer_mount.h"
72 #include "hammer_ioctl.h"
73 #include "hammer_crc.h"
74
75 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
76
77 #ifdef MALLOC_DECLARE
78 MALLOC_DECLARE(M_HAMMER);
79 #endif
80
81 /*
82 * Kernel trace
83 */
84 #if !defined(KTR_HAMMER)
85 #define KTR_HAMMER KTR_ALL
86 #endif
87 /* KTR_INFO_MASTER_EXTERN(hammer); */
88
89 /*
90 * Misc structures
91 */
92 struct hammer_mount;
93 struct hammer_inode;
94 struct hammer_volume;
95 struct hammer_buffer;
96 struct hammer_node;
97 struct hammer_undo;
98 struct hammer_reserve;
99 struct hammer_io;
100
101 /*
102 * Key structure used for custom RB tree inode lookups. This prototypes
103 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
104 */
105 typedef struct hammer_inode_info {
106 int64_t obj_id; /* (key) object identifier */
107 hammer_tid_t obj_asof; /* (key) snapshot transid or 0 */
108 uint32_t obj_localization; /* (key) pseudo-fs id for upper 16 bits */
109 union {
110 hammer_btree_leaf_elm_t leaf;
111 } u;
112 } *hammer_inode_info_t;
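
/*
 * Illustrative sketch (not part of this header): an as-of inode lookup
 * keyed on (obj_id, obj_asof, obj_localization) is typically issued
 * against hmp->rb_inos_root roughly like this (the real call sites live
 * in hammer_inode.c):
 *
 *	struct hammer_inode_info iinfo;
 *	hammer_inode_t ip;
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 */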
113
114 typedef enum hammer_transaction_type {
115 HAMMER_TRANS_RO,
116 HAMMER_TRANS_STD,
117 HAMMER_TRANS_FLS
118 } hammer_transaction_type_t;
119
120 /*
121 * HAMMER Transaction tracking
122 */
123 typedef struct hammer_transaction {
124 hammer_transaction_type_t type;
125 struct hammer_mount *hmp;
126 hammer_tid_t tid;
127 uint64_t time;
128 uint32_t time32;
129 int sync_lock_refs;
130 int flags;
131 struct hammer_volume *rootvol;
132 } *hammer_transaction_t;
133
134 #define HAMMER_TRANSF_NEWINODE 0x0001
135 #define HAMMER_TRANSF_CRCDOM 0x0004 /* EDOM on CRC error, less critical */
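
/*
 * Sketch of typical transaction usage (illustrative only; the start/done
 * functions are prototyped further below in this header):
 *
 *	struct hammer_transaction trans;
 *
 *	hammer_start_transaction(&trans, hmp);
 *	... perform modifications under the transaction ...
 *	hammer_done_transaction(&trans);
 */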
136
137 /*
138 * HAMMER locks
139 */
140 struct hammer_lock {
141 volatile u_int refs; /* active references */
142 volatile u_int lockval; /* lock count and control bits */
143 struct thread *lowner; /* owner if exclusively held */
144 struct thread *rowner; /* owner if exclusively held */
145 };
146
147 #define HAMMER_REFS_LOCKED 0x40000000 /* transition check */
148 #define HAMMER_REFS_WANTED 0x20000000 /* transition check */
149 #define HAMMER_REFS_CHECK 0x10000000 /* transition check */
150
151 #define HAMMER_REFS_FLAGS (HAMMER_REFS_LOCKED | \
152 HAMMER_REFS_WANTED | \
153 HAMMER_REFS_CHECK)
154
155 #define HAMMER_LOCKF_EXCLUSIVE 0x40000000
156 #define HAMMER_LOCKF_WANTED 0x20000000
157
158 #define HAMMER_LIMIT_RECLAIMS 16384 /* maximum reclaims in-prog */
159
160 static __inline int
161 hammer_notlocked(struct hammer_lock *lock)
162 {
163 return(lock->lockval == 0);
164 }
165
166 static __inline int
167 hammer_islocked(struct hammer_lock *lock)
168 {
169 return(lock->lockval != 0);
170 }
171
172 /*
173 * Returns the number of refs on the object.
174 */
175 static __inline int
176 hammer_isactive(struct hammer_lock *lock)
177 {
178 return(lock->refs & ~HAMMER_REFS_FLAGS);
179 }
180
181 static __inline int
182 hammer_oneref(struct hammer_lock *lock)
183 {
184 return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
185 }
186
187 static __inline int
188 hammer_norefs(struct hammer_lock *lock)
189 {
190 return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
191 }
192
193 static __inline int
194 hammer_norefsorlock(struct hammer_lock *lock)
195 {
196 return(lock->refs == 0);
197 }
198
199 static __inline int
200 hammer_refsorlock(struct hammer_lock *lock)
201 {
202 return(lock->refs != 0);
203 }
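
/*
 * Worked example (illustrative): refs packs a reference count in the low
 * bits with the HAMMER_REFS_* transition bits in the high bits. With
 *
 *	lock->refs = HAMMER_REFS_LOCKED | 2;
 *
 * hammer_isactive() returns 2 and hammer_norefs() returns 0, because both
 * mask out the control bits, while hammer_norefsorlock() returns 0 and
 * hammer_refsorlock() returns non-zero because they test the raw field
 * including the control bits.
 */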
204
205 /*
206 * Return non-zero if the specified thread owns the lock exclusively.
207 */
208 static __inline int
209 hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
210 {
211 if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
212 lock->lowner == td) {
213 return(1);
214 }
215 return(0);
216 }
217
218 /*
219 * Flush state, used by various structures
220 */
221 typedef enum hammer_inode_state {
222 HAMMER_FST_IDLE,
223 HAMMER_FST_SETUP,
224 HAMMER_FST_FLUSH
225 } hammer_inode_state_t;
226
227 /*
228 * Pseudo-filesystem extended data tracking
229 */
230 struct hammer_pseudofs_inmem;
231 RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
232 RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
233 hammer_pfs_rb_compare, uint32_t);
234
235 typedef struct hammer_pseudofs_inmem {
236 RB_ENTRY(hammer_pseudofs_inmem) rb_node;
237 struct hammer_lock lock;
238 uint32_t localization;
239 hammer_tid_t create_tid;
240 int flags;
241 dev_t fsid_udev;
242 struct hammer_pseudofs_data pfsd;
243 } *hammer_pseudofs_inmem_t;
244
245 /*
246 * Cache object ids. A fixed number of objid cache structures are
247 * created to reserve object ids for newly created files in bulk chunks
248 * (OBJID_CACHE_BULK ids at a time), localized to a particular directory,
249 * and recycled as needed. This allows parallel create operations in
250 * different directories to retain fairly localized object ids, which in
251 * turn improves reblocking performance and layout.
252 */
253 #define OBJID_CACHE_SIZE 2048
254 #define OBJID_CACHE_BULK_BITS 10 /* 10 bits (1024) */
255 #define OBJID_CACHE_BULK (32 * 32) /* two level (1024) */
256 #define OBJID_CACHE_BULK_MASK (OBJID_CACHE_BULK - 1)
257 #define OBJID_CACHE_BULK_MASK64 ((uint64_t)(OBJID_CACHE_BULK - 1))
258
259 typedef struct hammer_objid_cache {
260 TAILQ_ENTRY(hammer_objid_cache) entry;
261 struct hammer_inode *dip;
262 hammer_tid_t base_tid;
263 int count;
264 uint32_t bm0;
265 uint32_t bm1[32];
266 } *hammer_objid_cache_t;
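
/*
 * Illustrative decomposition (a sketch, not the allocator itself): an id
 * offset n within one cache structure can be split across the two bitmap
 * levels as
 *
 *	n = (int)(obj_id & OBJID_CACHE_BULK_MASK64);	/* 0..1023 */
 *	i = n >> 5;		/* which bm1[] word, summarized by bm0 */
 *	b = n & 31;		/* bit within bm1[i] */
 *
 * giving 32 x 32 = 1024 (OBJID_CACHE_BULK) ids per structure.
 */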
267
268 /*
269 * Associate an inode with a B-Tree node to cache search start positions
270 */
271 typedef struct hammer_node_cache {
272 TAILQ_ENTRY(hammer_node_cache) entry;
273 struct hammer_node *node;
274 struct hammer_inode *ip;
275 } *hammer_node_cache_t;
276
277 TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);
278
279 /*
280 * Structure used to organize flush groups. Flush groups must be
281 * organized into chunks in order to avoid blowing out the UNDO FIFO.
282 * Without this a 'sync' could end up flushing 50,000 inodes in a single
283 * transaction.
284 */
285 RB_HEAD(hammer_fls_rb_tree, hammer_inode);
286 RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
287 hammer_ino_rb_compare);
288
289 typedef struct hammer_flush_group {
290 TAILQ_ENTRY(hammer_flush_group) flush_entry;
291 struct hammer_fls_rb_tree flush_tree;
292 int seq; /* our seq no */
293 int total_count; /* record load */
294 int running; /* group is running */
295 int closed;
296 int refs;
297 } *hammer_flush_group_t;
298
299 TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);
300
301 /*
302 * Structure used to represent an inode in-memory.
303 *
304 * The record and data associated with an inode may be out of sync with
305 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
306 * clear).
307 *
308 * An inode may also hold a cache of unsynchronized records, used for
309 * databases and directories only. Unsynchronized regular file data is
310 * stored in the buffer cache.
311 *
312 * NOTE: A file which is created and destroyed within the initial
313 * synchronization period can wind up not doing any disk I/O at all.
314 *
315 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
316 */
317 RB_HEAD(hammer_ino_rb_tree, hammer_inode);
318 RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
319 hammer_ino_rb_compare, hammer_inode_info_t);
320
321 RB_HEAD(hammer_redo_rb_tree, hammer_inode);
322 RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
323 hammer_redo_rb_compare, hammer_off_t);
324
325 struct hammer_record;
326 RB_HEAD(hammer_rec_rb_tree, hammer_record);
327 RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
328 hammer_rec_rb_compare, hammer_btree_leaf_elm_t);
329
330 TAILQ_HEAD(hammer_record_list, hammer_record);
331 TAILQ_HEAD(hammer_node_list, hammer_node);
332
333 typedef struct hammer_inode {
334 RB_ENTRY(hammer_inode) rb_node;
335 hammer_inode_state_t flush_state;
336 hammer_flush_group_t flush_group;
337 RB_ENTRY(hammer_inode) rb_flsnode; /* when on flush list */
338 RB_ENTRY(hammer_inode) rb_redonode; /* when INODE_RDIRTY is set */
339 struct hammer_record_list target_list; /* target of dependent recs */
340 int64_t obj_id; /* (key) object identifier */
341 hammer_tid_t obj_asof; /* (key) snapshot or 0 */
342 uint32_t obj_localization; /* (key) pseudo-fs id for upper 16 bits */
343 struct hammer_mount *hmp;
344 hammer_objid_cache_t objid_cache;
345 int flags;
346 int error; /* flush error */
347 int cursor_ip_refs; /* sanity */
348 #if 0
349 int cursor_exclreq_count;
350 #endif
351 int rsv_recs;
352 struct vnode *vp;
353 hammer_pseudofs_inmem_t pfsm;
354 struct lockf advlock;
355 struct hammer_lock lock; /* sync copy interlock */
356 off_t trunc_off;
357 struct hammer_btree_leaf_elm ino_leaf; /* in-memory cache */
358 struct hammer_inode_data ino_data; /* in-memory cache */
359 struct hammer_rec_rb_tree rec_tree; /* in-memory cache */
360 int rec_generation;
361
362 /*
363 * search initiate cache
364 * cache[0] - this inode
365 * cache[1] - related data; what it holds depends on the situation
366 * cache[2] - for dip to cache ip to shortcut B-Tree search
367 * cache[3] - related data copied from dip to a new ip's cache[1]
368 */
369 struct hammer_node_cache cache[4];
370
371 /*
372 * When a demarcation point is created to synchronize an inode to
373 * disk, certain fields are copied so the front-end VOPs
374 * can continue to run in parallel with the synchronization
375 * occurring in the background.
376 */
377 int sync_flags; /* to-sync flags cache */
378 off_t sync_trunc_off; /* to-sync truncation */
379 off_t save_trunc_off; /* write optimization */
380 struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
381 struct hammer_inode_data sync_ino_data; /* to-sync cache */
382 size_t redo_count;
383
384 /*
385 * Track the earliest offset in the UNDO/REDO FIFO containing
386 * REDO records. This is staged to the backend during flush
387 * sequences. While the inode is staged redo_fifo_next is used
388 * to track the earliest offset for rotation into redo_fifo_start
389 * on completion of the flush.
390 */
391 hammer_off_t redo_fifo_start;
392 hammer_off_t redo_fifo_next;
393 } *hammer_inode_t;
394
395 #define VTOI(vp) ((hammer_inode_t)(vp)->v_data)
396
397 /*
398 * NOTE: DDIRTY does not include atime or mtime and does not include
399 * write-append size changes. SDIRTY handles write-append size
400 * changes.
401 *
402 * REDO indicates that REDO logging is active, creating a definitive
403 * stream of REDO records in the UNDO/REDO log for writes and
404 * truncations, including boundary records when/if REDO is turned off.
405 * REDO is typically enabled by fsync() and turned off if excessive
406 * writes without an fsync() occur.
407 *
408 * RDIRTY indicates that REDO records were laid down in the UNDO/REDO
409 * FIFO (even if REDO is turned off some might still be active) and
410 * are still being tracked for this inode. See hammer_redo.c
411 */
412 #define HAMMER_INODE_DDIRTY 0x0001 /* in-memory ino_data is dirty */
413 /* (not including atime/mtime) */
414 #define HAMMER_INODE_RSV_INODES 0x0002 /* hmp->rsv_inodes bumped */
415 #define HAMMER_INODE_CONN_DOWN 0x0004 /* include in downward recursion */
416 #define HAMMER_INODE_XDIRTY 0x0008 /* in-memory records */
417 #define HAMMER_INODE_ONDISK 0x0010 /* inode is on-disk (else not yet) */
418 #define HAMMER_INODE_FLUSH 0x0020 /* flush on last ref */
419 #define HAMMER_INODE_DELETED 0x0080 /* inode delete (backend) */
420 #define HAMMER_INODE_DELONDISK 0x0100 /* delete synchronized to disk */
421 #define HAMMER_INODE_RO 0x0200 /* read-only (because of as-of) */
422 #define HAMMER_INODE_RECSW 0x0400 /* waiting on data record flush */
423 #define HAMMER_INODE_DONDISK 0x0800 /* data records may be on disk */
424 #define HAMMER_INODE_BUFS 0x1000 /* dirty high level bps present */
425 #define HAMMER_INODE_REFLUSH 0x2000 /* flush on dependency / reflush */
426 #define HAMMER_INODE_RECLAIM 0x4000 /* trying to reclaim */
427 #define HAMMER_INODE_FLUSHW 0x8000 /* someone waiting for flush */
428
429 #define HAMMER_INODE_TRUNCATED 0x00010000
430 #define HAMMER_INODE_DELETING 0x00020000 /* inode delete request (frontend)*/
431 #define HAMMER_INODE_RESIGNAL 0x00040000 /* re-signal on re-flush */
432 #define HAMMER_INODE_ATIME 0x00100000 /* in-memory atime modified */
433 #define HAMMER_INODE_MTIME 0x00200000 /* in-memory mtime modified */
434 #define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
435 #define HAMMER_INODE_DUMMY 0x00800000 /* dummy inode covering bad file */
436 #define HAMMER_INODE_SDIRTY 0x01000000 /* in-memory ino_data.size is dirty*/
437 #define HAMMER_INODE_REDO 0x02000000 /* REDO logging active */
438 #define HAMMER_INODE_RDIRTY 0x04000000 /* REDO records active in fifo */
439 #define HAMMER_INODE_SLAVEFLUSH 0x08000000 /* being flushed by slave */
440
441 #define HAMMER_INODE_MODMASK (HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY| \
442 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS| \
443 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME| \
444 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
445
446 #define HAMMER_INODE_MODMASK_NOXDIRTY \
447 (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)
448
449 #define HAMMER_INODE_MODMASK_NOREDO \
450 (HAMMER_INODE_DDIRTY| \
451 HAMMER_INODE_XDIRTY| \
452 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
453
454 #define HAMMER_FLUSH_SIGNAL 0x0001
455 #define HAMMER_FLUSH_RECURSION 0x0002
456
457 /*
458 * Used by the inode reclaim code to pipeline reclaims and avoid
459 * blowing out kernel memory or letting the flusher get too far
460 * behind. The reclaim wakes up when count reaches 0 or the
461 * timer expires.
462 */
463 struct hammer_reclaim {
464 TAILQ_ENTRY(hammer_reclaim) entry;
465 int count;
466 };
467
468 /*
469 * Track who is creating the greatest burden on the
470 * inode cache.
471 */
472 struct hammer_inostats {
473 pid_t pid; /* track user process */
474 int ltick; /* last tick */
475 int count; /* count (degenerates) */
476 };
477
478 #define HAMMER_INOSTATS_HSIZE 32
479 #define HAMMER_INOSTATS_HMASK (HAMMER_INOSTATS_HSIZE - 1)
480
481 /*
482 * Structure used to represent an unsynchronized record in-memory. These
483 * records typically represent directory entries. Only non-historical
484 * records are kept in-memory.
485 *
486 * Records are organized as a per-inode RB-Tree. If the inode is not
487 * on disk then neither are any records and the in-memory record tree
488 * represents the entire contents of the inode. If the inode is on disk
489 * then the on-disk B-Tree is scanned in parallel with the in-memory
490 * RB-Tree to synthesize the current state of the file.
491 *
492 * Records are also used to enforce the ordering of directory create/delete
493 * operations. A new inode will not be flushed to disk unless its related
494 * directory entry is also being flushed at the same time. A directory entry
495 * will not be removed unless its related inode is also being removed at the
496 * same time.
497 */
498 typedef enum hammer_record_type {
499 HAMMER_MEM_RECORD_GENERAL, /* misc record */
500 HAMMER_MEM_RECORD_INODE, /* inode record */
501 HAMMER_MEM_RECORD_ADD, /* positive memory cache record */
502 HAMMER_MEM_RECORD_DEL, /* negative delete-on-disk record */
503 HAMMER_MEM_RECORD_DATA /* bulk-data record w/on-disk ref */
504 } hammer_record_type_t;
505
506 typedef struct hammer_record {
507 RB_ENTRY(hammer_record) rb_node;
508 TAILQ_ENTRY(hammer_record) target_entry;
509 hammer_inode_state_t flush_state;
510 hammer_flush_group_t flush_group;
511 hammer_record_type_t type;
512 struct hammer_lock lock;
513 struct hammer_reserve *resv;
514 hammer_inode_t ip;
515 hammer_inode_t target_ip;
516 struct hammer_btree_leaf_elm leaf;
517 hammer_data_ondisk_t data;
518 int flags;
519 int gflags;
520 hammer_off_t zone2_offset; /* direct-write only */
521 } *hammer_record_t;
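
/*
 * Sketch of how the frontend creates an in-memory record (illustrative
 * only; reference handling and error paths are left to the callers in
 * the .c files, see the prototypes below):
 *
 *	record = hammer_alloc_mem_record(ip, data_len);
 *	... fill in record->leaf (and record->data, if any) ...
 *	error = hammer_mem_add(record);
 */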
522
523 /*
524 * Record flags. Note that FE can only be set by the frontend if the
525 * record has not been interlocked by the backend w/ BE.
526 */
527 #define HAMMER_RECF_ALLOCDATA 0x0001
528 #define HAMMER_RECF_ONRBTREE 0x0002
529 #define HAMMER_RECF_DELETED_FE 0x0004 /* deleted (frontend) */
530 #define HAMMER_RECF_DELETED_BE 0x0008 /* deleted (backend) */
531 #define HAMMER_RECF_COMMITTED 0x0010 /* committed to the B-Tree */
532 #define HAMMER_RECF_INTERLOCK_BE 0x0020 /* backend interlock */
533 #define HAMMER_RECF_WANTED 0x0040 /* wanted by the frontend */
534 #define HAMMER_RECF_CONVERT_DELETE 0x0100 /* special case */
535 #define HAMMER_RECF_REDO 0x1000 /* REDO was laid down */
536
537 /*
538 * These flags must be separate to deal with SMP races
539 */
540 #define HAMMER_RECG_DIRECT_IO 0x0001 /* related direct I/O running*/
541 #define HAMMER_RECG_DIRECT_WAIT 0x0002 /* related direct I/O running*/
542 #define HAMMER_RECG_DIRECT_INVAL 0x0004 /* buffer alias invalidation */
543 /*
544 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
545 */
546 #define HAMMER_CREATE_MODE_UMIRROR 0x0001
547 #define HAMMER_CREATE_MODE_SYS 0x0002
548
549 #define HAMMER_DELETE_ADJUST 0x0001
550 #define HAMMER_DELETE_DESTROY 0x0002
551
552 /*
553 * In-memory structures representing on-disk structures.
554 */
555 RB_HEAD(hammer_vol_rb_tree, hammer_volume);
556 RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
557 RB_HEAD(hammer_nod_rb_tree, hammer_node);
558 RB_HEAD(hammer_und_rb_tree, hammer_undo);
559 RB_HEAD(hammer_res_rb_tree, hammer_reserve);
560 RB_HEAD(hammer_mod_rb_tree, hammer_io);
561
562 RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
563 hammer_vol_rb_compare, int32_t);
564 RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
565 hammer_buf_rb_compare, hammer_off_t);
566 RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
567 hammer_nod_rb_compare, hammer_off_t);
568 RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
569 hammer_und_rb_compare, hammer_off_t);
570 RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
571 hammer_res_rb_compare, hammer_off_t);
572 RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
573 hammer_mod_rb_compare, hammer_off_t);
574
575 /*
576 * IO management - embedded at the head of various in-memory structures
577 *
578 * VOLUME - hammer_volume containing meta-data
579 * META_BUFFER - hammer_buffer containing meta-data
580 * UNDO_BUFFER - hammer_buffer containing undo-data
581 * DATA_BUFFER - hammer_buffer containing pure-data
582 * DUMMY - hammer_buffer not containing valid data
583 *
584 * Dirty volume headers and dirty meta-data buffers are locked until the
585 * flusher can sequence them out. Dirty pure-data buffers can be written.
586 * Clean buffers can be passively released.
587 */
588 typedef enum hammer_io_type {
589 HAMMER_IOTYPE_VOLUME,
590 HAMMER_IOTYPE_META_BUFFER,
591 HAMMER_IOTYPE_UNDO_BUFFER,
592 HAMMER_IOTYPE_DATA_BUFFER,
593 HAMMER_IOTYPE_DUMMY
594 } hammer_io_type_t;
595
596 typedef struct hammer_io {
597 struct hammer_lock lock;
598 hammer_io_type_t type;
599 struct hammer_mount *hmp;
600 struct hammer_volume *volume;
601 RB_ENTRY(hammer_io) rb_node; /* if modified */
602 TAILQ_ENTRY(hammer_io) iorun_entry; /* iorun_list */
603 struct hammer_mod_rb_tree *mod_root;
604 struct buf *bp;
605 int64_t offset; /* volume offset */
606 int bytes; /* buffer cache buffer size */
607 int modify_refs;
608
609 /*
610 * These can be modified at any time by the backend while holding
611 * io_token, due to bio_done and hammer_io_complete() callbacks.
612 */
613 u_int running : 1; /* bp write IO in progress */
614 u_int waiting : 1; /* someone is waiting on us */
615 u_int ioerror : 1; /* abort on io-error */
616 u_int unusedA : 29;
617
618 /*
619 * These can only be modified by the frontend while holding
620 * fs_token, or by the backend while holding the io interlocked
621 * with no references (which will block the frontend when it
622 * tries to reference it).
623 *
624 * WARNING! SMP RACES will create havoc if the callbacks ever try
625 * to modify any of these outside the above restrictions.
626 */
627 u_int modified : 1; /* bp's data was modified */
628 u_int released : 1; /* bp released (w/ B_LOCKED set) */
629 u_int waitdep : 1; /* flush waits for dependencies */
630 u_int recovered : 1; /* has recovery ref */
631 u_int waitmod : 1; /* waiting for modify_refs */
632 u_int reclaim : 1; /* reclaim requested */
633 u_int gencrc : 1; /* crc needs to be generated */
634 u_int unusedB : 25;
635 } *hammer_io_t;
636
637 #define HAMMER_CLUSTER_SIZE (64 * 1024)
638 #if HAMMER_CLUSTER_SIZE > MAXBSIZE
639 #undef HAMMER_CLUSTER_SIZE
640 #define HAMMER_CLUSTER_SIZE MAXBSIZE
641 #endif
642
643 /*
644 * In-memory volume representing an on-disk volume
645 */
646 typedef struct hammer_volume {
647 struct hammer_io io; /* must be at offset 0 */
648 RB_ENTRY(hammer_volume) rb_node;
649 hammer_volume_ondisk_t ondisk;
650 int32_t vol_no;
651 hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
652 char *vol_name;
653 struct vnode *devvp;
654 int vol_flags;
655 } *hammer_volume_t;
656
657 #define HAMMER_ITOV(iop) ((hammer_volume_t)(iop))
658
659 /*
660 * In-memory buffer representing an on-disk buffer.
661 */
662 typedef struct hammer_buffer {
663 struct hammer_io io; /* must be at offset 0 */
664 RB_ENTRY(hammer_buffer) rb_node;
665 void *ondisk;
666 hammer_off_t zoneX_offset;
667 hammer_off_t zone2_offset;
668 struct hammer_reserve *resv;
669 struct hammer_node_list node_list;
670 } *hammer_buffer_t;
671
672 #define HAMMER_ITOB(iop) ((hammer_buffer_t)(iop))
673
674 /*
675 * In-memory B-Tree node, representing an on-disk B-Tree node.
676 *
677 * This is a hang-on structure which is backed by a hammer_buffer,
678 * and used for fine-grained locking of B-Tree nodes in order to
679 * properly control lock ordering.
680 */
681 typedef struct hammer_node {
682 struct hammer_lock lock; /* node-by-node lock */
683 TAILQ_ENTRY(hammer_node) entry; /* per-buffer linkage */
684 RB_ENTRY(hammer_node) rb_node; /* per-mount linkage */
685 hammer_off_t node_offset; /* full offset spec */
686 struct hammer_mount *hmp;
687 hammer_buffer_t buffer; /* backing buffer */
688 hammer_node_ondisk_t ondisk; /* ptr to on-disk structure */
689 TAILQ_HEAD(, hammer_cursor) cursor_list; /* deadlock recovery */
690 struct hammer_node_cache_list cache_list; /* passive caches */
691 int flags;
692 #if 0
693 int cursor_exclreq_count;
694 #endif
695 } *hammer_node_t;
696
697 #define HAMMER_NODE_DELETED 0x0001
698 #define HAMMER_NODE_FLUSH 0x0002
699 #define HAMMER_NODE_CRCGOOD 0x0004
700 #define HAMMER_NODE_NEEDSCRC 0x0008
701 #define HAMMER_NODE_NEEDSMIRROR 0x0010
702 #define HAMMER_NODE_CRCBAD 0x0020
703 #define HAMMER_NODE_NONLINEAR 0x0040 /* linear heuristic */
704
705 #define HAMMER_NODE_CRCANY (HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)
706
707 /*
708 * List of locked nodes. This structure is used to lock potentially large
709 * numbers of nodes as an aid for complex B-Tree operations.
710 */
711 struct hammer_node_lock;
712 TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);
713
714 typedef struct hammer_node_lock {
715 TAILQ_ENTRY(hammer_node_lock) entry;
716 struct hammer_node_lock_list list;
717 struct hammer_node_lock *parent;
718 hammer_node_t node;
719 hammer_node_ondisk_t copy; /* copy of on-disk data */
720 int index; /* index of this node in parent */
721 int count; /* count children */
722 int flags;
723 } *hammer_node_lock_t;
724
725 #define HAMMER_NODE_LOCK_UPDATED 0x0001
726 #define HAMMER_NODE_LOCK_LCACHE 0x0002
727
728 /*
729 * The reserve structure prevents the blockmap from allocating
730 * out of a reserved big-block. Such reservations are used by
731 * the direct-write mechanism.
732 *
733 * The structure is also used to hold off on reallocations of
734 * big-blocks from the freemap until flush dependencies have
735 * been dealt with.
736 */
737 typedef struct hammer_reserve {
738 RB_ENTRY(hammer_reserve) rb_node;
739 TAILQ_ENTRY(hammer_reserve) delay_entry;
740 int flg_no;
741 int flags;
742 int refs;
743 int zone;
744 int append_off;
745 hammer_off_t zone_offset;
746 } *hammer_reserve_t;
747
748 #define HAMMER_RESF_ONDELAY 0x0001
749 #define HAMMER_RESF_LAYER2FREE 0x0002
750
751 #include "hammer_cursor.h"
752
753 /*
754 * The undo structure tracks recent undos to avoid laying down duplicate
755 * undos within a flush group, saving us a significant amount of overhead.
756 *
757 * This is strictly a heuristic.
758 */
759 #define HAMMER_MAX_UNDOS 1024
760 #define HAMMER_MAX_FLUSHERS 4
761
762 typedef struct hammer_undo {
763 RB_ENTRY(hammer_undo) rb_node;
764 TAILQ_ENTRY(hammer_undo) lru_entry;
765 hammer_off_t offset;
766 int bytes;
767 } *hammer_undo_t;
768
769 struct hammer_flusher_info;
770 TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);
771
772 struct hammer_flusher {
773 int signal; /* flusher thread sequencer */
774 int done; /* last completed flush group */
775 int next; /* next unallocated flg seqno */
776 int group_lock; /* lock sequencing of the next flush */
777 int exiting; /* request master exit */
778 thread_t td; /* master flusher thread */
779 hammer_tid_t tid; /* last flushed transaction id */
780 int finalize_want; /* serialize finalization */
781 struct hammer_lock finalize_lock; /* serialize finalization */
782 struct hammer_transaction trans; /* shared transaction */
783 struct hammer_flusher_info_list run_list;
784 struct hammer_flusher_info_list ready_list;
785 };
786
787 #define HAMMER_FLUSH_UNDOS_RELAXED 0
788 #define HAMMER_FLUSH_UNDOS_FORCED 1
789 #define HAMMER_FLUSH_UNDOS_AUTO 2
790 /*
791 * Internal hammer mount data structure
792 */
793 typedef struct hammer_mount {
794 struct mount *mp;
795 struct hammer_ino_rb_tree rb_inos_root;
796 struct hammer_redo_rb_tree rb_redo_root;
797 struct hammer_vol_rb_tree rb_vols_root;
798 struct hammer_nod_rb_tree rb_nods_root;
799 struct hammer_und_rb_tree rb_undo_root;
800 struct hammer_res_rb_tree rb_resv_root;
801 struct hammer_buf_rb_tree rb_bufs_root;
802 struct hammer_pfs_rb_tree rb_pfsm_root;
803 hammer_volume_t rootvol;
804 struct hammer_base_elm root_btree_beg;
805 struct hammer_base_elm root_btree_end;
806
807 struct malloc_type *m_misc;
808 struct malloc_type *m_inodes;
809
810 int flags; /* HAMMER_MOUNT_xxx flags */
811 int hflags;
812 int ronly;
813 int nvolumes;
814 int master_id; /* default 0, no-mirror -1, otherwise 1-15 */
815 int version; /* hammer filesystem version to use */
816 int rsv_inodes; /* reserved space due to dirty inodes */
817 int64_t rsv_databytes; /* reserved space due to record data */
818 int rsv_recs; /* reserved space due to dirty records */
819 int rsv_fromdelay; /* big-blocks reserved due to flush delay */
820 int undo_rec_limit; /* based on size of undo area */
821
822 int volume_to_remove; /* volume that is currently being removed */
823
824 int count_inodes; /* total number of inodes */
825 int count_iqueued; /* inodes queued to flusher */
826 int count_reclaims; /* inodes pending reclaim by flusher */
827
828 struct hammer_flusher flusher;
829
830 u_int check_interrupt;
831 u_int check_yield;
832 hammer_uuid_t fsid;
833 struct hammer_mod_rb_tree volu_root; /* dirty volume headers */
834 struct hammer_mod_rb_tree undo_root; /* dirty undo buffers */
835 struct hammer_mod_rb_tree data_root; /* dirty data buffers */
836 struct hammer_mod_rb_tree meta_root; /* dirty meta bufs */
837 struct hammer_mod_rb_tree lose_root; /* loose buffers */
838 long locked_dirty_space; /* meta/volu count */
839 long io_running_space; /* io_token */
840 int objid_cache_count;
841 int error; /* critical I/O error */
842 struct krate krate; /* rate limited kprintf */
843 struct krate kdiag; /* rate limited kprintf */
844 hammer_tid_t asof; /* snapshot mount */
845 hammer_tid_t next_tid;
846 hammer_tid_t flush_tid1; /* flusher tid sequencing */
847 hammer_tid_t flush_tid2; /* flusher tid sequencing */
848 int64_t copy_stat_freebigblocks; /* number of free big-blocks */
849 uint32_t undo_seqno; /* UNDO/REDO FIFO seqno */
850 uint32_t recover_stage2_seqno; /* REDO recovery seqno */
851 hammer_off_t recover_stage2_offset; /* REDO recovery offset */
852
853 struct netexport export;
854 struct hammer_lock sync_lock;
855 struct hammer_lock undo_lock;
856 struct hammer_lock blkmap_lock;
857 struct hammer_lock snapshot_lock;
858 struct hammer_lock volume_lock;
859 struct hammer_blockmap blockmap[HAMMER_MAX_ZONES];
860 struct hammer_undo undos[HAMMER_MAX_UNDOS];
861 int undo_alloc;
862 TAILQ_HEAD(, hammer_undo) undo_lru_list;
863 TAILQ_HEAD(, hammer_reserve) delay_list;
864 struct hammer_flush_group_list flush_group_list;
865 hammer_flush_group_t fill_flush_group;
866 hammer_flush_group_t next_flush_group;
867 TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
868 TAILQ_HEAD(, hammer_reclaim) reclaim_list;
869 TAILQ_HEAD(, hammer_io) iorun_list;
870
871 struct lwkt_token fs_token; /* high level */
872 struct lwkt_token io_token; /* low level (IO callback) */
873
874 struct hammer_inostats inostats[HAMMER_INOSTATS_HSIZE];
875 uint64_t volume_map[4]; /* 256 bits bitfield */
876 } *hammer_mount_t;
877
878 #define HAMMER_MOUNT_CRITICAL_ERROR 0x0001
879 #define HAMMER_MOUNT_FLUSH_RECOVERY 0x0002
880 #define HAMMER_MOUNT_REDO_SYNC 0x0004
881 #define HAMMER_MOUNT_REDO_RECOVERY_REQ 0x0008
882 #define HAMMER_MOUNT_REDO_RECOVERY_RUN 0x0010
883
884 #define HAMMER_VOLUME_NUMBER_FOREACH(hmp, n) \
885 for (n = 0; n < HAMMER_MAX_VOLUMES; n++) \
886 if (hammer_volume_number_test(hmp, n))
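
/*
 * Example usage (sketch), using the volume accessors prototyped below:
 *
 *	int n, error;
 *	hammer_volume_t volume;
 *
 *	HAMMER_VOLUME_NUMBER_FOREACH(hmp, n) {
 *		volume = hammer_get_volume(hmp, n, &error);
 *		if (volume == NULL)
 *			continue;
 *		... inspect volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */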
887
888 /*
889 * Minimum buffer cache bufs required to rebalance the B-Tree.
890 * This is because we must hold the children and the children's children
891 * locked. Even this might not be enough if things are horribly out
892 * of balance.
893 */
894 #define HAMMER_REBALANCE_MIN_BUFS \
895 (HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)
896
897 #endif /* _KERNEL || _KERNEL_STRUCTURES */
898
899 #if defined(_KERNEL)
900 /*
901 * checkspace slop (8MB chunks), higher numbers are more conservative.
902 */
903 #define HAMMER_CHKSPC_REBLOCK 25
904 #define HAMMER_CHKSPC_MIRROR 20
905 #define HAMMER_CHKSPC_WRITE 20
906 #define HAMMER_CHKSPC_CREATE 20
907 #define HAMMER_CHKSPC_REMOVE 10
908 #define HAMMER_CHKSPC_EMERGENCY 0
909
910 extern struct vop_ops hammer_vnode_vops;
911 extern struct vop_ops hammer_spec_vops;
912 extern struct vop_ops hammer_fifo_vops;
913
914 extern int hammer_debug_io;
915 extern int hammer_debug_general;
916 extern int hammer_debug_inode;
917 extern int hammer_debug_locks;
918 extern int hammer_debug_btree;
919 extern int hammer_debug_tid;
920 extern int hammer_debug_recover;
921 extern int hammer_debug_critical;
922 extern int hammer_cluster_enable;
923 extern int hammer_tdmux_ticks;
924 extern int hammer_count_fsyncs;
925 extern int hammer_count_inodes;
926 extern int hammer_count_iqueued;
927 extern int hammer_count_reclaims;
928 extern int hammer_count_records;
929 extern int hammer_count_record_datas;
930 extern int hammer_count_volumes;
931 extern int hammer_count_buffers;
932 extern int hammer_count_nodes;
933 extern int64_t hammer_stats_btree_lookups;
934 extern int64_t hammer_stats_btree_searches;
935 extern int64_t hammer_stats_btree_inserts;
936 extern int64_t hammer_stats_btree_deletes;
937 extern int64_t hammer_stats_btree_elements;
938 extern int64_t hammer_stats_btree_splits;
939 extern int64_t hammer_stats_btree_iterations;
940 extern int64_t hammer_stats_btree_root_iterations;
941 extern int64_t hammer_stats_record_iterations;
942 extern int64_t hammer_stats_file_read;
943 extern int64_t hammer_stats_file_write;
944 extern int64_t hammer_stats_disk_read;
945 extern int64_t hammer_stats_disk_write;
946 extern int64_t hammer_stats_inode_flushes;
947 extern int64_t hammer_stats_commits;
948 extern int64_t hammer_stats_undo;
949 extern int64_t hammer_stats_redo;
950 extern long hammer_count_dirtybufspace;
951 extern int hammer_count_refedbufs;
952 extern int hammer_count_reservations;
953 extern long hammer_count_io_running_read;
954 extern long hammer_count_io_running_write;
955 extern int hammer_count_io_locked;
956 extern long hammer_limit_dirtybufspace;
957 extern int hammer_limit_recs;
958 extern int hammer_limit_inode_recs;
959 extern int hammer_limit_reclaims;
960 extern int hammer_live_dedup_cache_size;
961 extern int hammer_limit_redo;
962 extern int hammer_verify_zone;
963 extern int hammer_verify_data;
964 extern int hammer_double_buffer;
965 extern int hammer_btree_full_undo;
966 extern int hammer_yield_check;
967 extern int hammer_fsync_mode;
968 extern int hammer_autoflush;
969 extern int64_t hammer_contention_count;
970
971 void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
972 int error, const char *msg);
973 int hammer_vop_inactive(struct vop_inactive_args *);
974 int hammer_vop_reclaim(struct vop_reclaim_args *);
975 int hammer_get_vnode(hammer_inode_t ip, struct vnode **vpp);
976 hammer_inode_t hammer_get_inode(hammer_transaction_t trans,
977 hammer_inode_t dip, int64_t obj_id,
978 hammer_tid_t asof, uint32_t localization,
979 int flags, int *errorp);
980 hammer_inode_t hammer_get_dummy_inode(hammer_transaction_t trans,
981 hammer_inode_t dip, int64_t obj_id,
982 hammer_tid_t asof, uint32_t localization,
983 int flags, int *errorp);
984 hammer_inode_t hammer_find_inode(hammer_transaction_t trans,
985 int64_t obj_id, hammer_tid_t asof,
986 uint32_t localization);
987 void hammer_scan_inode_snapshots(hammer_mount_t hmp,
988 hammer_inode_info_t iinfo,
989 int (*callback)(hammer_inode_t ip, void *data),
990 void *data);
991 void hammer_put_inode(hammer_inode_t ip);
992 void hammer_put_inode_ref(hammer_inode_t ip);
993 void hammer_inode_waitreclaims(hammer_transaction_t trans);
994 void hammer_inode_dirty(hammer_inode_t ip);
995
996 int hammer_unload_volume(hammer_volume_t volume, void *data);
997 int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);
998
999 int hammer_unload_buffer(hammer_buffer_t buffer, void *data);
1000 int hammer_install_volume(hammer_mount_t hmp, const char *volname,
1001 struct vnode *devvp, void *data);
1002 int hammer_mountcheck_volumes(hammer_mount_t hmp);
1003 int hammer_get_installed_volumes(hammer_mount_t hmp);
1004
1005 int hammer_mem_add(hammer_record_t record);
1006 int hammer_ip_lookup(hammer_cursor_t cursor);
1007 int hammer_ip_first(hammer_cursor_t cursor);
1008 int hammer_ip_next(hammer_cursor_t cursor);
1009 int hammer_ip_resolve_data(hammer_cursor_t cursor);
1010 int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1011 hammer_tid_t tid);
1012 int hammer_create_at_cursor(hammer_cursor_t cursor,
1013 hammer_btree_leaf_elm_t leaf, void *udata, int mode);
1014 int hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
1015 hammer_tid_t delete_tid, uint32_t delete_ts,
1016 int track, int64_t *stat_bytes);
1017 int hammer_ip_check_directory_empty(hammer_transaction_t trans,
1018 hammer_inode_t ip);
1019 int hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
1020 int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
1021
1022 hammer_record_t
1023 hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
1024 void hammer_flush_record_done(hammer_record_t record, int error);
1025 void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
1026 void hammer_rel_mem_record(hammer_record_t record);
1027
1028 int hammer_cursor_up(hammer_cursor_t cursor);
1029 int hammer_cursor_up_locked(hammer_cursor_t cursor);
1030 int hammer_cursor_down(hammer_cursor_t cursor);
1031 int hammer_cursor_upgrade(hammer_cursor_t cursor);
1032 int hammer_cursor_upgrade_node(hammer_cursor_t cursor);
1033 void hammer_cursor_downgrade(hammer_cursor_t cursor);
1034 int hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
1035 void hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
1036 int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
1037 int index);
1038 void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
1039 int hammer_lock_ex_try(struct hammer_lock *lock);
1040 void hammer_lock_sh(struct hammer_lock *lock);
1041 int hammer_lock_sh_try(struct hammer_lock *lock);
1042 int hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
1043 void hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
1044 int hammer_lock_status(struct hammer_lock *lock);
1045 void hammer_unlock(struct hammer_lock *lock);
1046 void hammer_ref(struct hammer_lock *lock);
1047 int hammer_ref_interlock(struct hammer_lock *lock);
1048 int hammer_ref_interlock_true(struct hammer_lock *lock);
1049 void hammer_ref_interlock_done(struct hammer_lock *lock);
1050 void hammer_rel(struct hammer_lock *lock);
1051 int hammer_rel_interlock(struct hammer_lock *lock, int locked);
1052 void hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
1053 int hammer_get_interlock(struct hammer_lock *lock);
1054 int hammer_try_interlock_norefs(struct hammer_lock *lock);
1055 void hammer_put_interlock(struct hammer_lock *lock, int error);
1056
1057 void hammer_sync_lock_ex(hammer_transaction_t trans);
1058 void hammer_sync_lock_sh(hammer_transaction_t trans);
1059 int hammer_sync_lock_sh_try(hammer_transaction_t trans);
1060 void hammer_sync_unlock(hammer_transaction_t trans);
1061
1062 uint32_t hammer_to_unix_xid(hammer_uuid_t *uuid);
1063 void hammer_guid_to_uuid(hammer_uuid_t *uuid, uint32_t guid);
1064 void hammer_time_to_timespec(uint64_t xtime, struct timespec *ts);
1065 uint64_t hammer_timespec_to_time(struct timespec *ts);
1066 int hammer_str_to_tid(const char *str, int *ispfsp,
1067 hammer_tid_t *tidp, uint32_t *localizationp);
1068 hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
1069 int64_t namekey);
1070 void hammer_clear_objid(hammer_inode_t dip);
1071 void hammer_destroy_objid_cache(hammer_mount_t hmp);
1072
1073 int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
1074 int bytes);
1075 void hammer_clear_undo_history(hammer_mount_t hmp);
1076 enum vtype hammer_get_vnode_type(uint8_t obj_type);
1077 int hammer_get_dtype(uint8_t obj_type);
1078 uint8_t hammer_get_obj_type(enum vtype vtype);
1079 int64_t hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len,
1080 uint32_t *max_iterationsp);
1081 int hammer_nohistory(hammer_inode_t ip);
1082
1083 int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
1084 hammer_node_cache_t cache, hammer_inode_t ip);
1085 void hammer_normalize_cursor(hammer_cursor_t cursor);
1086 void hammer_done_cursor(hammer_cursor_t cursor);
1087 int hammer_recover_cursor(hammer_cursor_t cursor);
1088 void hammer_unlock_cursor(hammer_cursor_t cursor);
1089 int hammer_lock_cursor(hammer_cursor_t cursor);
1090 hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor);
1091 void hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);
1092
1093 void hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
1094 void hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
1095 int index);
1096 void hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
1097 int index);
1098 void hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
1099 hammer_node_t onode, int oindex,
1100 hammer_node_t nnode, int nindex);
1101 void hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
1102 hammer_node_t nparent, int nindex);
1103 void hammer_cursor_inserted_element(hammer_node_t node, int index);
1104 void hammer_cursor_deleted_element(hammer_node_t node, int index);
1105 void hammer_cursor_invalidate_cache(hammer_cursor_t cursor);
1106
1107 int hammer_btree_lookup(hammer_cursor_t cursor);
1108 int hammer_btree_first(hammer_cursor_t cursor);
1109 int hammer_btree_last(hammer_cursor_t cursor);
1110 int hammer_btree_extract(hammer_cursor_t cursor, int flags);
1111 int hammer_btree_iterate(hammer_cursor_t cursor);
1112 int hammer_btree_iterate_reverse(hammer_cursor_t cursor);
1113 int hammer_btree_insert(hammer_cursor_t cursor,
1114 hammer_btree_leaf_elm_t elm, int *doprop);
1115 int hammer_btree_delete(hammer_cursor_t cursor, int *ndelete);
1116 void hammer_btree_do_propagation(hammer_cursor_t cursor,
1117 hammer_btree_leaf_elm_t leaf);
1118 int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
1119 int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
1120 int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
1121 int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);
1122
1123 int btree_set_parent_of_child(hammer_transaction_t trans,
1124 hammer_node_t node,
1125 hammer_btree_elm_t elm);
1126 void hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
1127 void hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
1128 int depth);
1129 void hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
1130 int hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
1131 hammer_node_lock_t parent,
1132 hammer_node_lock_t lcache);
1133 void hammer_btree_lock_copy(hammer_cursor_t cursor,
1134 hammer_node_lock_t parent);
1135 int hammer_btree_sync_copy(hammer_cursor_t cursor,
1136 hammer_node_lock_t parent);
1137 void hammer_btree_unlock_children(hammer_mount_t hmp,
1138 hammer_node_lock_t parent,
1139 hammer_node_lock_t lcache);
1140 int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
1141 hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
1142 hammer_node_t node, int *parent_indexp,
1143 int *errorp, int try_exclusive);
1144
1145 void hammer_print_btree_node(hammer_node_ondisk_t ondisk);
1146 void hammer_print_btree_elm(hammer_btree_elm_t elm);
1147
1148 void *hammer_bread(hammer_mount_t hmp, hammer_off_t off,
1149 int *errorp, hammer_buffer_t *bufferp);
1150 void *hammer_bnew(hammer_mount_t hmp, hammer_off_t off,
1151 int *errorp, hammer_buffer_t *bufferp);
1152 void *hammer_bread_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
1153 int *errorp, hammer_buffer_t *bufferp);
1154 void *hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
1155 int *errorp, hammer_buffer_t *bufferp);
1156
1157 hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp);
1158
1159 hammer_volume_t hammer_get_volume(hammer_mount_t hmp,
1160 int32_t vol_no, int *errorp);
1161 hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
1162 int bytes, int isnew, int *errorp);
1163 void hammer_sync_buffers(hammer_mount_t hmp,
1164 hammer_off_t base_offset, int bytes);
1165 int hammer_del_buffers(hammer_mount_t hmp,
1166 hammer_off_t base_offset,
1167 hammer_off_t zone2_offset, int bytes,
1168 int report_conflicts);
1169
1170 int hammer_ref_volume(hammer_volume_t volume);
1171 int hammer_ref_buffer(hammer_buffer_t buffer);
1172 void hammer_flush_buffer_nodes(hammer_buffer_t buffer);
1173
1174 void hammer_rel_volume(hammer_volume_t volume, int locked);
1175 void hammer_rel_buffer(hammer_buffer_t buffer, int locked);
1176
1177 int hammer_vfs_export(struct mount *mp, int op,
1178 const struct export_args *export);
1179 hammer_node_t hammer_get_node(hammer_transaction_t trans,
1180 hammer_off_t node_offset, int isnew, int *errorp);
1181 void hammer_ref_node(hammer_node_t node);
1182 hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans,
1183 hammer_node_cache_t cache, int *errorp);
1184 void hammer_rel_node(hammer_node_t node);
1185 void hammer_delete_node(hammer_transaction_t trans,
1186 hammer_node_t node);
1187 void hammer_cache_node(hammer_node_cache_t cache,
1188 hammer_node_t node);
1189 void hammer_uncache_node(hammer_node_cache_t cache);
1190 void hammer_flush_node(hammer_node_t node, int locked);
1191
1192 hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
1193 hammer_off_t hint, int *errorp);
1194 void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1195 uint16_t rec_type, hammer_off_t *data_offsetp,
1196 hammer_buffer_t *data_bufferp,
1197 hammer_off_t hint, int *errorp);
1198
1199 int hammer_generate_undo(hammer_transaction_t trans,
1200 hammer_off_t zone_offset, void *base, int len);
1201 int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
1202 hammer_off_t file_offset, uint32_t flags,
1203 void *base, int len);
1204 void hammer_generate_redo_sync(hammer_transaction_t trans);
1205 void hammer_redo_fifo_start_flush(hammer_inode_t ip);
1206 void hammer_redo_fifo_end_flush(hammer_inode_t ip);
1207
1208 void hammer_format_undo(hammer_mount_t hmp, void *base, uint32_t seqno);
1209 int hammer_upgrade_undo_4(hammer_transaction_t trans);
1210
1211 hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
1212 hammer_off_t owner, int *errorp);
1213 void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
1214 hammer_off_t owner, int *errorp);
1215 int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
1216 hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
1217 int bytes, hammer_off_t hint, int *errorp);
1218 hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
1219 int bytes, hammer_off_t *zone_offp, int *errorp);
1220 void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
1221 hammer_reserve_t resv);
1222 void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
1223 void hammer_blockmap_free(hammer_transaction_t trans,
1224 hammer_off_t zone_offset, int bytes);
1225 int hammer_blockmap_dedup(hammer_transaction_t trans,
1226 hammer_off_t zone_offset, int bytes);
1227 int hammer_blockmap_finalize(hammer_transaction_t trans,
1228 hammer_reserve_t resv,
1229 hammer_off_t zone_offset, int bytes);
1230 int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
1231 int *curp, int *errorp);
1232 hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp,
1233 hammer_off_t zone_offset, int *errorp);
1234
1235 hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
1236 int *errorp);
1237 int64_t hammer_undo_used(hammer_transaction_t trans);
1238 int64_t hammer_undo_space(hammer_transaction_t trans);
1239 int64_t hammer_undo_max(hammer_mount_t hmp);
1240 int hammer_undo_reclaim(hammer_io_t io);
1241
1242 void hammer_start_transaction(hammer_transaction_t trans,
1243 hammer_mount_t hmp);
1244 void hammer_simple_transaction(hammer_transaction_t trans,
1245 hammer_mount_t hmp);
1246 void hammer_start_transaction_fls(hammer_transaction_t trans,
1247 hammer_mount_t hmp);
1248 void hammer_done_transaction(hammer_transaction_t trans);
1249 hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);
1250
1251 void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
1252 void hammer_flush_inode(hammer_inode_t ip, int flags);
1253 void hammer_wait_inode(hammer_inode_t ip);
1254
1255 int hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
1256 struct ucred *cred, hammer_inode_t dip,
1257 const char *name, int namelen,
1258 hammer_pseudofs_inmem_t pfsm,
1259 hammer_inode_t *ipp);
1260 void hammer_rel_inode(hammer_inode_t ip, int flush);
1261 int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
1262 int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
1263 int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
1264 int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);
1265
1266 int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
1267 void hammer_sync_inode_done(hammer_inode_t ip, int error);
1268 void hammer_test_inode(hammer_inode_t dip);
1269 void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
1270 int hammer_update_atime_quick(hammer_inode_t ip);
1271
1272 int hammer_ip_add_direntry(hammer_transaction_t trans,
1273 hammer_inode_t dip, const char *name, int bytes,
1274 hammer_inode_t nip);
1275 int hammer_ip_del_direntry(hammer_transaction_t trans,
1276 hammer_cursor_t cursor, hammer_inode_t dip,
1277 hammer_inode_t ip);
1278 void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
1279 hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
1280 void *data, int bytes, int *errorp);
1281 int hammer_ip_frontend_trunc(hammer_inode_t ip, off_t file_size);
1282 int hammer_ip_add_record(hammer_transaction_t trans,
1283 hammer_record_t record);
1284 int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1285 int64_t ran_beg, int64_t ran_end, int truncating);
1286 int hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
1287 int *countp);
1288 int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
1289 int64_t offset, void *data, int bytes);
1290 int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
1291 hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans,
1292 uint32_t localization, int *errorp);
1293 int hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
1294 hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip);
1295 int hammer_save_pseudofs(hammer_transaction_t trans,
1296 hammer_pseudofs_inmem_t pfsm);
1297 int hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization);
1298 void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
1299 int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
1300 struct ucred *cred);
1301
1302 void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
1303 hammer_io_type_t type);
1304 hammer_io_type_t hammer_zone_to_iotype(int zone);
1305 int hammer_io_read(struct vnode *devvp, hammer_io_t io, int limit);
1306 void hammer_io_advance(hammer_io_t io);
1307 int hammer_io_new(struct vnode *devvp, hammer_io_t io);
1308 int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
1309 struct buf *hammer_io_release(hammer_io_t io, int flush);
1310 void hammer_io_flush(hammer_io_t io, int reclaim);
1311 void hammer_io_wait(hammer_io_t io);
1312 void hammer_io_waitdep(hammer_io_t io);
1313 void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
1314 int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1315 hammer_btree_leaf_elm_t leaf);
1316 int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
1317 hammer_btree_leaf_elm_t leaf);
1318 int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1319 hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(hammer_io_t io, int inval);
void hammer_io_clear_modlist(hammer_io_t io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(hammer_io_t io);
void hammer_io_clear_error_noassert(hammer_io_t io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_io_limit_backlog(hammer_mount_t hmp);

void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_scan_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_dedup *dedup);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int hammer_flusher_async_one(hammer_mount_t hmp);
int hammer_flusher_running(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int hammer_flusher_meta_limit(hammer_mount_t hmp);
int hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int hammer_flusher_haswork(hammer_mount_t hmp);
int hammer_flush_dirty(hammer_mount_t hmp, int max_count);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume, int final);

dev_t hammer_fsid_to_udev(hammer_uuid_t *uuid);


int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}
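
/*
 * Illustrative usage sketch (assumed, not verbatim from the source):
 * callers typically test for free space before starting a space-consuming
 * operation.  HAMMER_CHKSPC_CREATE is assumed to be one of the slop
 * constants defined earlier in this header, and _hammer_checkspace() is
 * assumed to return 0 when sufficient space is available.
 *
 *	error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE);
 *	if (error)
 *		return(error);
 */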

static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}

static __inline void
hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume)
{
	hammer_modify_volume(trans, volume, NULL, 0);
}

static __inline void
hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer)
{
	hammer_modify_buffer(trans, buffer, NULL, 0);
}

/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
			void *base, int len)
{
	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		 (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

	if (hammer_btree_full_undo) {
		hammer_modify_node_all(trans, node);
	} else {
		hammer_modify_buffer(trans, node->buffer, base, len);
		hammer_modify_buffer(trans, node->buffer, &node->ondisk->crc,
				sizeof(hammer_crc_t));
		--node->buffer->io.modify_refs;	/* only want one ref */
	}
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; it is very expensive to do, and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}
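
/*
 * Illustrative usage sketch (assumed, not verbatim from the B-Tree code):
 * a caller changing part of a node's on-disk image brackets the change
 * with a modify/done pair so that undo data and deferred CRC generation
 * are handled for it.  Here "elm" is assumed to point into
 * node->ondisk->elms[] and "new_base" is a hypothetical value.
 *
 *	hammer_modify_node(trans, node, &elm->base, sizeof(elm->base));
 *	elm->base = new_base;
 *	hammer_modify_node_done(node);
 */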

static __inline int
hammer_btree_extract_leaf(hammer_cursor_t cursor)
{
	return(hammer_btree_extract(cursor, 0));
}

static __inline int
hammer_btree_extract_data(hammer_cursor_t cursor)
{
	return(hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA));
}

/*
 * Lookup a blockmap offset.
 */
static __inline hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp)
{
#if defined INVARIANTS
	KKASSERT(hammer_is_zone_record(zone_offset));
#endif

	/*
	 * We can actually skip blockmap verify by default,
	 * as normal blockmaps are now direct-mapped onto the freemap
	 * and so represent zone-2 addresses.
	 */
	if (hammer_verify_zone == 0) {
		*errorp = 0;
		return hammer_xlate_to_zone2(zone_offset);
	}

	return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp);
}
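
/*
 * Illustrative usage sketch (assumed): translate a zoned blockmap offset
 * into a zone-2 (device) offset and check the returned error.  The local
 * variables and the leaf element shown here are hypothetical.
 *
 *	int error;
 *	hammer_off_t zone2_offset;
 *
 *	zone2_offset = hammer_blockmap_lookup(hmp, leaf->data_offset, &error);
 *	if (error)
 *		return(error);
 */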

#define hammer_modify_volume_field(trans, vol, field) \
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \
			sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field) \
	hammer_modify_node(trans, node, &(node)->ondisk->field, \
			sizeof((node)->ondisk->field))
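
/*
 * Illustrative usage sketch (assumed): the field-modify macros cover only
 * the named on-disk field, for example bumping a node's element count
 * under undo.  The "count" field is assumed from hammer_disk.h; any
 * ondisk field of the structure can be named.
 *
 *	hammer_modify_node_field(trans, node, count);
 *	++node->ondisk->count;
 *	hammer_modify_node_done(node);
 */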

/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves locality between directory entries and inodes.
 */
static __inline uint32_t
hammer_dir_localization(hammer_inode_t dip)
{
	return(HAMMER_DIR_INODE_LOCALIZATION(&dip->ino_data));
}
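
/*
 * Illustrative usage sketch (assumed, simplified from the directory-entry
 * code): the value returned above is combined with the parent directory's
 * PFS localization to form the B-Tree key localization of a new directory
 * entry.  "record" and "dip" are hypothetical here.
 *
 *	record->leaf.base.localization = dip->obj_localization +
 *					 hammer_dir_localization(dip);
 */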

static __inline hammer_io_t
hammer_buf_peek_io(struct buf *bp)
{
	return((hammer_io_t)bp->b_priv);
}

static __inline void
hammer_buf_attach_io(struct buf *bp, hammer_io_t io)
{
	/* struct buf and struct hammer_io are 1:1 */
	KKASSERT(hammer_buf_peek_io(bp) == NULL);
	bp->b_priv = io;
}

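/*
 * Volume-number bitmap helpers (descriptive note): hmp->volume_map is
 * assumed to be an array of 64-bit words with one bit per possible volume
 * number.  __hammer_vol_index() selects the word (vol_no / 64) and
 * __hammer_vol_low() the bit mask within it (1 << (vol_no % 64)).
 */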
static __inline int
__hammer_vol_index(int vol_no)
{
	return(vol_no >> 6);
}

static __inline uint64_t
__hammer_vol_low(int vol_no)
{
	return((uint64_t)1 << (vol_no & ((1 << 6) - 1)));
}

static __inline void
hammer_volume_number_add(hammer_mount_t hmp, hammer_volume_t vol)
{
	int i = __hammer_vol_index(vol->vol_no);
	hmp->volume_map[i] |= __hammer_vol_low(vol->vol_no);
}

static __inline void
hammer_volume_number_del(hammer_mount_t hmp, hammer_volume_t vol)
{
	int i = __hammer_vol_index(vol->vol_no);
	hmp->volume_map[i] &= ~__hammer_vol_low(vol->vol_no);
}

static __inline int
hammer_volume_number_test(hammer_mount_t hmp, int n)
{
	int i = __hammer_vol_index(n);
	return((hmp->volume_map[i] & __hammer_vol_low(n)) != 0);
}

#define hkprintf(format, args...) \
	kprintf("HAMMER: "format,## args)
#define hvkprintf(vol, format, args...) \
	kprintf("HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkprintf(hmp, format, args...) \
	kprintf("HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkprintf(format, args...) \
	kprintf("%s: "format, __func__,## args)
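
/*
 * Illustrative usage sketch (assumed): these wrappers simply prefix kernel
 * console output, e.g.
 *
 *	hkprintf("recovery complete\n");
 *
 * would print "HAMMER: recovery complete".
 */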

#define hkrateprintf(rate, format, args...) \
	krateprintf(rate, "HAMMER: "format,## args)
#define hvkrateprintf(rate, vol, format, args...) \
	krateprintf(rate, "HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkrateprintf(rate, hmp, format, args...) \
	krateprintf(rate, "HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkrateprintf(rate, format, args...) \
	krateprintf(rate, "%s: "format, __func__,## args)

#define hpanic(format, args...) \
	panic("%s: "format, __func__,## args)
#endif /* _KERNEL */

#endif /* !VFS_HAMMER_HAMMER_H_ */
