/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */

#include <sys/param.h>
#ifdef _KERNEL
#include <sys/kernel.h>
#include <sys/systm.h>
#endif
#include <sys/conf.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/stat.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <vm/vm_extern.h>

#ifdef _KERNEL
#include <sys/buf2.h>
#include <sys/signal2.h>
#include <vm/vm_page2.h>
#endif

#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

/*
 * Kernel trace
 */
#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
/* KTR_INFO_MASTER_EXTERN(hammer); */

/*
 * Misc structures
 */
struct hammer_mount;

/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs */
	union {
		struct hammer_btree_leaf_elm *leaf;
	} u;
} *hammer_inode_info_t;

typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;

/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	hammer_tid_t	tid;
	u_int64_t	time;
	u_int32_t	time32;
	int		sync_lock_refs;
	int		flags;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;

#define HAMMER_TRANSF_NEWINODE	0x0001
#define HAMMER_TRANSF_DIDIO	0x0002
#define HAMMER_TRANSF_CRCDOM	0x0004	/* EDOM on CRC error, less critical */

/*
 * HAMMER locks
 */
struct hammer_lock {
	volatile u_int	refs;		/* active references */
	volatile u_int	lockval;	/* lock count and control bits */
	struct thread	*lowner;	/* owner if exclusively held */
	struct thread	*rowner;	/* owner of the refs interlock */
};

#define HAMMER_REFS_LOCKED	0x40000000	/* transition check */
#define HAMMER_REFS_WANTED	0x20000000	/* transition check */
#define HAMMER_REFS_CHECK	0x10000000	/* transition check */

#define HAMMER_REFS_FLAGS	(HAMMER_REFS_LOCKED | \
				 HAMMER_REFS_WANTED | \
				 HAMMER_REFS_CHECK)

#define HAMMER_LOCKF_EXCLUSIVE	0x40000000
#define HAMMER_LOCKF_WANTED	0x20000000

static __inline int
hammer_notlocked(struct hammer_lock *lock)
{
	return(lock->lockval == 0);
}

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockval != 0);
}

/*
 * Returns the number of refs on the object.
 */
static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs & ~HAMMER_REFS_FLAGS);
}

static __inline int
hammer_oneref(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
}

static __inline int
hammer_norefs(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
}

static __inline int
hammer_norefsorlock(struct hammer_lock *lock)
{
	return(lock->refs == 0);
}

static __inline int
hammer_refsorlock(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

/*
 * Return non-zero if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
	    lock->lowner == td) {
		return(1);
	}
	return(0);
}
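
/*
 * Illustrative sketch (hedged, not part of the build): the typical
 * ref + exclusive-lock pairing using the primitives declared later in
 * this header.  hammer_lock_ex() is assumed to be the usual wrapper
 * around hammer_lock_ex_ident(); real callers also use the interlock
 * variants.
 */
#if 0
	hammer_ref(&node->lock);		/* hold the object */
	hammer_lock_ex(&node->lock);		/* gain exclusive access */
	KKASSERT(hammer_lock_excl_owned(&node->lock, curthread));
	/* ... modify the node ... */
	hammer_unlock(&node->lock);
	hammer_rel(&node->lock);		/* drop our reference */
#endif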

/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;

TAILQ_HEAD(hammer_record_list, hammer_record);

/*
 * Pseudo-filesystem extended data tracking
 */
struct hammer_pfs_rb_tree;
struct hammer_pseudofs_inmem;
RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	      hammer_pfs_rb_compare, u_int32_t);

struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem)	rb_node;
	struct hammer_lock	lock;
	u_int32_t		localization;
	hammer_tid_t		create_tid;
	int			flags;
	udev_t			fsid_udev;
	struct hammer_pseudofs_data pfsd;
};

typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t;

#define HAMMER_PFSM_DELETED	0x0001

/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object ids for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids, which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	2048
#define OBJID_CACHE_BULK_BITS	10		/* 10 bits (1024)	*/
#define OBJID_CACHE_BULK	(32 * 32)	/* two level (1024)	*/
#define OBJID_CACHE_BULK_MASK	(OBJID_CACHE_BULK - 1)
#define OBJID_CACHE_BULK_MASK64	((u_int64_t)(OBJID_CACHE_BULK - 1))

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode		*dip;
	hammer_tid_t			base_tid;
	int				count;
	u_int32_t			bm0;
	u_int32_t			bm1[32];
} *hammer_objid_cache_t;
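
/*
 * Illustrative sketch (hedged, not part of the build): scanning the
 * two-level bm0/bm1 bitmap above for a free objid slot, assuming a set
 * bit in bm0 marks a fully-allocated bm1 group and a set bit in bm1
 * marks an allocated slot.  The helper name is hypothetical.
 */
#if 0
static __inline int
hammer_objid_cache_find_free(hammer_objid_cache_t cache)
{
	int i, j;

	for (i = 0; i < 32; ++i) {
		if (cache->bm0 & (1U << i))	/* group exhausted */
			continue;
		for (j = 0; j < 32; ++j) {
			if ((cache->bm1[i] & (1U << j)) == 0)
				return (i * 32 + j); /* 0..OBJID_CACHE_BULK_MASK */
		}
	}
	return (-1);				/* cache exhausted */
}
#endif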

/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache)	entry;
	struct hammer_node		*node;
	struct hammer_inode		*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);

/*
 * Live dedup cache
 */
struct hammer_dedup_crc_rb_tree;
RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry,
		hammer_dedup_crc_rb_compare, hammer_crc_t);

struct hammer_dedup_off_rb_tree;
RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry,
		hammer_dedup_off_rb_compare, hammer_off_t);

#define DEDUP_CACHE_SIZE	4096 /* XXX make it a dynamic tunable */

typedef struct hammer_dedup_cache {
	RB_ENTRY(hammer_dedup_cache) crc_entry;
	RB_ENTRY(hammer_dedup_cache) off_entry;
	TAILQ_ENTRY(hammer_dedup_cache) lru_entry;
	struct hammer_mount *hmp;
	int64_t obj_id;
	u_int32_t localization;
	off_t file_offset;
	int bytes;
	hammer_off_t data_offset;
	hammer_crc_t crc;
} *hammer_dedup_cache_t;
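
/*
 * Illustrative sketch (hedged, not part of the build): the intended
 * live-dedup flow through the CRC-indexed cache above.
 * hammer_dedup_cache_lookup() and hammer_dedup_validate() are declared
 * later in this header; the surrounding logic and the helper name are
 * simplified assumptions, not the actual write path.
 */
#if 0
static __inline hammer_off_t
hammer_dedup_try(struct hammer_mount *hmp, void *data, int bytes, int zone)
{
	hammer_dedup_cache_t dcp;
	hammer_crc_t crc;

	crc = crc32(data, bytes);		/* CRC of candidate block */
	dcp = hammer_dedup_cache_lookup(hmp, crc);
	if (dcp && hammer_dedup_validate(dcp, zone, bytes, data))
		return (dcp->data_offset);	/* reference existing data */
	return (0);				/* no match, write normally */
}
#endif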

/*
 * Structure used to organize flush groups.  Flush groups must be
 * organized into chunks in order to avoid blowing out the UNDO FIFO.
 * Without this a 'sync' could end up flushing 50,000 inodes in a single
 * transaction.
 */
struct hammer_fls_rb_tree;
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	      hammer_ino_rb_compare);

struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group)	flush_entry;
	struct hammer_fls_rb_tree	flush_tree;
	int				seq;		/* our seq no */
	int				total_count;	/* record load */
	int				running;	/* group is running */
	int				closed;
	int				refs;
};

typedef struct hammer_flush_group *hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);
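
/*
 * Illustrative sketch (hedged, not part of the build): how a sync pass
 * might pick the next flush group chunk off the list, assuming 'closed'
 * marks a group that is full and eligible while 'running' marks one the
 * flusher already owns.  The helper is hypothetical; the real
 * sequencing lives in the flusher code.
 */
#if 0
static __inline hammer_flush_group_t
hammer_next_runnable_flush_group(struct hammer_flush_group_list *list)
{
	hammer_flush_group_t flg;

	TAILQ_FOREACH(flg, list, flush_entry) {
		if (flg->running)	/* already being flushed */
			continue;
		if (flg->closed)	/* full chunk, ready to run */
			return (flg);
	}
	return (NULL);
}
#endif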

/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * clear).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * databases and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_redo_rb_tree;
RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	      hammer_redo_rb_compare, hammer_off_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);

struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	hammer_flush_group_t	flush_group;
	RB_ENTRY(hammer_inode)	rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode)	rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t			obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	u_int32_t		obj_localization; /* (key) pseudo-fs */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			flags;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	int			cursor_exclreq_count;
	int			rsv_recs;
	struct vnode		*vp;
	hammer_pseudofs_inmem_t	pfsm;
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	off_t			trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;  /* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int			rec_generation;
	struct hammer_node_cache cache[4];	/* search initiate cache */

	/*
	 * When a demarcation point is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	off_t		save_trunc_off;		/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data; /* to-sync cache */
	size_t		redo_count;

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t	redo_fifo_start;
	hammer_off_t	redo_fifo_next;
};

typedef struct hammer_inode *hammer_inode_t;

#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 *	 write-append size changes.  SDIRTY handles write-append size
 *	 changes.
 *
 *	 REDO indicates that REDO logging is active, creating a definitive
 *	 stream of REDO records in the UNDO/REDO log for writes and
 *	 truncations, including boundary records when/if REDO is turned off.
 *	 REDO is typically enabled by fsync() and turned off if excessive
 *	 writes without an fsync() occur.
 *
 *	 RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 *	 FIFO (even if REDO is turned off some might still be active) and
 *	 are still being tracked for this inode.  See hammer_redo.c
 */
					/* (not including atime/mtime) */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_RECSW	0x0400	/* waiting on data record flush */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */
#define HAMMER_INODE_SLAVEFLUSH	0x08000000 /* being flushed by slave */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|   \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	    \
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|     \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY	\
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)

#define HAMMER_INODE_MODMASK_NOREDO	\
				(HAMMER_INODE_DDIRTY|			    \
				 HAMMER_INODE_XDIRTY|			    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002

/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.  The reclaim wakes up when count reaches 0 or the
 * timer expires.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int	count;
};

/*
 * Track who is creating the greatest burden on the
 * inode cache.
 */
struct hammer_inostats {
	pid_t		pid;	/* track user process */
	int		ltick;	/* last tick */
	int		count;	/* count (degenerates) */
};

#define HAMMER_INOSTATS_HSIZE	32
#define HAMMER_INOSTATS_HMASK	(HAMMER_INOSTATS_HSIZE - 1)
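
/*
 * Illustrative sketch (hedged, not part of the build): locating the
 * per-process inostats slot by masking the pid with the hash mask
 * above.  A minimal assumption; the kernel's actual hash may differ.
 */
#if 0
static __inline struct hammer_inostats *
hammer_inostats_slot(struct hammer_mount *hmp, pid_t pid)
{
	return (&hmp->inostats[pid & HAMMER_INOSTATS_HMASK]);
}
#endif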

/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;

struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_flush_group_t		flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
	int				gflags;
	hammer_off_t			zone2_offset;	/* direct-write only */
};

typedef struct hammer_record *hammer_record_t;

/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_COMMITTED		0x0010	/* committed to the B-Tree */
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_DEDUPED		0x0080	/* will be live-dedup'ed */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
#define HAMMER_RECF_REDO		0x1000	/* REDO was laid down */

/*
 * These flags must be separate to deal with SMP races
 */
#define HAMMER_RECG_DIRECT_IO		0x0001	/* related direct I/O running */
#define HAMMER_RECG_DIRECT_WAIT		0x0002	/* waiting on related direct I/O */
#define HAMMER_RECG_DIRECT_INVAL	0x0004	/* buffer alias invalidation */

/*
 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
 */
#define HAMMER_CREATE_MODE_UMIRROR	0x0001
#define HAMMER_CREATE_MODE_SYS		0x0002

#define HAMMER_DELETE_ADJUST		0x0001
#define HAMMER_DELETE_DESTROY		0x0002

/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);
RB_HEAD(hammer_mod_rb_tree, hammer_io);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
	      hammer_mod_rb_compare, hammer_off_t);

/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER,
	HAMMER_STRUCTURE_DUMMY
} hammer_io_type_t;

union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;

struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	struct hammer_volume	*volume;
	RB_ENTRY(hammer_io)	rb_node;     /* if modified */
	TAILQ_ENTRY(hammer_io)	iorun_entry; /* iorun_list */
	struct hammer_mod_rb_tree *mod_root;
	struct buf		*bp;
	int64_t			offset;	   /* zone-2 offset */
	int			bytes;	   /* buffer cache buffer size */
	int			modify_refs;

	/*
	 * These can be modified at any time by the backend while holding
	 * io_token, due to bio_done and hammer_io_complete() callbacks.
	 */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		ioerror : 1;	/* abort on io-error */
	u_int		unusedA : 29;

	/*
	 * These can only be modified by the frontend while holding
	 * fs_token, or by the backend while holding the io interlocked
	 * with no references (which will block the frontend when it
	 * tries to reference it).
	 *
	 * WARNING! SMP RACES will create havoc if the callbacks ever tried
	 *	    to modify any of these outside the above restrictions.
	 */
	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
	u_int		unusedB : 24;
};

typedef struct hammer_io *hammer_io_t;

#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)

/*
 * In-memory volume representing an on-disk volume
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
	char	*vol_name;
	struct vnode *devvp;
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;

/*
 * In-memory buffer (other than a volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;

/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list;  /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
	int			cursor_exclreq_count;
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020
#define HAMMER_NODE_NONLINEAR	0x0040		/* linear heuristic */

#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)

typedef struct hammer_node	*hammer_node_t;

/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;
	struct hammer_node_lock_list  list;
	struct hammer_node_lock	      *parent;
	hammer_node_t	node;
	hammer_node_ondisk_t copy;	/* copy of on-disk data */
	int		index;		/* index of this node in parent */
	int		count;		/* count of children */
	int		flags;
};

typedef struct hammer_node_lock *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001
#define HAMMER_NODE_LOCK_LCACHE		0x0002

/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;

/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved big-block.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big-blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flush_group;
	int		flags;
	int		refs;
	int		zone;
	int		append_off;
	int32_t		bytes_free;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
#define HAMMER_RESF_LAYER2FREE	0x0002

#include "hammer_cursor.h"

/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS		1024
#define HAMMER_MAX_FLUSHERS		4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t		offset;
	int			bytes;
};

typedef struct hammer_undo *hammer_undo_t;
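
/*
 * Illustrative sketch (hedged, not part of the build): the duplicate-undo
 * avoidance this table enables.  hammer_enter_undo_history() (declared
 * below) is assumed to return EALREADY when the range was recently
 * covered, letting the caller skip laying down a redundant undo.
 */
#if 0
	if (hammer_enter_undo_history(hmp, zone_offset, len) != EALREADY)
		hammer_generate_undo(trans, zone_offset, base, len);
#endif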

struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		done;		/* last completed flush group */
	int		next;		/* next unallocated flg seqno */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info_list run_list;
	struct hammer_flusher_info_list ready_list;
};

#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2

/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_redo_rb_tree rb_redo_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_pfs_rb_tree rb_pfsm_root;

	struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
	struct hammer_dedup_off_rb_tree rb_dedup_off_root;

	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;

	struct malloc_type	*m_misc;
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	hflags;
	int	ronly;
	int	nvolumes;
	int	volume_iterator;
	int	master_id;	/* -1 or 0-15 - clustering and mirroring */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* big-blocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */
	int	last_newrecords;
	int	count_newrecords;

	int	volume_to_remove; /* volume that is currently being removed */

	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */
	int	count_reclaims; /* inodes pending reclaim by flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;
	u_int	check_yield;
	uuid_t	fsid;
	struct hammer_mod_rb_tree volu_root;	/* dirty volume headers */
	struct hammer_mod_rb_tree undo_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree data_root;	/* dirty data buffers */
	struct hammer_mod_rb_tree meta_root;	/* dirty meta bufs    */
	struct hammer_mod_rb_tree lose_root;	/* loose buffers      */
	long	locked_dirty_space;		/* meta/volu count    */
	long	io_running_space;		/* io_token */
	int	unused01;
	int	objid_cache_count;
	int	dedup_cache_count;
	int	error;				/* critical I/O error */
	struct krate	krate;			/* rate limited kprintf */
	struct krate	kdiag;			/* rate limited kprintf */
	hammer_tid_t	asof;			/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;		/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;		/* flusher tid sequencing */
	int64_t copy_stat_freebigblocks;	/* number of free big-blocks */
	u_int32_t	undo_seqno;		/* UNDO/REDO FIFO seqno */
	u_int32_t	recover_stage2_seqno;	/* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset;	/* REDO recovery offset */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS];
	int			undo_alloc;
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list	flush_group_list;
	hammer_flush_group_t	fill_flush_group;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t	dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token	fs_token;	/* high level */
	struct lwkt_token	io_token;	/* low level (IO callback) */

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
};

typedef struct hammer_mount	*hammer_mount_t;

#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010

struct hammer_sync_info {
	int error;
	int waitfor;
};

/*
 * Minimum number of buffer cache buffers required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of balance.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)

#endif  /* _KERNEL || _KERNEL_STRUCTURES */

#if defined(_KERNEL)
/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0
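
/*
 * Illustrative sketch (hedged, not part of the build): how the slop
 * constants above pair with _hammer_checkspace() (declared below).
 * Higher slop makes an operation refuse to run closer to a full
 * filesystem; the error value returned here is an assumption.
 */
#if 0
	if (_hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE, NULL) != 0)
		return (ENOSPC);	/* not enough free space to create */
#endif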
996 
997 extern struct vop_ops hammer_vnode_vops;
998 extern struct vop_ops hammer_spec_vops;
999 extern struct vop_ops hammer_fifo_vops;
1000 extern struct bio_ops hammer_bioops;
1001 
1002 extern int hammer_debug_io;
1003 extern int hammer_debug_general;
1004 extern int hammer_debug_debug;
1005 extern int hammer_debug_inode;
1006 extern int hammer_debug_locks;
1007 extern int hammer_debug_btree;
1008 extern int hammer_debug_tid;
1009 extern int hammer_debug_recover;
1010 extern int hammer_debug_recover_faults;
1011 extern int hammer_debug_critical;
1012 extern int hammer_cluster_enable;
1013 extern int hammer_live_dedup;
1014 extern int hammer_tdmux_ticks;
1015 extern int hammer_count_fsyncs;
1016 extern int hammer_count_inodes;
1017 extern int hammer_count_iqueued;
1018 extern int hammer_count_reclaims;
1019 extern int hammer_count_records;
1020 extern int hammer_count_record_datas;
1021 extern int hammer_count_volumes;
1022 extern int hammer_count_buffers;
1023 extern int hammer_count_nodes;
1024 extern int64_t hammer_count_extra_space_used;
1025 extern int64_t hammer_stats_btree_lookups;
1026 extern int64_t hammer_stats_btree_searches;
1027 extern int64_t hammer_stats_btree_inserts;
1028 extern int64_t hammer_stats_btree_deletes;
1029 extern int64_t hammer_stats_btree_elements;
1030 extern int64_t hammer_stats_btree_splits;
1031 extern int64_t hammer_stats_btree_iterations;
1032 extern int64_t hammer_stats_btree_root_iterations;
1033 extern int64_t hammer_stats_record_iterations;
1034 extern int64_t hammer_stats_file_read;
1035 extern int64_t hammer_stats_file_write;
1036 extern int64_t hammer_stats_file_iopsr;
1037 extern int64_t hammer_stats_file_iopsw;
1038 extern int64_t hammer_stats_disk_read;
1039 extern int64_t hammer_stats_disk_write;
1040 extern int64_t hammer_stats_inode_flushes;
1041 extern int64_t hammer_stats_commits;
1042 extern int64_t hammer_stats_undo;
1043 extern int64_t hammer_stats_redo;
1044 extern long hammer_count_dirtybufspace;
1045 extern int hammer_count_refedbufs;
1046 extern int hammer_count_reservations;
1047 extern long hammer_count_io_running_read;
1048 extern long hammer_count_io_running_write;
1049 extern int hammer_count_io_locked;
1050 extern long hammer_limit_dirtybufspace;
1051 extern int hammer_limit_recs;
1052 extern int hammer_limit_inode_recs;
1053 extern int hammer_limit_reclaims;
1054 extern int hammer_live_dedup_cache_size;
1055 extern int hammer_limit_redo;
1056 extern int hammer_bio_count;
1057 extern int hammer_verify_zone;
1058 extern int hammer_verify_data;
1059 extern int hammer_write_mode;
1060 extern int hammer_double_buffer;
1061 extern int hammer_btree_full_undo;
1062 extern int hammer_yield_check;
1063 extern int hammer_fsync_mode;
1064 extern int hammer_autoflush;
1065 extern int64_t hammer_contention_count;
1066 
1067 extern int64_t hammer_live_dedup_vnode_bcmps;
1068 extern int64_t hammer_live_dedup_device_bcmps;
1069 extern int64_t hammer_live_dedup_findblk_failures;
1070 extern int64_t hammer_live_dedup_bmap_saves;
1071 
1072 void	hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
1073 			int error, const char *msg);
1074 int	hammer_vop_inactive(struct vop_inactive_args *);
1075 int	hammer_vop_reclaim(struct vop_reclaim_args *);
1076 int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
1077 struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
1078 			hammer_inode_t dip, int64_t obj_id,
1079 			hammer_tid_t asof, u_int32_t localization,
1080 			int flags, int *errorp);
1081 struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans,
1082 			hammer_inode_t dip, int64_t obj_id,
1083 			hammer_tid_t asof, u_int32_t localization,
1084 			int flags, int *errorp);
1085 struct hammer_inode *hammer_find_inode(hammer_transaction_t trans,
1086 			int64_t obj_id, hammer_tid_t asof,
1087 			u_int32_t localization);
1088 void	hammer_scan_inode_snapshots(hammer_mount_t hmp,
1089 			hammer_inode_info_t iinfo,
1090 			int (*callback)(hammer_inode_t ip, void *data),
1091 			void *data);
1092 void	hammer_put_inode(struct hammer_inode *ip);
1093 void	hammer_put_inode_ref(struct hammer_inode *ip);
1094 void	hammer_inode_waitreclaims(hammer_transaction_t trans);
1095 void	hammer_inode_dirty(struct hammer_inode *ip);
1096 
1097 int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
1098 int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);
1099 
1100 int	hammer_unload_buffer(hammer_buffer_t buffer, void *data);
1101 int	hammer_install_volume(hammer_mount_t hmp, const char *volname,
1102 			struct vnode *devvp);
1103 int	hammer_mountcheck_volumes(hammer_mount_t hmp);
1104 
1105 int	hammer_mem_add(hammer_record_t record);
1106 int	hammer_ip_lookup(hammer_cursor_t cursor);
1107 int	hammer_ip_first(hammer_cursor_t cursor);
1108 int	hammer_ip_next(hammer_cursor_t cursor);
1109 int	hammer_ip_resolve_data(hammer_cursor_t cursor);
1110 int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1111 			hammer_tid_t tid);
1112 int	hammer_create_at_cursor(hammer_cursor_t cursor,
1113 			hammer_btree_leaf_elm_t leaf, void *udata, int mode);
1114 int	hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
1115 			hammer_tid_t delete_tid, u_int32_t delete_ts,
1116 			int track, int64_t *stat_bytes);
1117 int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
1118 			hammer_inode_t ip);
1119 int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
1120 int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
1121 
1122 hammer_record_t
1123 	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
1124 void	hammer_flush_record_done(hammer_record_t record, int error);
1125 void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
1126 void	hammer_rel_mem_record(hammer_record_t record);
1127 
1128 int	hammer_cursor_up(hammer_cursor_t cursor);
1129 int	hammer_cursor_up_locked(hammer_cursor_t cursor);
1130 int	hammer_cursor_down(hammer_cursor_t cursor);
1131 int	hammer_cursor_upgrade(hammer_cursor_t cursor);
1132 int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
1133 void	hammer_cursor_downgrade(hammer_cursor_t cursor);
1134 int	hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
1135 void	hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
1136 int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
1137 			int index);
1138 void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
1139 int	hammer_lock_ex_try(struct hammer_lock *lock);
1140 void	hammer_lock_sh(struct hammer_lock *lock);
1141 int	hammer_lock_sh_try(struct hammer_lock *lock);
1142 int	hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
1143 void	hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
1144 int	hammer_lock_status(struct hammer_lock *lock);
1145 void	hammer_unlock(struct hammer_lock *lock);
1146 void	hammer_ref(struct hammer_lock *lock);
1147 int	hammer_ref_interlock(struct hammer_lock *lock);
1148 int	hammer_ref_interlock_true(struct hammer_lock *lock);
1149 void	hammer_ref_interlock_done(struct hammer_lock *lock);
1150 void	hammer_rel(struct hammer_lock *lock);
1151 int	hammer_rel_interlock(struct hammer_lock *lock, int locked);
1152 void	hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
1153 int	hammer_get_interlock(struct hammer_lock *lock);
1154 int	hammer_try_interlock_norefs(struct hammer_lock *lock);
1155 void	hammer_put_interlock(struct hammer_lock *lock, int error);
1156 
1157 void	hammer_sync_lock_ex(hammer_transaction_t trans);
1158 void	hammer_sync_lock_sh(hammer_transaction_t trans);
1159 int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
1160 void	hammer_sync_unlock(hammer_transaction_t trans);
1161 
1162 u_int32_t hammer_to_unix_xid(uuid_t *uuid);
1163 void hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
1164 void	hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts);
1165 u_int64_t hammer_timespec_to_time(struct timespec *ts);
1166 int	hammer_str_to_tid(const char *str, int *ispfsp,
1167 			hammer_tid_t *tidp, u_int32_t *localizationp);
1168 int	hammer_is_atatext(const char *name, int len);
1169 hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
1170 			int64_t namekey);
1171 void hammer_clear_objid(hammer_inode_t dip);
1172 void hammer_destroy_objid_cache(hammer_mount_t hmp);
1173 
1174 int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1,
1175 			hammer_dedup_cache_t dc2);
1176 int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1,
1177 			hammer_dedup_cache_t dc2);
1178 hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip,
1179 			hammer_btree_leaf_elm_t leaf);
1180 hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp,
1181 			hammer_crc_t crc);
1182 void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset);
1183 void hammer_destroy_dedup_cache(hammer_mount_t hmp);
1184 void hammer_dump_dedup_cache(hammer_mount_t hmp);
1185 int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
1186 			void *data);
1187 
1188 int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
1189 			int bytes);
1190 void hammer_clear_undo_history(hammer_mount_t hmp);
1191 enum vtype hammer_get_vnode_type(u_int8_t obj_type);
1192 int hammer_get_dtype(u_int8_t obj_type);
1193 u_int8_t hammer_get_obj_type(enum vtype vtype);
1194 int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
1195 			u_int32_t *max_iterationsp);
1196 int	hammer_nohistory(hammer_inode_t ip);
1197 
1198 int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
1199 			hammer_node_cache_t cache, hammer_inode_t ip);
1200 void	hammer_normalize_cursor(hammer_cursor_t cursor);
1201 void	hammer_done_cursor(hammer_cursor_t cursor);
1202 int	hammer_recover_cursor(hammer_cursor_t cursor);
1203 void	hammer_unlock_cursor(hammer_cursor_t cursor);
1204 int	hammer_lock_cursor(hammer_cursor_t cursor);
1205 hammer_cursor_t	hammer_push_cursor(hammer_cursor_t ocursor);
1206 void	hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);
1207 
1208 void	hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
1209 void	hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
1210 			int index);
1211 void	hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
1212 			int index);
1213 void	hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
1214 			hammer_node_t onode, int oindex,
1215 			hammer_node_t nnode, int nindex);
1216 void	hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
1217 			hammer_node_t nparent, int nindex);
1218 void	hammer_cursor_inserted_element(hammer_node_t node, int index);
1219 void	hammer_cursor_deleted_element(hammer_node_t node, int index);
1220 void	hammer_cursor_invalidate_cache(hammer_cursor_t cursor);
1221 
1222 int	hammer_btree_lookup(hammer_cursor_t cursor);
1223 int	hammer_btree_first(hammer_cursor_t cursor);
1224 int	hammer_btree_last(hammer_cursor_t cursor);
1225 int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
1226 int	hammer_btree_iterate(hammer_cursor_t cursor);
1227 int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
1228 int	hammer_btree_insert(hammer_cursor_t cursor,
1229 			    hammer_btree_leaf_elm_t elm, int *doprop);
1230 int	hammer_btree_delete(hammer_cursor_t cursor, int *ndelete);
1231 void	hammer_btree_do_propagation(hammer_cursor_t cursor,
1232 			    hammer_pseudofs_inmem_t pfsm,
1233 			    hammer_btree_leaf_elm_t leaf);
1234 int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
1235 int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
1236 int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
1237 int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);
1238 
1239 int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
1240                         hammer_btree_elm_t elm);
1241 void	hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
1242 void	hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
1243 			int depth);
1244 void	hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
1245 int	hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
1246 			hammer_node_lock_t parent,
1247 			hammer_node_lock_t lcache);
1248 void	hammer_btree_lock_copy(hammer_cursor_t cursor,
1249 			hammer_node_lock_t parent);
1250 int	hammer_btree_sync_copy(hammer_cursor_t cursor,
1251 			hammer_node_lock_t parent);
1252 void	hammer_btree_unlock_children(hammer_mount_t hmp,
1253 			hammer_node_lock_t parent,
1254 			hammer_node_lock_t lcache);
1255 int	hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
1256 hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
1257 			hammer_node_t node, int *parent_indexp,
1258 			int *errorp, int try_exclusive);
1259 
1260 void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
1261 void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);
1262 
1263 void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
1264 			int *errorp, struct hammer_buffer **bufferp);
1265 void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
1266 			int *errorp, struct hammer_buffer **bufferp);
1267 void	*hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
1268 			int *errorp, struct hammer_buffer **bufferp);
1269 void	*hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
1270 			int *errorp, struct hammer_buffer **bufferp);
1271 
1272 hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp);
1273 
1274 hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
1275 			int32_t vol_no, int *errorp);
1276 hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
1277 			int bytes, int isnew, int *errorp);
1278 void		hammer_sync_buffers(hammer_mount_t hmp,
1279 			hammer_off_t base_offset, int bytes);
1280 int		hammer_del_buffers(hammer_mount_t hmp,
1281 			hammer_off_t base_offset,
1282 			hammer_off_t zone2_offset, int bytes,
1283 			int report_conflicts);
1284 
1285 int		hammer_ref_volume(hammer_volume_t volume);
1286 int		hammer_ref_buffer(hammer_buffer_t buffer);
1287 void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);
1288 
1289 void		hammer_rel_volume(hammer_volume_t volume, int locked);
1290 void		hammer_rel_buffer(hammer_buffer_t buffer, int locked);
1291 
1292 int		hammer_vfs_export(struct mount *mp, int op,
1293 			const struct export_args *export);
1294 hammer_node_t	hammer_get_node(hammer_transaction_t trans,
1295 			hammer_off_t node_offset, int isnew, int *errorp);
1296 void		hammer_ref_node(hammer_node_t node);
1297 hammer_node_t	hammer_ref_node_safe(hammer_transaction_t trans,
1298 			hammer_node_cache_t cache, int *errorp);
1299 void		hammer_rel_node(hammer_node_t node);
1300 void		hammer_delete_node(hammer_transaction_t trans,
1301 			hammer_node_t node);
1302 void		hammer_cache_node(hammer_node_cache_t cache,
1303 			hammer_node_t node);
1304 void		hammer_uncache_node(hammer_node_cache_t cache);
1305 void		hammer_flush_node(hammer_node_t node, int locked);
1306 
1307 void hammer_dup_buffer(struct hammer_buffer **bufferp,
1308 			struct hammer_buffer *buffer);
1309 hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
1310 			hammer_off_t hint, int *errorp);
1311 void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1312 			u_int16_t rec_type, hammer_off_t *data_offsetp,
1313 			struct hammer_buffer **data_bufferp,
1314 			hammer_off_t hint, int *errorp);
1315 
1316 int hammer_generate_undo(hammer_transaction_t trans,
1317 			hammer_off_t zone_offset, void *base, int len);
1318 int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
1319 			hammer_off_t file_offset, u_int32_t flags,
1320 			void *base, int len);
1321 void hammer_generate_redo_sync(hammer_transaction_t trans);
1322 void hammer_redo_fifo_start_flush(hammer_inode_t ip);
1323 void hammer_redo_fifo_end_flush(hammer_inode_t ip);
1324 
1325 void hammer_format_undo(void *base, u_int32_t seqno);
1326 int hammer_upgrade_undo_4(hammer_transaction_t trans);
1327 
1328 void hammer_put_volume(struct hammer_volume *volume, int flush);
1329 void hammer_put_buffer(struct hammer_buffer *buffer, int flush);
1330 
1331 hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
1332 			hammer_off_t owner, int *errorp);
1333 void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
1334 			hammer_off_t owner, int *errorp);
1335 int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
1336 hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
1337 			int bytes, hammer_off_t hint, int *errorp);
1338 hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
1339 			int bytes, hammer_off_t *zone_offp, int *errorp);
1340 hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone,
1341 			int bytes, hammer_off_t zone_offset, int *errorp);
1342 void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
1343 			hammer_reserve_t resv);
1344 void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
1345 void hammer_blockmap_free(hammer_transaction_t trans,
1346 			hammer_off_t zone_offset, int bytes);
1347 int hammer_blockmap_dedup(hammer_transaction_t trans,
1348 			hammer_off_t zone_offset, int bytes);
1349 int hammer_blockmap_finalize(hammer_transaction_t trans,
1350 			hammer_reserve_t resv,
1351 			hammer_off_t zone_offset, int bytes);
1352 int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
1353 			int *curp, int *errorp);
1354 hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp,
1355 			hammer_off_t zone_offset, int *errorp);
1356 
1357 hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
1358 			int *errorp);
1359 int64_t hammer_undo_used(hammer_transaction_t trans);
1360 int64_t hammer_undo_space(hammer_transaction_t trans);
1361 int64_t hammer_undo_max(hammer_mount_t hmp);
1362 int hammer_undo_reclaim(hammer_io_t io);
1363 
1364 void hammer_start_transaction(struct hammer_transaction *trans,
1365 			      struct hammer_mount *hmp);
1366 void hammer_simple_transaction(struct hammer_transaction *trans,
1367 			      struct hammer_mount *hmp);
1368 void hammer_start_transaction_fls(struct hammer_transaction *trans,
1369 			          struct hammer_mount *hmp);
1370 void hammer_done_transaction(struct hammer_transaction *trans);
1371 hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);
1372 
void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip, int error);
void hammer_wait_inode(hammer_inode_t ip);

int  hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			const char *name, int namelen,
			hammer_pseudofs_inmem_t pfsm,
			struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t dip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int hammer_update_atime_quick(hammer_inode_t ip);

int  hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, const char *name, int bytes,
			hammer_inode_t nip);
int  hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
hammer_pseudofs_inmem_t  hammer_load_pseudofs(hammer_transaction_t trans,
			u_int32_t localization, int *errorp);
int  hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_save_pseudofs(hammer_transaction_t trans,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization);
void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);

void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
			enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit);
void hammer_io_advance(struct hammer_io *io);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io, int reclaim);
void hammer_io_wait(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
			hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io, int inval);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(struct hammer_io *io);
void hammer_io_clear_error_noassert(struct hammer_io *io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_io_limit_backlog(hammer_mount_t hmp);

void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_iterate_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pfs_iterate *pi);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_dedup *dedup);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int  hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int  hammer_flusher_async_one(hammer_mount_t hmp);
int hammer_flusher_running(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int  hammer_flusher_meta_limit(hammer_mount_t hmp);
int  hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int  hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int  hammer_flusher_haswork(hammer_mount_t hmp);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

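/*
 * Illustrative sketch (an assumption from the prototypes, not verbatim
 * from the source): hammer_flusher_async() appears to return a flush
 * sequence number which can later be handed to hammer_flusher_wait()
 * to rendezvous with that flush cycle.
 *
 *	int seq;
 *
 *	seq = hammer_flusher_async(hmp, flg);
 *	hammer_flusher_wait(hmp, seq);
 */
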
int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume, int final);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
void hkprintf(const char *ctl, ...) __printflike(1, 2);
udev_t hammer_fsid_to_udev(uuid_t *uuid);

int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

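/*
 * Illustrative sketch: these helpers give the logical block size and the
 * offset within that block for a given file offset, so a caller can
 * locate the base of the containing block.  The file_offset variable is
 * hypothetical.
 *
 *	int blksize = hammer_blocksize(file_offset);
 *	int boff = hammer_blockoff(file_offset);
 *	int64_t base = file_offset - boff;
 */
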
/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}

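/*
 * Illustrative sketch: callers typically bail out before starting an
 * operation when free space is insufficient, passing one of the
 * HAMMER_CHKSPC_* slop constants defined earlier in this header.
 *
 *	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
 *		return (error);
 */
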
static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}

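/*
 * Illustrative sketch: hammer_lock_ex() pairs with hammer_unlock(),
 * declared earlier in this header.
 *
 *	hammer_lock_ex(&ip->lock);
 *	... exclusive access to the structure ...
 *	hammer_unlock(&ip->lock);
 */
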
static __inline void
hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume)
{
	hammer_modify_volume(trans, volume, NULL, 0);
}

static __inline void
hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer)
{
	hammer_modify_buffer(trans, buffer, NULL, 0);
}

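/*
 * Illustrative note: the NULL base and zero length ask
 * hammer_modify_volume()/hammer_modify_buffer() to skip UNDO FIFO
 * generation, as the _noundo names suggest.  A minimal sketch, with a
 * hypothetical field assignment:
 *
 *	hammer_modify_volume_noundo(trans, volume);
 *	volume->ondisk->some_field = new_value;
 *	hammer_modify_volume_done(volume);
 */
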
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

	if (hammer_btree_full_undo) {
		hammer_modify_node_all(trans, node);
	} else {
		hammer_modify_buffer(trans, node->buffer, base, len);
		crcptr = &node->ondisk->crc;
		hammer_modify_buffer(trans, node->buffer,
				     crcptr, sizeof(hammer_crc_t));
		--node->buffer->io.modify_refs;	/* only want one ref */
	}
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; it is very expensive to do, and
 * a sequence of insertions or deletions can result in many calls to
 * this function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}

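/*
 * Illustrative sketch of the modify/done pairing for B-Tree nodes; the
 * new_count value is hypothetical.
 *
 *	hammer_modify_node(trans, node, &node->ondisk->count,
 *			   sizeof(node->ondisk->count));
 *	node->ondisk->count = new_count;
 *	hammer_modify_node_done(node);
 */
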
/*
 * Lookup a blockmap offset.
 */
static __inline hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp)
{
#if defined(INVARIANTS)
	int zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
#endif

	/*
	 * We can skip the blockmap verification by default because
	 * normal blockmaps are now direct-mapped onto the freemap
	 * and thus represent zone-2 addresses.
	 */
	if (hammer_verify_zone == 0) {
		*errorp = 0;
		return hammer_xlate_to_zone2(zone_offset);
	}

	return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp);
}

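/*
 * Illustrative sketch: translating a zone-X blockmap address to its
 * zone-2 (raw volume) equivalent.
 *
 *	int error;
 *	hammer_off_t zone2_offset;
 *
 *	zone2_offset = hammer_blockmap_lookup(hmp, zone_offset, &error);
 *	if (error)
 *		return (error);
 */
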
#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			     sizeof((node)->ondisk->field))

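/*
 * Illustrative sketch of the field helpers, assuming vol0_stat_bigblocks
 * is a field of the on-disk volume header (see hammer_disk.h):
 *
 *	hammer_modify_volume_field(trans, root_volume, vol0_stat_bigblocks);
 *	root_volume->ondisk->vol0_stat_bigblocks += count;
 *	hammer_modify_volume_done(root_volume);
 */
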
/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone of
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves locality between directory entries and
 * inodes.
 */
static __inline u_int32_t
hammer_dir_localization(hammer_inode_t dip)
{
	if (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIR_LOCAL_INO)
		return(HAMMER_LOCALIZE_INODE);
	else
		return(HAMMER_LOCALIZE_MISC);
}
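
/*
 * Illustrative sketch: a directory-entry lookup key combines the
 * directory's pseudo-fs localization with the zone returned here,
 * e.g. when initializing a cursor's key:
 *
 *	cursor.key_beg.localization = dip->obj_localization +
 *				      hammer_dir_localization(dip);
 */
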
#endif	/* _KERNEL || _KERNEL_STRUCTURES */