xref: /dragonfly/sys/vfs/hammer2/hammer2.h (revision 5071e670)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow for different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61 
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64 
65 #ifdef _KERNEL
66 #include <sys/param.h>
67 #endif
68 #include <sys/types.h>
69 #ifdef _KERNEL
70 #include <sys/kernel.h>
71 #endif
72 #include <sys/conf.h>
73 #ifdef _KERNEL
74 #include <sys/systm.h>
75 #endif
76 #include <sys/tree.h>
77 #include <sys/malloc.h>
78 #include <sys/mount.h>
79 #include <sys/vnode.h>
80 #include <sys/proc.h>
81 #include <sys/mountctl.h>
82 #include <sys/priv.h>
83 #include <sys/stat.h>
84 #include <sys/thread.h>
85 #include <sys/globaldata.h>
86 #include <sys/lockf.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/limits.h>
90 #include <sys/dmsg.h>
91 #include <sys/mutex.h>
92 #ifdef _KERNEL
93 #include <sys/kern_syscall.h>
94 #endif
95 
96 #ifdef _KERNEL
97 #include <sys/signal2.h>
98 #include <sys/buf2.h>
99 #include <sys/mutex2.h>
100 #include <sys/thread2.h>
101 #endif
102 
103 #include "hammer2_xxhash.h"
104 #include "hammer2_disk.h"
105 #include "hammer2_mount.h"
106 #include "hammer2_ioctl.h"
107 
108 struct hammer2_io;
109 struct hammer2_chain;
110 struct hammer2_cluster;
111 struct hammer2_inode;
112 struct hammer2_depend;
113 struct hammer2_dev;
114 struct hammer2_pfs;
115 struct hammer2_span;
116 struct hammer2_msg;
117 struct hammer2_thread;
118 union hammer2_xop;
119 
120 /*
121  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
122  * abortable locks, and both exclusive and shared spinlocks.  Normal
123  * synchronous non-abortable locks can be substituted for spinlocks.
124  */
125 typedef mtx_t				hammer2_mtx_t;
126 typedef mtx_link_t			hammer2_mtx_link_t;
127 typedef mtx_state_t			hammer2_mtx_state_t;
128 
129 typedef struct spinlock			hammer2_spin_t;
130 
131 #define hammer2_mtx_ex			mtx_lock_ex_quick
132 #define hammer2_mtx_ex_try		mtx_lock_ex_try
133 #define hammer2_mtx_sh			mtx_lock_sh_quick
134 #define hammer2_mtx_sh_again		mtx_lock_sh_again
135 #define hammer2_mtx_sh_try		mtx_lock_sh_try
136 #define hammer2_mtx_unlock		mtx_unlock
137 #define hammer2_mtx_downgrade		mtx_downgrade
138 #define hammer2_mtx_owned		mtx_owned
139 #define hammer2_mtx_init		mtx_init
140 #define hammer2_mtx_temp_release	mtx_lock_temp_release
141 #define hammer2_mtx_temp_restore	mtx_lock_temp_restore
142 #define hammer2_mtx_refs		mtx_lockrefs
143 
144 #define hammer2_spin_init		spin_init
145 #define hammer2_spin_sh			spin_lock_shared
146 #define hammer2_spin_ex			spin_lock
147 #define hammer2_spin_unsh		spin_unlock_shared
148 #define hammer2_spin_unex		spin_unlock
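
/*
 * Illustrative sketch only (not part of the API): the shims above are used
 * exactly like the underlying mtx/spinlock primitives.  The example function
 * below is hypothetical; the fields it touches belong to the hammer2_chain
 * structure defined later in this header.
 */
#if 0
static __inline void
hammer2_example_locking(struct hammer2_chain *chain)
{
	hammer2_mtx_ex(&chain->lock);		/* exclusive chain lock */
	hammer2_spin_ex(&chain->core.spin);	/* spinlock for core topology */
	/* ... short structural update of chain->core ... */
	hammer2_spin_unex(&chain->core.spin);
	hammer2_mtx_unlock(&chain->lock);
}
#endif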
149 
150 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
151 TAILQ_HEAD(hammer2_chain_list, hammer2_chain);
152 
153 typedef struct hammer2_xop_list	hammer2_xop_list_t;
154 
155 #ifdef _KERNEL
156 /*
157  * General lock support
158  */
159 static __inline
160 int
161 hammer2_mtx_upgrade_try(hammer2_mtx_t *mtx)
162 {
163 	return mtx_upgrade_try(mtx);
164 }
165 
166 #endif
167 
168 /*
169  * The xid tracks internal transactional updates.
170  *
171  * XXX fix-me, really needs to be 64-bits
172  */
173 typedef uint32_t hammer2_xid_t;
174 
175 #define HAMMER2_XID_MIN			0x00000000U
176 #define HAMMER2_XID_MAX			0x7FFFFFFFU
177 
178 /*
179  * Cap the dynamic calculation for the maximum number of dirty
180  * chains and dirty inodes allowed.
181  */
182 #define HAMMER2_LIMIT_DIRTY_CHAINS	(1024*1024)
183 #define HAMMER2_LIMIT_DIRTY_INODES	(65536)
184 
185 /*
186  * The chain structure tracks a portion of the media topology from the
187  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
188  * data blocks, and freemap nodes and leaves.
189  *
190  * The chain structure utilizes a simple singly-homed topology and the
191  * chain's in-memory topology will move around as the chains do, due mainly
192  * to renames and indirect block creation.
193  *
194  * Block Table Updates
195  *
196  *	Block table updates for insertions and updates are delayed until the
197  *	flush.  This allows us to avoid having to modify the parent chain
198  *	all the way to the root.
199  *
200  *	Block table deletions are performed immediately (modifying the parent
201  *	in the process) because the flush code uses the chain structure to
202  *	track delayed updates and the chain will be (likely) gone or moved to
203  *	another location in the topology after a deletion.
204  *
205  *	A prior iteration of the code tried to keep the relationship intact
206  *	on deletes by doing a delete-duplicate operation on the chain, but
207  *	it added way too much complexity to the codebase.
208  *
209  * Flush Synchronization
210  *
211  *	The flush code must flush modified chains bottom-up.  Because chain
212  *	structures can shift around and are NOT topologically stable,
213  *	modified chains are independently indexed for the flush.  As the flush
214  *	runs it modifies (or further modifies) and updates the parents,
215  *	propagating the flush all the way to the volume root.
216  *
217  *	Modifying front-end operations can occur during a flush but will block
218  *	in two cases: (1) when the front-end tries to operate on the inode
219  *	currently in the midst of being flushed and (2) if the front-end
220  *	crosses an inode currently being flushed (such as during a rename).
221  *	So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
222  *	the flusher is currently working on "a/b/c", the rename will block
223  *	temporarily in order to ensure that "x" exists in one place or the
224  *	other.
225  *
226  *	Meta-data statistics are updated by the flusher.  The front-end will
227  *	make estimates but meta-data must be fully synchronized only during a
228  *	flush in order to ensure that it remains correct across a crash.
229  *
230  *	Multiple flush synchronizations can theoretically be in-flight at the
231  *	same time but the implementation is not coded to handle the case and
232  *	currently serializes them.
233  *
234  * Snapshots:
235  *
236  *	Snapshots currently require the subdirectory tree being snapshotted
237  *	to be flushed.  The snapshot then creates a new super-root inode which
238  *	copies the flushed blockdata of the directory or file that was
239  *	snapshotted.
240  *
241  * RBTREE NOTES:
242  *
243  *	- Note that the radix tree runs in powers of 2 only so sub-trees
244  *	  cannot straddle edges.
245  */
246 RB_HEAD(hammer2_chain_tree, hammer2_chain);
247 TAILQ_HEAD(h2_flush_list, hammer2_chain);
248 TAILQ_HEAD(h2_core_list, hammer2_chain);
249 
250 #define CHAIN_CORE_DELETE_BMAP_ENTRIES	\
251 	(HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
252 
253 struct hammer2_reptrack {
254 	hammer2_spin_t	spin;
255 	struct hammer2_reptrack *next;
256 	struct hammer2_chain	*chain;
257 };
258 
259 /*
260  * Core topology for chain (embedded in chain).  Protected by a spinlock.
261  */
262 struct hammer2_chain_core {
263 	hammer2_spin_t	spin;
264 	struct hammer2_reptrack *reptrack;
265 	struct hammer2_chain_tree rbtree; /* sub-chains */
266 	int		live_zero;	/* blockref array opt */
267 	u_int		live_count;	/* live (not deleted) chains in tree */
268 	u_int		chain_count;	/* live + deleted chains under core */
269 	int		generation;	/* generation number (inserts only) */
270 };
271 
272 typedef struct hammer2_chain_core hammer2_chain_core_t;
273 
274 RB_HEAD(hammer2_io_tree, hammer2_io);
275 
276 /*
277  * DIO - Management structure wrapping system buffer cache.
278  *
279  * HAMMER2 uses an I/O abstraction that allows it to cache and manipulate
280  * fixed-sized filesystem buffers fronted by variable-sized hammer2_chain
281  * structures.
282  */
283 struct hammer2_io {
284 	RB_ENTRY(hammer2_io) rbnode;	/* indexed by device offset */
285 	struct hammer2_dev *hmp;
286 	struct buf	*bp;
287 	off_t		pbase;
288 	uint64_t	refs;
289 	int		psize;
290 	int		act;		/* activity */
291 	int		btype;		/* approximate BREF_TYPE_* */
292 	int		ticks;
293 	int		error;
294 	int		unused01;
295 	uint64_t	dedup_valid;	/* valid for dedup operation */
296 	uint64_t	dedup_alloc;	/* allocated / de-dupable */
297 };
298 
299 typedef struct hammer2_io hammer2_io_t;
300 
301 #define HAMMER2_DIO_INPROG	0x8000000000000000LLU	/* bio in progress */
302 #define HAMMER2_DIO_GOOD	0x4000000000000000LLU	/* dio->bp is stable */
303 #define HAMMER2_DIO_WAITING	0x2000000000000000LLU	/* wait on INPROG */
304 #define HAMMER2_DIO_DIRTY	0x1000000000000000LLU	/* flush last drop */
305 
306 #define HAMMER2_DIO_MASK	0x00FFFFFFFFFFFFFFLLU
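
/*
 * Illustrative note (an assumption drawn from the mask definitions above):
 * dio->refs packs the DIO flag bits into the high bits and a reference
 * count into the low bits covered by HAMMER2_DIO_MASK, e.g.:
 *
 *	count = dio->refs & HAMMER2_DIO_MASK;	(current reference count)
 *	if (dio->refs & HAMMER2_DIO_GOOD)	(dio->bp is stable and usable)
 *		...
 */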
307 
308 /*
309  * Primary chain structure keeps track of the topology in-memory.
310  */
311 struct hammer2_chain {
312 	hammer2_mtx_t		lock;
313 	hammer2_chain_core_t	core;
314 	RB_ENTRY(hammer2_chain) rbnode;		/* live chain(s) */
315 	hammer2_blockref_t	bref;
316 	struct hammer2_chain	*parent;
317 	struct hammer2_dev	*hmp;
318 	struct hammer2_pfs	*pmp;		/* A PFS or super-root (spmp) */
319 
320 	struct lock	diolk;			/* xop focus interlock */
321 	hammer2_io_t	*dio;			/* physical data buffer */
322 	hammer2_media_data_t *data;		/* data pointer shortcut */
323 	u_int		bytes;			/* physical data size */
324 	u_int		flags;
325 	u_int		refs;
326 	u_int		lockcnt;
327 	int		error;			/* on-lock data error state */
328 	int		cache_index;		/* heur speeds up lookup */
329 
330 	TAILQ_ENTRY(hammer2_chain) flush_node;	/* flush list */
331 	TAILQ_ENTRY(hammer2_chain) lru_node;	/* 0-refs LRU */
332 };
333 
334 typedef struct hammer2_chain hammer2_chain_t;
335 
336 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
337 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
338 
339 /*
340  * Special notes on flags:
341  *
342  * INITIAL	- This flag allows a chain to be created and for storage to
343  *		  be allocated without having to immediately instantiate the
344  *		  related buffer.  The data is assumed to be all-zeros.  It
345  *		  is primarily used for indirect blocks.
346  *
347  * MODIFIED	- The chain's media data has been modified.  Prevents chain
348  *		  free on lastdrop if still in the topology.
349  *
350  * UPDATE	- Chain might not be modified but parent blocktable needs
351  *		  an update.  Prevents chain free on lastdrop if still in
352  *		  the topology.
353  *
354  * FICTITIOUS	- Faked chain as a placeholder for an error condition.  This
355  *		  chain is unsuitable for I/O.
356  *
357  * BMAPPED	- Indicates that the chain is present in the parent blockmap.
358  *
359  * BMAPUPD	- Indicates that the chain is present but needs to be updated
360  *		  in the parent blockmap.
361  */
362 #define HAMMER2_CHAIN_MODIFIED		0x00000001	/* dirty chain data */
363 #define HAMMER2_CHAIN_ALLOCATED		0x00000002	/* kmalloc'd chain */
364 #define HAMMER2_CHAIN_DESTROY		0x00000004
365 #define HAMMER2_CHAIN_DEDUPABLE		0x00000008	/* registered w/dedup */
366 #define HAMMER2_CHAIN_DELETED		0x00000010	/* deleted chain */
367 #define HAMMER2_CHAIN_INITIAL		0x00000020	/* initial create */
368 #define HAMMER2_CHAIN_UPDATE		0x00000040	/* need parent update */
369 #define HAMMER2_CHAIN_UNUSED0080	0x00000080
370 #define HAMMER2_CHAIN_TESTEDGOOD	0x00000100	/* crc tested good */
371 #define HAMMER2_CHAIN_ONFLUSH		0x00000200	/* on a flush list */
372 #define HAMMER2_CHAIN_FICTITIOUS	0x00000400	/* unsuitable for I/O */
373 #define HAMMER2_CHAIN_VOLUMESYNC	0x00000800	/* needs volume sync */
374 #define HAMMER2_CHAIN_UNUSED1000	0x00001000
375 #define HAMMER2_CHAIN_COUNTEDBREFS	0x00002000	/* block table stats */
376 #define HAMMER2_CHAIN_ONRBTREE		0x00004000	/* on parent RB tree */
377 #define HAMMER2_CHAIN_ONLRU		0x00008000	/* on LRU list */
378 #define HAMMER2_CHAIN_EMBEDDED		0x00010000	/* embedded data */
379 #define HAMMER2_CHAIN_RELEASE		0x00020000	/* don't keep around */
380 #define HAMMER2_CHAIN_BMAPPED		0x00040000	/* present in blkmap */
381 #define HAMMER2_CHAIN_BMAPUPD		0x00080000	/* +needs updating */
382 #define HAMMER2_CHAIN_IOINPROG		0x00100000	/* I/O interlock */
383 #define HAMMER2_CHAIN_IOSIGNAL		0x00200000	/* I/O interlock */
384 #define HAMMER2_CHAIN_PFSBOUNDARY	0x00400000	/* super->pfs inode */
385 #define HAMMER2_CHAIN_HINT_LEAF_COUNT	0x00800000	/* redo leaf count */
386 #define HAMMER2_CHAIN_LRUHINT		0x01000000	/* was reused */
387 
388 #define HAMMER2_CHAIN_FLUSH_MASK	(HAMMER2_CHAIN_MODIFIED |	\
389 					 HAMMER2_CHAIN_UPDATE |		\
390 					 HAMMER2_CHAIN_ONFLUSH |	\
391 					 HAMMER2_CHAIN_DESTROY)
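
/*
 * Illustrative sketch: a chain is of interest to the flusher when any of the
 * bits collected in HAMMER2_CHAIN_FLUSH_MASK are set, e.g.:
 *
 *	if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK)
 *		(the chain must be visited by the flush)
 */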
392 
393 /*
394  * Hammer2 error codes, used by chain->error and cluster->error.  The error
395  * code is typically set at lock time if I/O was requested, and otherwise
396  * set when the I/O is actually performed.  If set for a cluster it generally
397  * means that the cluster code could not find a valid copy to present.
398  *
399  * All H2 error codes are flags and can be accumulated by ORing them
400  * together.
401  *
402  * IO		- An I/O error occurred
403  * CHECK	- I/O succeeded but did not match the check code
404  * INCOMPLETE	- A cluster is not complete enough to use, or
405  *		  a chain cannot be loaded because its parent has an error.
406  *
407  * NOTE: API allows callers to check zero/non-zero to determine if an error
408  *	 condition exists.
409  *
410  * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
411  *	 NULL on other errors.  Check chain->error, not chain->data.
412  */
413 #define HAMMER2_ERROR_NONE		0	/* no error (must be 0) */
414 #define HAMMER2_ERROR_EIO		0x00000001	/* device I/O error */
415 #define HAMMER2_ERROR_CHECK		0x00000002	/* check code error */
416 #define HAMMER2_ERROR_INCOMPLETE	0x00000004	/* incomplete cluster */
417 #define HAMMER2_ERROR_DEPTH		0x00000008	/* tmp depth limit */
418 #define HAMMER2_ERROR_BADBREF		0x00000010	/* illegal bref */
419 #define HAMMER2_ERROR_ENOSPC		0x00000020	/* allocation failure */
420 #define HAMMER2_ERROR_ENOENT		0x00000040	/* entry not found */
421 #define HAMMER2_ERROR_ENOTEMPTY		0x00000080	/* dir not empty */
422 #define HAMMER2_ERROR_EAGAIN		0x00000100	/* retry */
423 #define HAMMER2_ERROR_ENOTDIR		0x00000200	/* not directory */
424 #define HAMMER2_ERROR_EISDIR		0x00000400	/* is directory */
425 #define HAMMER2_ERROR_EINPROGRESS	0x00000800	/* already running */
426 #define HAMMER2_ERROR_ABORTED		0x00001000	/* aborted operation */
427 #define HAMMER2_ERROR_EOF		0x00002000	/* end of scan */
428 #define HAMMER2_ERROR_EINVAL		0x00004000	/* catch-all */
429 #define HAMMER2_ERROR_EEXIST		0x00008000	/* entry exists */
430 #define HAMMER2_ERROR_EDEADLK		0x00010000
431 #define HAMMER2_ERROR_ESRCH		0x00020000
432 #define HAMMER2_ERROR_ETIMEDOUT		0x00040000
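
/*
 * Illustrative sketch only: because H2 error codes are flags they can be
 * accumulated across several chains and converted to a single errno at the
 * end via hammer2_error_to_errno() (defined near the end of this header).
 * The function below is hypothetical.
 */
#if 0
static __inline int
hammer2_example_error(struct hammer2_chain *chain1, struct hammer2_chain *chain2)
{
	int error = 0;

	error |= chain1->error;		/* e.g. HAMMER2_ERROR_EIO */
	error |= chain2->error;		/* e.g. HAMMER2_ERROR_CHECK */
	if (error)			/* any non-zero value indicates failure */
		return (hammer2_error_to_errno(error));
	return (0);
}
#endif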
433 
434 /*
435  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
436  *
437  * NOTES:
438  *	NODATA	    - Asks that the chain->data not be resolved in order
439  *		      to avoid I/O.
440  *
441  *	NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
442  *		      the inode itself if the inode is in DIRECTDATA mode
443  *		      (i.e. file is <= 512 bytes).  Used by the synchronization
444  *		      code to prevent confusion.
445  *
446  *	SHARED	    - The input chain is expected to be locked shared,
447  *		      and the output chain is locked shared.
448  *
449  *	MATCHIND    - Allows an indirect block / freemap node to be returned
450  *		      when the passed key range matches the radix.  Remember
451  *		      that key_end is inclusive (e.g. {0x000,0xFFF},
452  *		      not {0x000,0x1000}).
453  *
454  *		      (Cannot be used for remote or cluster ops).
455  *
456  *	ALLNODES    - Allows NULL focus.
457  *
458  *	ALWAYS	    - Always resolve the data.  If ALWAYS and NODATA are both
459  *		      missing, bulk file data is not resolved but inodes and
460  *		      other meta-data will be.
461  */
462 #define HAMMER2_LOOKUP_UNUSED0001	0x00000001
463 #define HAMMER2_LOOKUP_NODATA		0x00000002	/* data left NULL */
464 #define HAMMER2_LOOKUP_NODIRECT		0x00000004	/* no offset=0 DD */
465 #define HAMMER2_LOOKUP_SHARED		0x00000100
466 #define HAMMER2_LOOKUP_MATCHIND		0x00000200	/* return all chains */
467 #define HAMMER2_LOOKUP_ALLNODES		0x00000400	/* allow NULL focus */
468 #define HAMMER2_LOOKUP_ALWAYS		0x00000800	/* resolve data */
469 #define HAMMER2_LOOKUP_UNUSED1000	0x00001000
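
/*
 * Illustrative note: because key_end is inclusive, a lookup covering a whole
 * power-of-2 aligned key range is typically expressed as
 *
 *	key_end = key_beg + (((hammer2_key_t)1 << keybits) - 1);
 *
 * where keybits is the radix of the range (so {0x000,0xFFF} covers a 4KB
 * aligned range, not {0x000,0x1000}).
 */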
470 
471 /*
472  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
473  *
474  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
475  *	 blocks in the INITIAL-create state.
476  */
477 #define HAMMER2_MODIFY_OPTDATA		0x00000002	/* data can be NULL */
478 #define HAMMER2_MODIFY_NO_MODIFY_TID	0x00000004
479 #define HAMMER2_MODIFY_UNUSED0008	0x00000008
480 
481 /*
482  * Flags passed to hammer2_chain_lock()
483  *
484  * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
485  *	 will be made to either the cluster being locked or any underlying
486  *	 cluster.  It allows the cluster to lock and access data for a subset
487  *	 of available nodes instead of all available nodes.
488  *
489  * NOTE: NONBLOCK is only used for hammer2_chain_repparent() and getparent(),
490  *	 other functions (e.g. hammer2_chain_lookup(), etc) do not support
491  *	 it.
492  */
493 #define HAMMER2_RESOLVE_NEVER		1
494 #define HAMMER2_RESOLVE_MAYBE		2
495 #define HAMMER2_RESOLVE_ALWAYS		3
496 #define HAMMER2_RESOLVE_MASK		0x0F
497 
498 #define HAMMER2_RESOLVE_SHARED		0x10	/* request shared lock */
499 #define HAMMER2_RESOLVE_LOCKAGAIN	0x20	/* another shared lock */
500 #define HAMMER2_RESOLVE_UNUSED40	0x40
501 #define HAMMER2_RESOLVE_NONBLOCK	0x80	/* non-blocking */
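
/*
 * Illustrative sketch: the resolve level occupies the bits covered by
 * HAMMER2_RESOLVE_MASK and the remaining bits are modifiers, so a typical
 * shared, data-resolving lock request looks like
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
 *				  HAMMER2_RESOLVE_SHARED);
 *
 * and the level is recovered with (how & HAMMER2_RESOLVE_MASK).
 */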
502 
503 /*
504  * Flags passed to hammer2_chain_delete()
505  */
506 #define HAMMER2_DELETE_PERMANENT	0x0001
507 
508 /*
509  * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
510  * or hammer2_chain_create().
511  */
512 #define HAMMER2_INSERT_PFSROOT		0x0004
513 #define HAMMER2_INSERT_SAMEPARENT	0x0008
514 
515 /*
516  * Flags passed to hammer2_chain_delete_duplicate()
517  */
518 #define HAMMER2_DELDUP_RECORE		0x0001
519 
520 /*
521  * Cluster different types of storage together for allocations
522  */
523 #define HAMMER2_FREECACHE_INODE		0
524 #define HAMMER2_FREECACHE_INDIR		1
525 #define HAMMER2_FREECACHE_DATA		2
526 #define HAMMER2_FREECACHE_UNUSED3	3
527 #define HAMMER2_FREECACHE_TYPES		4
528 
529 /*
530  * hammer2_freemap_alloc() block preference
531  */
532 #define HAMMER2_OFF_NOPREF		((hammer2_off_t)-1)
533 
534 /*
535  * BMAP read-ahead maximum parameters
536  */
537 #define HAMMER2_BMAP_COUNT		16	/* max bmap read-ahead */
538 #define HAMMER2_BMAP_BYTES		(HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
539 
540 /*
541  * hammer2_freemap_adjust()
542  */
543 #define HAMMER2_FREEMAP_DORECOVER	1
544 #define HAMMER2_FREEMAP_DOMAYFREE	2
545 #define HAMMER2_FREEMAP_DOREALFREE	3
546 
547 /*
548  * HAMMER2 cluster - A set of chains representing the same entity.
549  *
550  * hammer2_cluster typically represents a temporary set of representative
551  * chains.  The one exception is that a hammer2_cluster is embedded in
552  * hammer2_inode.  This embedded cluster is ONLY used to track the
553  * representative chains and cannot be directly locked.
554  *
555  * A cluster is usually temporary (and thus per-thread) for locking purposes,
556  * allowing us to embed the asynchronous storage required for cluster
557  * operations in the cluster itself and adjust the state and status without
558  * having to worry too much about SMP issues.
559  *
560  * The exception is the cluster embedded in the hammer2_inode structure.
561  * This is used to cache the cluster state on an inode-by-inode basis.
562  * Individual hammer2_chain structures not incorporated into clusters might
563  * also stick around to cache miscellaneous elements.
564  *
565  * Because the cluster is a 'working copy' and is usually subject to cluster
566  * quorum rules, it is quite possible for us to end up with an insufficient
567  * number of live chains to execute an operation.  If an insufficient number
568  * of chains remain in a working copy, the operation may have to be
569  * downgraded, retried, stalled until the requisite number of chains become
570  * available, or possibly even error out depending on the mount type.
571  *
572  * A cluster's focus is set when it is locked.  The focus can only be set
573  * to a chain still part of the synchronized set.
574  */
575 #define HAMMER2_XOPFIFO		16
576 #define HAMMER2_XOPFIFO_MASK	(HAMMER2_XOPFIFO - 1)
577 #define HAMMER2_XOPGROUPS	32
578 #define HAMMER2_XOPGROUPS_MASK	(HAMMER2_XOPGROUPS - 1)
579 
580 #define HAMMER2_MAXCLUSTER	8
581 #define HAMMER2_XOPMASK_CLUSTER	(uint64_t)((1LLU << HAMMER2_MAXCLUSTER) - 1)
582 #define HAMMER2_XOPMASK_VOP	(uint64_t)0x0000000080000000LLU
583 #define HAMMER2_XOPMASK_FIFOW	(uint64_t)0x0000000040000000LLU
584 #define HAMMER2_XOPMASK_WAIT	(uint64_t)0x0000000020000000LLU
585 #define HAMMER2_XOPMASK_FEED	(uint64_t)0x0000000100000000LLU
586 
587 #define HAMMER2_XOPMASK_ALLDONE	(HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER)
588 
589 #define HAMMER2_SPECTHREADS	1	/* sync */
590 
591 struct hammer2_cluster_item {
592 	hammer2_chain_t		*chain;
593 	int			error;
594 	uint32_t		flags;
595 };
596 
597 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
598 
599 /*
600  * INVALID	- Invalid for focus, i.e. not part of synchronized set.
601  *		  Once set, this bit is sticky across operations.
602  *
603  * FEMOD	- Indicates that front-end modifying operations can
604  *		  mess with this entry and that a MODSYNC copy will also
605  *		  affect it.
606  */
607 #define HAMMER2_CITEM_INVALID	0x00000001
608 #define HAMMER2_CITEM_FEMOD	0x00000002
609 #define HAMMER2_CITEM_NULL	0x00000004
610 
611 struct hammer2_cluster {
612 	int			refs;		/* track for deallocation */
613 	int			ddflag;
614 	struct hammer2_pfs	*pmp;
615 	uint32_t		flags;
616 	int			nchains;
617 	int			error;		/* error code valid on lock */
618 	int			focus_index;
619 	hammer2_chain_t		*focus;		/* current focus (or mod) */
620 	hammer2_cluster_item_t	array[HAMMER2_MAXCLUSTER];
621 };
622 
623 typedef struct hammer2_cluster	hammer2_cluster_t;
624 
625 /*
626  * WRHARD	- Hard mounts can write fully synchronized
627  * RDHARD	- Hard mounts can read fully synchronized
628  * UNHARD	- Unsynchronized masters present
629  * NOHARD	- No masters visible
630  * WRSOFT	- Soft mounts can write to at least the SOFT_MASTER
631  * RDSOFT	- Soft mounts can read from at least a SOFT_SLAVE
632  * UNSOFT	- Unsynchronized slaves present
633  * NOSOFT	- No slaves visible
634  * RDSLAVE	- slaves are accessible (possibly unsynchronized or remote).
635  * MSYNCED	- All masters are fully synchronized
636  * SSYNCED	- All known local slaves are fully synchronized to masters
637  *
638  * All available masters are always incorporated.  All PFSs belonging to a
639  * cluster (master, slave, copy, whatever) always try to synchronize the
640  * total number of known masters in the PFS's root inode.
641  *
642  * A cluster might have access to many slaves, copies, or caches, but we
643  * have a limited number of cluster slots.  Any such elements which are
644  * directly mounted from block device(s) will always be incorporated.   Note
645  * that SSYNCED only applies to such elements which are directly mounted,
646  * not to any remote slaves, copies, or caches that could be available.  These
647  * bits are used to monitor and drive our synchronization threads.
648  *
649  * When asking the question 'is any data accessible at all', a simple
650  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
651  * these bits are set the object can be read with certain caveats:
652  * RDHARD - no caveats; RDSOFT - authoritative but might not be synchronized;
653  * and RDSLAVE - not authoritative, has some data but it could be old or
654  * incomplete.
655  *
656  * When both soft and hard mounts are available, data will be read and written
657  * via the soft mount only.  But all might still be in the cluster because
658  * background synchronization threads still need to do their work.
659  */
660 #define HAMMER2_CLUSTER_INODE	0x00000001	/* embedded in inode struct */
661 #define HAMMER2_CLUSTER_UNUSED2	0x00000002
662 #define HAMMER2_CLUSTER_LOCKED	0x00000004	/* cluster lks not recursive */
663 #define HAMMER2_CLUSTER_WRHARD	0x00000100	/* hard-mount can write */
664 #define HAMMER2_CLUSTER_RDHARD	0x00000200	/* hard-mount can read */
665 #define HAMMER2_CLUSTER_UNHARD	0x00000400	/* unsynchronized masters */
666 #define HAMMER2_CLUSTER_NOHARD	0x00000800	/* no masters visible */
667 #define HAMMER2_CLUSTER_WRSOFT	0x00001000	/* soft-mount can write */
668 #define HAMMER2_CLUSTER_RDSOFT	0x00002000	/* soft-mount can read */
669 #define HAMMER2_CLUSTER_UNSOFT	0x00004000	/* unsynchronized slaves */
670 #define HAMMER2_CLUSTER_NOSOFT	0x00008000	/* no slaves visible */
671 #define HAMMER2_CLUSTER_MSYNCED	0x00010000	/* all masters synchronized */
672 #define HAMMER2_CLUSTER_SSYNCED	0x00020000	/* known slaves synchronized */
673 
674 #define HAMMER2_CLUSTER_ANYDATA	( HAMMER2_CLUSTER_RDHARD |	\
675 				  HAMMER2_CLUSTER_RDSOFT |	\
676 				  HAMMER2_CLUSTER_RDSLAVE)
677 
678 #define HAMMER2_CLUSTER_RDOK	( HAMMER2_CLUSTER_RDHARD |	\
679 				  HAMMER2_CLUSTER_RDSOFT)
680 
681 #define HAMMER2_CLUSTER_WROK	( HAMMER2_CLUSTER_WRHARD |	\
682 				  HAMMER2_CLUSTER_WRSOFT)
683 
684 #define HAMMER2_CLUSTER_ZFLAGS	( HAMMER2_CLUSTER_WRHARD |	\
685 				  HAMMER2_CLUSTER_RDHARD |	\
686 				  HAMMER2_CLUSTER_WRSOFT |	\
687 				  HAMMER2_CLUSTER_RDSOFT |	\
688 				  HAMMER2_CLUSTER_MSYNCED |	\
689 				  HAMMER2_CLUSTER_SSYNCED)
690 
691 /*
692  * Helper functions (cluster must be locked for flags to be valid).
693  */
694 static __inline
695 int
696 hammer2_cluster_rdok(hammer2_cluster_t *cluster)
697 {
698 	return (cluster->flags & HAMMER2_CLUSTER_RDOK);
699 }
700 
701 static __inline
702 int
703 hammer2_cluster_wrok(hammer2_cluster_t *cluster)
704 {
705 	return (cluster->flags & HAMMER2_CLUSTER_WROK);
706 }
707 
708 RB_HEAD(hammer2_inode_tree, hammer2_inode);	/* ip->rbnode */
709 TAILQ_HEAD(inoq_head, hammer2_inode);		/* ip->entry */
710 TAILQ_HEAD(depq_head, hammer2_depend);		/* depend->entry */
711 
712 struct hammer2_depend {
713 	TAILQ_ENTRY(hammer2_depend) entry;
714 	struct inoq_head	sideq;
715 	long			count;
716 	int			pass2;
717 	int			unused01;
718 };
719 
720 typedef struct hammer2_depend hammer2_depend_t;
721 
722 /*
723  * A hammer2 inode.
724  *
725  * NOTE: The inode-embedded cluster is never used directly for I/O (since
726  *	 it may be shared).  Instead it will be replicated-in and synchronized
727  *	 back out if changed.
728  */
729 struct hammer2_inode {
730 	RB_ENTRY(hammer2_inode) rbnode;		/* inumber lookup (HL) */
731 	TAILQ_ENTRY(hammer2_inode) entry;	/* SYNCQ/SIDEQ */
732 	hammer2_depend_t	*depend;	/* non-NULL if SIDEQ */
733 	hammer2_depend_t	depend_static;	/* (in-place allocation) */
734 	hammer2_mtx_t		lock;		/* inode lock */
735 	hammer2_mtx_t		truncate_lock;	/* prevent truncates */
736 	struct hammer2_pfs	*pmp;		/* PFS mount */
737 	struct vnode		*vp;
738 	struct spinlock		cluster_spin;	/* update cluster */
739 	hammer2_cluster_t	cluster;
740 	struct lockf		advlock;
741 	u_int			flags;
742 	u_int			refs;		/* +vpref, +flushref */
743 	uint8_t			comp_heuristic;
744 	hammer2_inode_meta_t	meta;		/* copy of meta-data */
745 	hammer2_off_t		osize;
746 };
747 
748 typedef struct hammer2_inode hammer2_inode_t;
749 
750 /*
751  * MODIFIED	- Inode is in a modified state, ip->meta may have changes.
752  * RESIZED	- Inode truncated (any) or inode extended beyond
753  *		  EMBEDDED_BYTES.
754  *
755  * SYNCQ	- Inode is included in the current filesystem sync.  The
756  *		  DELETING and CREATING flags will be acted upon.
757  *
758  * SIDEQ	- Inode has likely been disconnected from the vnode topology
759  *		  and so is not visible to the vnode-based filesystem syncer
760  *		  code, but is dirty and must be included in the next
761  *		  filesystem sync.  These inodes are moved to the SYNCQ at
762  *		  the time the sync occurs.
763  *
764  *		  If a vnode is attached, inodes are not placed on this
765  *		  queue simply because they have become dirty.
766  *
767  * DELETING	- Inode is flagged for deletion during the next filesystem
768  *		  sync.  That is, the inode's chain is currently connected
769  *		  and must be deleted during the current or next fs sync.
770  *
771  * CREATING	- Inode is flagged for creation during the next filesystem
772  *		  sync.  That is, the inode's chain topology exists (so
773  *		  kernel buffer flushes can occur), but is currently
774  *		  disconnected and must be inserted during the current or
775  *		  next fs sync.  If the DELETING flag is also set, the
776  *		  topology can be thrown away instead.
777  *
778  * If an inode that is already part of the current filesystem sync is
779  * modified by the frontend, including by buffer flushes, the inode lock
780  * code detects the SYNCQ flag and moves the inode to the head of the
781  * flush-in-progress, then blocks until the flush has gotten past it.
782  */
783 #define HAMMER2_INODE_MODIFIED		0x0001
784 #define HAMMER2_INODE_SROOT		0x0002	/* kmalloc special case */
785 #define HAMMER2_INODE_RENAME_INPROG	0x0004
786 #define HAMMER2_INODE_ONRBTREE		0x0008
787 #define HAMMER2_INODE_RESIZED		0x0010	/* requires inode_fsync */
788 #define HAMMER2_INODE_UNUSED0020	0x0020
789 #define HAMMER2_INODE_ISUNLINKED	0x0040
790 #define HAMMER2_INODE_METAGOOD		0x0080	/* inode meta-data good */
791 #define HAMMER2_INODE_SIDEQ		0x0100	/* on side processing queue */
792 #define HAMMER2_INODE_NOSIDEQ		0x0200	/* disable sideq operation */
793 #define HAMMER2_INODE_DIRTYDATA		0x0400	/* interlocks inode flush */
794 #define HAMMER2_INODE_SYNCQ		0x0800	/* sync interlock, sequenced */
795 #define HAMMER2_INODE_DELETING		0x1000	/* sync interlock, chain topo */
796 #define HAMMER2_INODE_CREATING		0x2000	/* sync interlock, chain topo */
797 #define HAMMER2_INODE_SYNCQ_WAKEUP	0x4000	/* sync interlock wakeup */
798 #define HAMMER2_INODE_SYNCQ_PASS2	0x8000	/* force retry delay */
799 
800 #define HAMMER2_INODE_DIRTY		(HAMMER2_INODE_MODIFIED |	\
801 					 HAMMER2_INODE_DIRTYDATA |	\
802 					 HAMMER2_INODE_DELETING |	\
803 					 HAMMER2_INODE_CREATING)
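
/*
 * Illustrative sketch (simplified; the real logic lives in hammer2_inode.c):
 * an inode carrying any HAMMER2_INODE_DIRTY bit must be picked up by the
 * current or next filesystem sync, and if it is not already queued it is
 * placed on the SIDEQ via hammer2_inode_delayed_sideq(), e.g.:
 *
 *	if ((ip->flags & HAMMER2_INODE_DIRTY) &&
 *	    (ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0)
 *		hammer2_inode_delayed_sideq(ip);
 */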
804 
805 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
806 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
807 		hammer2_tid_t);
808 
809 /*
810  * Transaction management sub-structure under hammer2_pfs
811  */
812 struct hammer2_trans {
813 	uint32_t		flags;
814 	uint32_t		sync_wait;
815 };
816 
817 typedef struct hammer2_trans hammer2_trans_t;
818 
819 #define HAMMER2_TRANS_ISFLUSH		0x80000000	/* flush code */
820 #define HAMMER2_TRANS_BUFCACHE		0x40000000	/* bio strategy */
821 #define HAMMER2_TRANS_SIDEQ		0x20000000	/* run sideq */
822 #define HAMMER2_TRANS_UNUSED10		0x10000000
823 #define HAMMER2_TRANS_WAITING		0x08000000	/* someone waiting */
824 #define HAMMER2_TRANS_RESCAN		0x04000000	/* rescan sideq */
825 #define HAMMER2_TRANS_MASK		0x00FFFFFF	/* count mask */
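
/*
 * Illustrative note: trans.flags packs the flag bits above into the high
 * bits and an active-transaction count into the bits covered by
 * HAMMER2_TRANS_MASK, e.g.:
 *
 *	count = pmp->trans.flags & HAMMER2_TRANS_MASK;
 *	if (pmp->trans.flags & HAMMER2_TRANS_ISFLUSH)
 *		(a flush transaction is in progress)
 */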
826 
827 #define HAMMER2_FREEMAP_HEUR_NRADIX	4	/* pwr 2 PBUFRADIX-MINIORADIX */
828 #define HAMMER2_FREEMAP_HEUR_TYPES	8
829 #define HAMMER2_FREEMAP_HEUR_SIZE	(HAMMER2_FREEMAP_HEUR_NRADIX * \
830 					 HAMMER2_FREEMAP_HEUR_TYPES)
831 
832 #define HAMMER2_DEDUP_HEUR_SIZE		(65536 * 4)
833 #define HAMMER2_DEDUP_HEUR_MASK		(HAMMER2_DEDUP_HEUR_SIZE - 1)
834 
835 #define HAMMER2_FLUSH_TOP		0x0001
836 #define HAMMER2_FLUSH_ALL		0x0002
837 #define HAMMER2_FLUSH_INODE_STOP	0x0004	/* stop at sub-inode */
838 #define HAMMER2_FLUSH_FSSYNC		0x0008	/* part of filesystem sync */
839 
840 
841 /*
842  * Hammer2 support thread element.
843  *
844  * Potentially many support threads can hang off of hammer2, primarily
845  * off the hammer2_pfs structure.  Typically:
846  *
847  * td x Nodes		 	A synchronization thread for each node.
848  * td x Nodes x workers		Worker threads for frontend operations.
849  * td x 1			Bioq thread for logical buffer writes.
850  *
851  * In addition, the synchronization thread(s) associated with the
852  * super-root PFS (spmp) for a node is responsible for automatic bulkfree
853  * and dedup scans.
854  */
855 struct hammer2_thread {
856 	struct hammer2_pfs *pmp;
857 	struct hammer2_dev *hmp;
858 	hammer2_xop_list_t xopq;
859 	thread_t	td;
860 	uint32_t	flags;
861 	int		depth;
862 	int		clindex;	/* cluster element index */
863 	int		repidx;
864 	char		*scratch;	/* MAXPHYS */
865 };
866 
867 typedef struct hammer2_thread hammer2_thread_t;
868 
869 #define HAMMER2_THREAD_UNMOUNTING	0x0001	/* unmount request */
870 #define HAMMER2_THREAD_DEV		0x0002	/* related to dev, not pfs */
871 #define HAMMER2_THREAD_WAITING		0x0004	/* thread in idle tsleep */
872 #define HAMMER2_THREAD_REMASTER		0x0008	/* remaster request */
873 #define HAMMER2_THREAD_STOP		0x0010	/* exit request */
874 #define HAMMER2_THREAD_FREEZE		0x0020	/* force idle */
875 #define HAMMER2_THREAD_FROZEN		0x0040	/* thread is frozen */
876 #define HAMMER2_THREAD_XOPQ		0x0080	/* work pending */
877 #define HAMMER2_THREAD_STOPPED		0x0100	/* thread has stopped */
878 #define HAMMER2_THREAD_UNFREEZE		0x0200
879 
880 #define HAMMER2_THREAD_WAKEUP_MASK	(HAMMER2_THREAD_UNMOUNTING |	\
881 					 HAMMER2_THREAD_REMASTER |	\
882 					 HAMMER2_THREAD_STOP |		\
883 					 HAMMER2_THREAD_FREEZE |	\
884 					 HAMMER2_THREAD_XOPQ)
885 
886 /*
887  * Support structure for dedup heuristic.
888  */
889 struct hammer2_dedup {
890 	hammer2_off_t	data_off;
891 	uint64_t	data_crc;
892 	uint32_t	ticks;
893 	uint32_t	unused03;
894 };
895 
896 typedef struct hammer2_dedup hammer2_dedup_t;
897 
898 /*
899  * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
900  *
901  * This structure is used to distribute a VOP operation across multiple
902  * nodes.  It provides a rendezvous for concurrent node execution and
903  * can be detached from the frontend operation to allow the frontend to
904  * return early.
905  *
906  * This structure also sequences operations on up to three inodes.
907  */
908 typedef void (*hammer2_xop_func_t)(union hammer2_xop *xop, void *scratch,
909 				   int clindex);
910 
911 struct hammer2_xop_desc {
912 	hammer2_xop_func_t	storage_func;	/* local storage function */
913 	hammer2_xop_func_t	dmsg_dispatch;	/* dmsg dispatch function */
914 	hammer2_xop_func_t	dmsg_process;	/* dmsg processing function */
915 	const char		*id;
916 };
917 
918 typedef struct hammer2_xop_desc hammer2_xop_desc_t;
919 
920 struct hammer2_xop_fifo {
921 	TAILQ_ENTRY(hammer2_xop_head) entry;
922 	hammer2_chain_t		*array[HAMMER2_XOPFIFO];
923 	int			errors[HAMMER2_XOPFIFO];
924 	int			ri;
925 	int			wi;
926 	int			flags;
927 	hammer2_thread_t	*thr;
928 };
929 
930 typedef struct hammer2_xop_fifo hammer2_xop_fifo_t;
931 
932 #define HAMMER2_XOP_FIFO_RUN	0x0001
933 #define HAMMER2_XOP_FIFO_STALL	0x0002
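
/*
 * Illustrative sketch only (simplified producer side; the real feed code
 * lives in hammer2_xop.c and also blocks while the fifo is full): ri and wi
 * are free-running indices masked into the fixed-size array, so the fifo is
 * full when (wi - ri) reaches HAMMER2_XOPFIFO.
 */
#if 0
static __inline void
hammer2_example_xop_feed(hammer2_xop_fifo_t *fifo, hammer2_chain_t *chain,
			 int error)
{
	int i;

	/* (real code waits here while fifo->wi - fifo->ri == HAMMER2_XOPFIFO) */
	i = fifo->wi & HAMMER2_XOPFIFO_MASK;	/* wrap into the array */
	fifo->array[i] = chain;
	fifo->errors[i] = error;
	cpu_sfence();				/* data visible before index bump */
	++fifo->wi;
}
#endif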
934 
935 struct hammer2_xop_head {
936 	hammer2_xop_desc_t	*desc;
937 	hammer2_tid_t		mtid;
938 	struct hammer2_inode	*ip1;
939 	struct hammer2_inode	*ip2;
940 	struct hammer2_inode	*ip3;
941 	uint64_t		run_mask;
942 	uint64_t		chk_mask;
943 	int			flags;
944 	int			state;
945 	int			error;
946 	hammer2_key_t		collect_key;
947 	char			*name1;
948 	size_t			name1_len;
949 	char			*name2;
950 	size_t			name2_len;
951 	hammer2_xop_fifo_t	collect[HAMMER2_MAXCLUSTER];
952 	hammer2_cluster_t	cluster;	/* help collections */
953 	hammer2_io_t		*focus_dio;
954 };
955 
956 typedef struct hammer2_xop_head hammer2_xop_head_t;
957 
958 struct hammer2_xop_ipcluster {
959 	hammer2_xop_head_t	head;
960 };
961 
962 struct hammer2_xop_strategy {
963 	hammer2_xop_head_t	head;
964 	hammer2_key_t		lbase;
965 	int			finished;
966 	hammer2_mtx_t		lock;
967 	struct bio		*bio;
968 };
969 
970 struct hammer2_xop_readdir {
971 	hammer2_xop_head_t	head;
972 	hammer2_key_t		lkey;
973 };
974 
975 struct hammer2_xop_nresolve {
976 	hammer2_xop_head_t	head;
977 	hammer2_key_t		lhc;	/* if name is NULL, lhc is used */
978 };
979 
980 struct hammer2_xop_unlink {
981 	hammer2_xop_head_t	head;
982 	int			isdir;
983 	int			dopermanent;
984 };
985 
986 #define H2DOPERM_PERMANENT	0x01
987 #define H2DOPERM_FORCE		0x02
988 #define H2DOPERM_IGNINO		0x04
989 
990 struct hammer2_xop_nrename {
991 	hammer2_xop_head_t	head;
992 	hammer2_tid_t		lhc;
993 	int			ip_key;
994 };
995 
996 struct hammer2_xop_scanlhc {
997 	hammer2_xop_head_t	head;
998 	hammer2_key_t		lhc;
999 };
1000 
1001 struct hammer2_xop_scanall {
1002 	hammer2_xop_head_t	head;
1003 	hammer2_key_t		key_beg;	/* inclusive */
1004 	hammer2_key_t		key_end;	/* inclusive */
1005 	int			resolve_flags;
1006 	int			lookup_flags;
1007 };
1008 
1009 struct hammer2_xop_lookup {
1010 	hammer2_xop_head_t	head;
1011 	hammer2_key_t		lhc;
1012 };
1013 
1014 struct hammer2_xop_mkdirent {
1015 	hammer2_xop_head_t	head;
1016 	hammer2_dirent_head_t	dirent;
1017 	hammer2_key_t		lhc;
1018 };
1019 
1020 struct hammer2_xop_create {
1021 	hammer2_xop_head_t	head;
1022 	hammer2_inode_meta_t	meta;		/* initial metadata */
1023 	hammer2_key_t		lhc;
1024 	int			flags;
1025 };
1026 
1027 struct hammer2_xop_destroy {
1028 	hammer2_xop_head_t	head;
1029 };
1030 
1031 struct hammer2_xop_fsync {
1032 	hammer2_xop_head_t	head;
1033 	hammer2_inode_meta_t	meta;
1034 	hammer2_off_t		osize;
1035 	u_int			ipflags;
1036 	int			clear_directdata;
1037 };
1038 
1039 struct hammer2_xop_unlinkall {
1040 	hammer2_xop_head_t	head;
1041 	hammer2_key_t		key_beg;
1042 	hammer2_key_t		key_end;
1043 };
1044 
1045 struct hammer2_xop_connect {
1046 	hammer2_xop_head_t	head;
1047 	hammer2_key_t		lhc;
1048 };
1049 
1050 struct hammer2_xop_flush {
1051 	hammer2_xop_head_t	head;
1052 };
1053 
1054 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
1055 typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
1056 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
1057 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
1058 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
1059 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
1060 typedef struct hammer2_xop_mkdirent hammer2_xop_mkdirent_t;
1061 typedef struct hammer2_xop_create hammer2_xop_create_t;
1062 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
1063 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
1064 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
1065 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
1066 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
1067 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
1068 typedef struct hammer2_xop_connect hammer2_xop_connect_t;
1069 typedef struct hammer2_xop_flush hammer2_xop_flush_t;
1070 
1071 union hammer2_xop {
1072 	hammer2_xop_head_t	head;
1073 	hammer2_xop_ipcluster_t	xop_ipcluster;
1074 	hammer2_xop_readdir_t	xop_readdir;
1075 	hammer2_xop_nresolve_t	xop_nresolve;
1076 	hammer2_xop_unlink_t	xop_unlink;
1077 	hammer2_xop_nrename_t	xop_nrename;
1078 	hammer2_xop_strategy_t	xop_strategy;
1079 	hammer2_xop_mkdirent_t	xop_mkdirent;
1080 	hammer2_xop_create_t	xop_create;
1081 	hammer2_xop_destroy_t	xop_destroy;
1082 	hammer2_xop_fsync_t	xop_fsync;
1083 	hammer2_xop_unlinkall_t	xop_unlinkall;
1084 	hammer2_xop_scanlhc_t	xop_scanlhc;
1085 	hammer2_xop_scanall_t	xop_scanall;
1086 	hammer2_xop_lookup_t	xop_lookup;
1087 	hammer2_xop_flush_t	xop_flush;
1088 	hammer2_xop_connect_t	xop_connect;
1089 };
1090 
1091 typedef union hammer2_xop hammer2_xop_t;
1092 
1093 /*
1094  * hammer2_xop_group - Manage XOP support threads.
1095  */
1096 struct hammer2_xop_group {
1097 	hammer2_thread_t	thrs[HAMMER2_MAXCLUSTER];
1098 };
1099 
1100 typedef struct hammer2_xop_group hammer2_xop_group_t;
1101 
1102 /*
1103  * flags to hammer2_xop_collect()
1104  */
1105 #define HAMMER2_XOP_COLLECT_NOWAIT	0x00000001
1106 #define HAMMER2_XOP_COLLECT_WAITALL	0x00000002
1107 
1108 /*
1109  * flags to hammer2_xop_alloc()
1110  *
1111  * MODIFYING	- This is a modifying transaction, allocate a mtid.
1112  * RECURSE	- Recurse top-level inode (for root flushes)
1113  */
1114 #define HAMMER2_XOP_MODIFYING		0x00000001
1115 #define HAMMER2_XOP_STRATEGY		0x00000002
1116 #define HAMMER2_XOP_INODE_STOP		0x00000004
1117 #define HAMMER2_XOP_VOLHDR		0x00000008
1118 #define HAMMER2_XOP_FSSYNC		0x00000010
1119 #define HAMMER2_XOP_IROOT		0x00000020
1120 
1121 /*
1122  * Global (per partition) management structure, represents a hard block
1123  * device.  Typically referenced by hammer2_chain structures when applicable.
1124  * Typically not used for network-managed elements.
1125  *
1126  * Note that a single hammer2_dev can be indirectly tied to multiple system
1127  * mount points.  There is no direct relationship.  System mounts are
1128  * per-cluster-id, not per-block-device, and a single hard mount might contain
1129  * many PFSs and those PFSs might combine together in various ways to form
1130  * the set of available clusters.
1131  */
1132 struct hammer2_dev {
1133 	struct vnode	*devvp;		/* device vnode */
1134 	int		ronly;		/* read-only mount */
1135 	int		mount_count;	/* number of actively mounted PFSs */
1136 	TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
1137 
1138 	struct malloc_type *mchain;
1139 	int		nipstacks;
1140 	int		maxipstacks;
1141 	kdmsg_iocom_t	iocom;		/* volume-level dmsg interface */
1142 	struct spinlock	io_spin;	/* iotree, iolruq access */
1143 	struct hammer2_io_tree iotree;
1144 	int		iofree_count;
1145 	int		freemap_relaxed;
1146 	hammer2_chain_t vchain;		/* anchor chain (topology) */
1147 	hammer2_chain_t fchain;		/* anchor chain (freemap) */
1148 	struct spinlock	list_spin;
1149 	struct hammer2_pfs *spmp;	/* super-root pmp for transactions */
1150 	struct lock	vollk;		/* lockmgr lock */
1151 	struct lock	bulklk;		/* bulkfree operation lock */
1152 	struct lock	bflock;		/* bulk-free manual function lock */
1153 	hammer2_off_t	heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
1154 	hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
1155 	int		volhdrno;	/* last volhdrno written */
1156 	uint32_t	hflags;		/* HMNT2 flags applicable to device */
1157 	hammer2_off_t	free_reserved;	/* nominal free reserved */
1158 	hammer2_thread_t bfthr;		/* bulk-free thread */
1159 	char		devrepname[64];	/* for kprintf */
1160 	hammer2_ioc_bulkfree_t bflast;	/* stats for last bulkfree run */
1161 	hammer2_volume_data_t voldata;
1162 	hammer2_volume_data_t volsync;	/* synchronized voldata */
1163 };
1164 
1165 typedef struct hammer2_dev hammer2_dev_t;
1166 
1167 /*
1168  * Helper functions (chain must be locked for the error state to be valid).
1169  */
1170 static __inline
1171 int
1172 hammer2_chain_rdok(hammer2_chain_t *chain)
1173 {
1174 	return (chain->error == 0);
1175 }
1176 
1177 static __inline
1178 int
1179 hammer2_chain_wrok(hammer2_chain_t *chain)
1180 {
1181 	return (chain->error == 0 && chain->hmp->ronly == 0);
1182 }
1183 
1184 /*
1185  * Per-cluster management structure.  This structure will be tied to a
1186  * system mount point if the system is mounting the PFS, but is also used
1187  * to manage clusters encountered during the super-root scan or received
1188  * via LNK_SPANs that might not be mounted.
1189  *
1190  * This structure is also used to represent the super-root that hangs off
1191  * of a hard mount point.  The super-root is not really a cluster element.
1192  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
1193  * this than to special case super-root manipulation in the hammer2_chain*
1194  * code as being only hammer2_dev-related.
1195  *
1196  * pfs_mode and pfs_nmasters are rollup fields which critically describe
1197  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
1198  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
1199  * how many masters have been configured for a cluster and is always
1200  * applicable.  pfs_types[] is an array with 1:1 correspondence to the
1201  * iroot cluster and describes the PFS types of the nodes making up the
1202  * cluster.
1203  *
1204  * WARNING! Portions of this structure have deferred initialization.  In
1205  *	    particular, if not mounted there will be no wthread.
1206  *	    Unmounted network PFSs will also be missing iroot and numerous
1207  *	    other fields will not be initialized prior to mount.
1208  *
1209  *	    Synchronization threads are chain-specific and only applicable
1210  *	    to local hard PFS entries.  A hammer2_pfs structure may contain
1211  *	    more than one when multiple hard PFSs are present on the local
1212  *	    machine which require synchronization monitoring.  Most PFSs
1213  *	    (such as snapshots) are 1xMASTER PFSs which do not need a
1214  *	    synchronization thread.
1215  *
1216  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
1217  *	    hammer2_dev->mount_count when the pfs is associated with a mount
1218  *	    point.
1219  */
1220 struct hammer2_pfs {
1221 	struct mount		*mp;
1222 	TAILQ_ENTRY(hammer2_pfs) mntentry;	/* hammer2_pfslist */
1223 	uuid_t			pfs_clid;
1224 	hammer2_dev_t		*spmp_hmp;	/* only if super-root pmp */
1225 	hammer2_dev_t		*force_local;	/* only if 'local' mount */
1226 	hammer2_inode_t		*iroot;		/* PFS root inode */
1227 	uint8_t			pfs_types[HAMMER2_MAXCLUSTER];
1228 	char			*pfs_names[HAMMER2_MAXCLUSTER];
1229 	hammer2_dev_t		*pfs_hmps[HAMMER2_MAXCLUSTER];
1230 	hammer2_blockset_t	pfs_iroot_blocksets[HAMMER2_MAXCLUSTER];
1231 	hammer2_trans_t		trans;
1232 	struct lock		lock;		/* PFS lock for certain ops */
1233 	struct lock		lock_nlink;	/* rename and nlink lock */
1234 	struct netexport	export;		/* nfs export */
1235 	int			unused00;
1236 	int			ronly;		/* read-only mount */
1237 	int			hflags;		/* pfs-specific mount flags */
1238 	struct malloc_type	*minode;
1239 	struct malloc_type	*mmsg;
1240 	struct spinlock		inum_spin;	/* inumber lookup */
1241 	struct hammer2_inode_tree inum_tree;	/* (not applicable to spmp) */
1242 	long			inum_count;	/* #of inodes in inum_tree */
1243 	struct spinlock		lru_spin;	/* lru_list access */
1244 	struct hammer2_chain_list lru_list;	/* basis for LRU tests */
1245 	int			lru_count;	/* #of chains on LRU */
1246 	int			flags;
1247 	hammer2_tid_t		modify_tid;	/* modify transaction id */
1248 	hammer2_tid_t		inode_tid;	/* inode allocator */
1249 	uint8_t			pfs_nmasters;	/* total masters */
1250 	uint8_t			pfs_mode;	/* operating mode PFSMODE */
1251 	uint8_t			unused01;
1252 	uint8_t			unused02;
1253 	int			free_ticks;	/* free_* calculations */
1254 	long			inmem_inodes;
1255 	hammer2_off_t		free_reserved;
1256 	hammer2_off_t		free_nominal;
1257 	uint32_t		inmem_dirty_chains;
1258 	int			count_lwinprog;	/* logical write in prog */
1259 	struct spinlock		list_spin;
1260 	struct inoq_head	syncq;		/* SYNCQ flagged inodes */
1261 	struct depq_head	depq;		/* SIDEQ flagged inodes */
1262 	long			sideq_count;	/* total inodes on depq */
1263 	hammer2_thread_t	sync_thrs[HAMMER2_MAXCLUSTER];
1264 	uint32_t		cluster_flags;	/* cached cluster flags */
1265 	int			has_xop_threads;
1266 	struct spinlock		xop_spin;	/* xop sequencer */
1267 	hammer2_xop_group_t	xop_groups[HAMMER2_XOPGROUPS];
1268 };
1269 
1270 typedef struct hammer2_pfs hammer2_pfs_t;
1271 
1272 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
1273 
1274 #define HAMMER2_PMPF_SPMP	0x00000001
1275 
1276 /*
1277  * NOTE: The LRU list contains at least all the chains with refs == 0
1278  *	 that can be recycled, and may contain additional chains which
1279  *	 cannot.
1280  */
1281 #define HAMMER2_LRU_LIMIT		4096
1282 
1283 #define HAMMER2_DIRTYCHAIN_WAITING	0x80000000
1284 #define HAMMER2_DIRTYCHAIN_MASK		0x7FFFFFFF
1285 
1286 #define HAMMER2_LWINPROG_WAITING	0x80000000
1287 #define HAMMER2_LWINPROG_WAITING0	0x40000000
1288 #define HAMMER2_LWINPROG_MASK		0x3FFFFFFF
1289 
1290 /*
1291  * hammer2_cluster_check
1292  */
1293 #define HAMMER2_CHECK_NULL	0x00000001
1294 
1295 /*
1296  * Misc
1297  */
1298 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
1299 #define VTOI(vp)	((hammer2_inode_t *)(vp)->v_data)
1300 #endif
1301 
1302 #if defined(_KERNEL)
1303 
1304 MALLOC_DECLARE(M_HAMMER2);
1305 
1306 #define ITOV(ip)	((ip)->vp)
1307 
1308 /*
1309  * Currently locked chains retain the locked buffer cache buffer for
1310  * indirect blocks, and indirect blocks can be one of two sizes.  The
1311  * device buffer has to match the case to avoid deadlocking recursive
1312  * chains that might otherwise try to access different offsets within
1313  * the same device buffer.
1314  */
1315 static __inline
1316 int
1317 hammer2_devblkradix(int radix)
1318 {
1319 #if 0
1320 	if (radix <= HAMMER2_LBUFRADIX) {
1321 		return (HAMMER2_LBUFRADIX);
1322 	} else {
1323 		return (HAMMER2_PBUFRADIX);
1324 	}
1325 #endif
1326 	return (HAMMER2_PBUFRADIX);
1327 }
1328 
1329 /*
1330  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
1331  */
1332 static __inline
1333 size_t
1334 hammer2_devblksize(size_t bytes)
1335 {
1336 #if 0
1337 	if (bytes <= HAMMER2_LBUFSIZE) {
1338 		return(HAMMER2_LBUFSIZE);
1339 	} else {
1340 		KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
1341 			 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
1342 		return (HAMMER2_PBUFSIZE);
1343 	}
1344 #endif
1345 	return (HAMMER2_PBUFSIZE);
1346 }
1347 
1348 
1349 static __inline
1350 hammer2_pfs_t *
1351 MPTOPMP(struct mount *mp)
1352 {
1353 	return ((hammer2_pfs_t *)mp->mnt_data);
1354 }
1355 
1356 #define HAMMER2_DEDUP_FRAG      (HAMMER2_PBUFSIZE / 64)
1357 #define HAMMER2_DEDUP_FRAGRADIX (HAMMER2_PBUFRADIX - 6)
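
/*
 * Illustrative note (assuming the usual HAMMER2_PBUFSIZE of 65536):
 * HAMMER2_DEDUP_FRAG works out to 1024 bytes, so a full-sized device buffer
 * contains 64 fragments, one bit each in the 64-bit dedup_valid and
 * dedup_alloc fields of hammer2_io.
 */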
1358 
1359 static __inline
1360 uint64_t
1361 hammer2_dedup_mask(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes)
1362 {
1363 	int bbeg;
1364 	int bits;
1365 	uint64_t mask;
1366 
1367 	bbeg = (int)((data_off & ~HAMMER2_OFF_MASK_RADIX) - dio->pbase) >>
1368 	       HAMMER2_DEDUP_FRAGRADIX;
1369 	bits = (int)((bytes + (HAMMER2_DEDUP_FRAG - 1)) >>
1370 	       HAMMER2_DEDUP_FRAGRADIX);
1371 	mask = ((uint64_t)1 << bbeg) - 1;
1372 	if (bbeg + bits == 64)
1373 		mask = (uint64_t)-1;
1374 	else
1375 		mask = ((uint64_t)1 << (bbeg + bits)) - 1;
1376 
1377 	mask &= ~(((uint64_t)1 << bbeg) - 1);
1378 
1379 	return mask;
1380 }
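
/*
 * Worked example for hammer2_dedup_mask(), assuming the 1KB fragment size
 * noted above: a data_off 2048 bytes past dio->pbase with bytes = 4096
 * yields bbeg = 2 and bits = 4, producing mask 0x3C (fragment bits 2-5),
 * exactly covering byte offsets 2048-6143 of the dio.  The low radix bits
 * encoded in data_off are stripped via ~HAMMER2_OFF_MASK_RADIX before the
 * subtraction.
 */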
1381 
1382 static __inline
1383 int
1384 hammer2_error_to_errno(int error)
1385 {
1386 	if (error) {
1387 		if (error & HAMMER2_ERROR_EIO)
1388 			error = EIO;
1389 		else if (error & HAMMER2_ERROR_CHECK)
1390 			error = EDOM;
1391 		else if (error & HAMMER2_ERROR_ABORTED)
1392 			error = EINTR;
1393 		else if (error & HAMMER2_ERROR_BADBREF)
1394 			error = EIO;
1395 		else if (error & HAMMER2_ERROR_ENOSPC)
1396 			error = ENOSPC;
1397 		else if (error & HAMMER2_ERROR_ENOENT)
1398 			error = ENOENT;
1399 		else if (error & HAMMER2_ERROR_ENOTEMPTY)
1400 			error = ENOTEMPTY;
1401 		else if (error & HAMMER2_ERROR_EAGAIN)
1402 			error = EAGAIN;
1403 		else if (error & HAMMER2_ERROR_ENOTDIR)
1404 			error = ENOTDIR;
1405 		else if (error & HAMMER2_ERROR_EISDIR)
1406 			error = EISDIR;
1407 		else if (error & HAMMER2_ERROR_EINPROGRESS)
1408 			error = EINPROGRESS;
1409 		else if (error & HAMMER2_ERROR_EEXIST)
1410 			error = EEXIST;
1411 		else
1412 			error = EDOM;
1413 	}
1414 	return error;
1415 }
1416 
1417 static __inline
1418 int
1419 hammer2_errno_to_error(int error)
1420 {
1421 	switch(error) {
1422 	case 0:
1423 		return 0;
1424 	case EIO:
1425 		return HAMMER2_ERROR_EIO;
1426 	case EINVAL:
1427 	default:
1428 		return HAMMER2_ERROR_EINVAL;
1429 	}
1430 }
1431 
1432 
1433 extern struct vop_ops hammer2_vnode_vops;
1434 extern struct vop_ops hammer2_spec_vops;
1435 extern struct vop_ops hammer2_fifo_vops;
1436 extern struct hammer2_pfslist hammer2_pfslist;
1437 extern struct lock hammer2_mntlk;
1438 
1439 
1440 extern int hammer2_debug;
1441 extern long hammer2_debug_inode;
1442 extern int hammer2_cluster_meta_read;
1443 extern int hammer2_cluster_data_read;
1444 extern int hammer2_cluster_write;
1445 extern int hammer2_dedup_enable;
1446 extern int hammer2_always_compress;
1447 extern int hammer2_inval_enable;
1448 extern int hammer2_flush_pipe;
1449 extern int hammer2_dio_count;
1450 extern int hammer2_dio_limit;
1451 extern int hammer2_bulkfree_tps;
1452 extern long hammer2_chain_allocs;
1453 extern long hammer2_chain_frees;
1454 extern long hammer2_limit_dirty_chains;
1455 extern long hammer2_limit_dirty_inodes;
1456 extern long hammer2_count_modified_chains;
1457 extern long hammer2_iod_invals;
1458 extern long hammer2_iod_file_read;
1459 extern long hammer2_iod_meta_read;
1460 extern long hammer2_iod_indr_read;
1461 extern long hammer2_iod_fmap_read;
1462 extern long hammer2_iod_volu_read;
1463 extern long hammer2_iod_file_write;
1464 extern long hammer2_iod_file_wembed;
1465 extern long hammer2_iod_file_wzero;
1466 extern long hammer2_iod_file_wdedup;
1467 extern long hammer2_iod_meta_write;
1468 extern long hammer2_iod_indr_write;
1469 extern long hammer2_iod_fmap_write;
1470 extern long hammer2_iod_volu_write;
1471 
1472 extern long hammer2_process_xxhash64;
1473 extern long hammer2_process_icrc32;
1474 
1475 extern struct objcache *cache_buffer_read;
1476 extern struct objcache *cache_buffer_write;
1477 extern struct objcache *cache_xops;
1478 
1479 /*
1480  * hammer2_subr.c
1481  */
1482 #define hammer2_icrc32(buf, size)	iscsi_crc32((buf), (size))
1483 #define hammer2_icrc32c(buf, size, crc)	iscsi_crc32_ext((buf), (size), (crc))
1484 
1485 int hammer2_signal_check(time_t *timep);
1486 const char *hammer2_error_str(int error);
1487 
1488 void hammer2_inode_delayed_sideq(hammer2_inode_t *ip);
1489 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1490 void hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
1491 			hammer2_inode_t *ip3, hammer2_inode_t *ip4);
1492 void hammer2_inode_unlock(hammer2_inode_t *ip);
1493 void hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
1494 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1495 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
1496 			int clindex, hammer2_chain_t **parentp, int how);
1497 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1498 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1499 			hammer2_mtx_state_t ostate);
1500 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1501 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
1502 
1503 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1504 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1505 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1506 
1507 int hammer2_get_dtype(uint8_t type);
1508 int hammer2_get_vtype(uint8_t type);
1509 uint8_t hammer2_get_obj_type(enum vtype vtype);
1510 void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
1511 uint64_t hammer2_timespec_to_time(const struct timespec *ts);
1512 uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
1513 void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);
1514 void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
1515 
1516 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1517 int hammer2_getradix(size_t bytes);
1518 
1519 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1520 			hammer2_key_t *lbasep, hammer2_key_t *leofp);
1521 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1522 void hammer2_update_time(uint64_t *timep);
1523 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
1524 
1525 /*
1526  * hammer2_inode.c
1527  */
1528 struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
1529 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1530 			hammer2_tid_t inum);
1531 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp,
1532 			hammer2_xop_head_t *xop, hammer2_tid_t inum, int idx);
1533 void hammer2_inode_free(hammer2_inode_t *ip);
1534 void hammer2_inode_ref(hammer2_inode_t *ip);
1535 void hammer2_inode_drop(hammer2_inode_t *ip);
1536 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1537 			hammer2_cluster_t *cluster);
1538 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1539 			int idx);
1540 void hammer2_inode_modify(hammer2_inode_t *ip);
1541 void hammer2_inode_run_sideq(hammer2_pfs_t *pmp, int doall);
1542 
1543 hammer2_inode_t *hammer2_inode_create_normal(hammer2_inode_t *pip,
1544 			struct vattr *vap, struct ucred *cred,
1545 			hammer2_key_t inum, int *errorp);
1546 hammer2_inode_t *hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
1547 			const uint8_t *name, size_t name_len,
1548 			int *errorp);
1549 int hammer2_inode_chain_ins(hammer2_inode_t *ip);
1550 int hammer2_inode_chain_des(hammer2_inode_t *ip);
1551 int hammer2_inode_chain_sync(hammer2_inode_t *ip);
1552 int hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags);
1553 int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
1554 int hammer2_dirent_create(hammer2_inode_t *dip, const char *name,
1555 			size_t name_len, hammer2_key_t inum, uint8_t type);
1556 
1557 /*
1558  * hammer2_chain.c
1559  */
1560 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1561 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1562 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1563 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1564 				hammer2_pfs_t *pmp,
1565 				hammer2_blockref_t *bref);
1566 void hammer2_chain_core_init(hammer2_chain_t *chain);
1567 void hammer2_chain_ref(hammer2_chain_t *chain);
1568 void hammer2_chain_ref_hold(hammer2_chain_t *chain);
1569 void hammer2_chain_drop(hammer2_chain_t *chain);
1570 void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
1571 int hammer2_chain_lock(hammer2_chain_t *chain, int how);
1572 void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
1573 void hammer2_chain_load_data(hammer2_chain_t *chain);
1574 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1575 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1576 
1577 int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
1578 				int clindex, int flags,
1579 				hammer2_chain_t **parentp,
1580 				hammer2_chain_t **chainp);
1581 int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
1582 				hammer2_off_t dedup_off, int flags);
1583 int hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
1584 				hammer2_tid_t mtid, int flags);
1585 int hammer2_chain_resize(hammer2_chain_t *chain,
1586 				hammer2_tid_t mtid, hammer2_off_t dedup_off,
1587 				int nradix, int flags);
1588 void hammer2_chain_unlock(hammer2_chain_t *chain);
1589 void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
1590 void hammer2_chain_wait(hammer2_chain_t *chain);
1591 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1592 				hammer2_blockref_t *bref, int how);
1593 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1594 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1595 hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t *chain, int flags);
1596 hammer2_chain_t *hammer2_chain_repparent(hammer2_chain_t **chainp, int flags);
1597 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1598 				hammer2_key_t *key_nextp,
1599 				hammer2_key_t key_beg, hammer2_key_t key_end,
1600 				int *errorp, int flags);
1601 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1602 				hammer2_chain_t *chain,
1603 				hammer2_key_t *key_nextp,
1604 				hammer2_key_t key_beg, hammer2_key_t key_end,
1605 				int *errorp, int flags);
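
/*
 * Iteration sketch (illustrative): hammer2_chain_lookup() and
 * hammer2_chain_next() are used as a pair to scan a key range under a
 * locked parent (obtained e.g. via hammer2_inode_chain() or
 * hammer2_chain_lookup_init()); chain and parent references are managed
 * by the iteration itself:
 *
 *	chain = hammer2_chain_lookup(&parent, &key_next,
 *				     key_beg, key_end, &error, flags);
 *	while (chain) {
 *		...process chain...
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end, &error, flags);
 *	}
 */
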
1606 int hammer2_chain_scan(hammer2_chain_t *parent,
1607 				hammer2_chain_t **chainp,
1608 				hammer2_blockref_t *bref,
1609 				int *firstp, int flags);
1610 
1611 int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
1612 				hammer2_dev_t *hmp, hammer2_pfs_t *pmp,
1613 				int methods, hammer2_key_t key, int keybits,
1614 				int type, size_t bytes, hammer2_tid_t mtid,
1615 				hammer2_off_t dedup_off, int flags);
1616 void hammer2_chain_rename(hammer2_chain_t **parentp,
1617 				hammer2_chain_t *chain,
1618 				hammer2_tid_t mtid, int flags);
1619 int hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
1620 				hammer2_tid_t mtid, int flags);
1621 int hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
1622 				hammer2_chain_t *chain);
1623 void hammer2_chain_setflush(hammer2_chain_t *chain);
1624 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1625 				hammer2_blockref_t *base, int count);
1626 hammer2_chain_t *hammer2_chain_bulksnap(hammer2_dev_t *hmp);
1627 void hammer2_chain_bulkdrop(hammer2_chain_t *copy);
1628 
1629 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1630 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1631 int hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
1632 				size_t name_len);
1633 
1634 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1635 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1636 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1637 
1638 void hammer2_base_delete(hammer2_chain_t *parent,
1639 				hammer2_blockref_t *base, int count,
1640 				hammer2_chain_t *chain,
1641 				hammer2_blockref_t *obref);
1642 void hammer2_base_insert(hammer2_chain_t *parent,
1643 				hammer2_blockref_t *base, int count,
1644 				hammer2_chain_t *chain,
1645 				hammer2_blockref_t *elm);
1646 
1647 /*
1648  * hammer2_flush.c
1649  */
1650 int hammer2_flush(hammer2_chain_t *chain, int istop);
1651 void hammer2_delayed_flush(hammer2_chain_t *chain);
1652 
1653 /*
1654  * hammer2_trans.c
1655  */
1656 void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
1657 void hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags);
1658 void hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags);
1659 hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
1660 void hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags);
1661 hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
1662 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
1663 void hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
1664 				const char *data);
1665 
1666 /*
1667  * hammer2_ioctl.c
1668  */
1669 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1670 				int fflag, struct ucred *cred);
1671 
1672 /*
1673  * hammer2_io.c
1674  */
1675 void hammer2_io_putblk(hammer2_io_t **diop);
1676 void hammer2_io_inval(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes);
1677 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1678 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1679 void hammer2_io_bkvasync(hammer2_io_t *dio);
1680 hammer2_io_t *hammer2_io_getblk(hammer2_dev_t *hmp, int btype, off_t lbase,
1681 				int lsize, int op);
1682 void hammer2_io_dedup_set(hammer2_dev_t *hmp, hammer2_blockref_t *bref);
1683 void hammer2_io_dedup_delete(hammer2_dev_t *hmp, uint8_t btype,
1684 				hammer2_off_t data_off, u_int bytes);
1685 void hammer2_io_dedup_assert(hammer2_dev_t *hmp, hammer2_off_t data_off,
1686 				u_int bytes);
1687 void hammer2_io_callback(struct bio *bio);
1688 int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1689 				hammer2_io_t **diop);
1690 int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1691 				hammer2_io_t **diop);
1692 int hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1693 				hammer2_io_t **diop);
1694 hammer2_io_t *hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase, int lsize);
1695 void hammer2_io_bawrite(hammer2_io_t **diop);
1696 void hammer2_io_bdwrite(hammer2_io_t **diop);
1697 int hammer2_io_bwrite(hammer2_io_t **diop);
1698 void hammer2_io_setdirty(hammer2_io_t *dio);
1699 void hammer2_io_brelse(hammer2_io_t **diop);
1700 void hammer2_io_bqrelse(hammer2_io_t **diop);
1701 void hammer2_io_ref(hammer2_io_t *dio);
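
/*
 * Usage sketch (illustrative): device buffers are accessed through a
 * get/put bracket.  hammer2_io_bread() returns a referenced dio whose
 * mapped data is obtained with hammer2_io_data() and released with
 * hammer2_io_putblk():
 *
 *	error = hammer2_io_bread(hmp, btype, lbase, lsize, &dio);
 *	if (error == 0) {
 *		data = hammer2_io_data(dio, lbase);
 *		...use data...
 *		hammer2_io_putblk(&dio);
 *	}
 */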
1702 
1703 /*
1704  * hammer2_thread.c
1705  */
1706 void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
1707 void hammer2_thr_signal2(hammer2_thread_t *thr,
1708 			uint32_t pflags, uint32_t nflags);
1709 void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
1710 void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
1711 int hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo);
1712 void hammer2_thr_create(hammer2_thread_t *thr,
1713 			hammer2_pfs_t *pmp, hammer2_dev_t *hmp,
1714 			const char *id, int clindex, int repidx,
1715 			void (*func)(void *arg));
1716 void hammer2_thr_delete(hammer2_thread_t *thr);
1717 void hammer2_thr_remaster(hammer2_thread_t *thr);
1718 void hammer2_thr_freeze_async(hammer2_thread_t *thr);
1719 void hammer2_thr_freeze(hammer2_thread_t *thr);
1720 void hammer2_thr_unfreeze(hammer2_thread_t *thr);
1721 int hammer2_thr_break(hammer2_thread_t *thr);
1722 void hammer2_primary_xops_thread(void *arg);
1723 
1724 /*
1725  * hammer2_thread.c (XOP API)
1726  */
1727 void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
1728 void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
1729 void hammer2_xop_setname(hammer2_xop_head_t *xop,
1730 				const char *name, size_t name_len);
1731 void hammer2_xop_setname2(hammer2_xop_head_t *xop,
1732 				const char *name, size_t name_len);
1733 size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
1734 void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
1735 void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
1736 void hammer2_xop_reinit(hammer2_xop_head_t *xop);
1737 void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
1738 void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
1739 void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc);
1740 void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
1741 				int notidx);
1742 int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
1743 void hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask);
1744 int hammer2_xop_active(hammer2_xop_head_t *xop);
1745 int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
1746 				int clindex, int error);
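
/*
 * Front-end usage sketch (illustrative; details vary per operation):
 * allocate an XOP, parameterize it, start the per-node backends, collect
 * the merged cluster result, then retire the XOP:
 *
 *	hammer2_xop_nresolve_t *xop;
 *
 *	xop = hammer2_xop_alloc(dip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	...on success the result is in xop->head.cluster...
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */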
1747 
1748 /*
1749  * hammer2_synchro.c
1750  */
1751 void hammer2_primary_sync_thread(void *arg);
1752 
1753 /*
1754  * XOP backends in hammer2_xops.c, primarily for VNOPS.  Other XOP backends
1755  * may be integrated into other source files.
1756  */
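
/*
 * Backend sketch (illustrative): each backend runs once per cluster node
 * (clindex), performs chain-level work against that node, and feeds its
 * result back to the front-end, which merges the per-node results in
 * hammer2_xop_collect():
 *
 *	chain = ...chain-level lookup on cluster node clindex...
 *	error = chain ? 0 : HAMMER2_ERROR_ENOENT;
 *	hammer2_xop_feed(&xop->head, chain, clindex, error);
 */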
1757 void hammer2_xop_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
1758 void hammer2_xop_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
1759 void hammer2_xop_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
1760 void hammer2_xop_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
1761 void hammer2_xop_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
1762 void hammer2_xop_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
1763 void hammer2_xop_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
1764 void hammer2_xop_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
1765 void hammer2_xop_delete(hammer2_xop_t *xop, void *scratch, int clindex);
1766 void hammer2_xop_inode_mkdirent(hammer2_xop_t *xop, void *scratch, int clindex);
1767 void hammer2_xop_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
1768 void hammer2_xop_inode_create_det(hammer2_xop_t *xop,
1769 				void *scratch, int clindex);
1770 void hammer2_xop_inode_create_ins(hammer2_xop_t *xop,
1771 				void *scratch, int clindex);
1772 void hammer2_xop_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
1773 void hammer2_xop_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
1774 				int clindex);
1775 void hammer2_xop_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
1776 				int clindex);
1777 void hammer2_xop_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
1778 void hammer2_xop_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
1779 void hammer2_xop_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
1780 void hammer2_xop_strategy_write(hammer2_xop_t *xop, void *scratch, int clindex);
1781 
1782 void hammer2_dmsg_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
1783 void hammer2_dmsg_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
1784 void hammer2_dmsg_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
1785 void hammer2_dmsg_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
1786 void hammer2_dmsg_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
1787 void hammer2_dmsg_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
1788 void hammer2_dmsg_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
1789 void hammer2_dmsg_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
1790 void hammer2_dmsg_inode_mkdirent(hammer2_xop_t *xop, void *scratch,
1791 				int clindex);
1792 void hammer2_dmsg_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
1793 void hammer2_dmsg_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
1794 void hammer2_dmsg_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
1795 				int clindex);
1796 void hammer2_dmsg_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
1797 				int clindex);
1798 void hammer2_dmsg_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
1799 void hammer2_dmsg_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
1800 void hammer2_dmsg_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
1801 void hammer2_dmsg_strategy_write(hammer2_xop_t *xop, void *scratch,
1802 				int clindex);
1803 
1804 void hammer2_rmsg_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
1805 void hammer2_rmsg_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
1806 void hammer2_rmsg_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
1807 void hammer2_rmsg_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
1808 void hammer2_rmsg_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
1809 void hammer2_rmsg_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
1810 void hammer2_rmsg_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
1811 void hammer2_rmsg_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
1812 void hammer2_rmsg_inode_mkdirent(hammer2_xop_t *xop, void *scratch,
1813 				int clindex);
1814 void hammer2_rmsg_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
1815 void hammer2_rmsg_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
1816 void hammer2_rmsg_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
1817 				int clindex);
1818 void hammer2_rmsg_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
1819 				int clindex);
1820 void hammer2_rmsg_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
1821 void hammer2_rmsg_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
1822 void hammer2_rmsg_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
1823 void hammer2_rmsg_strategy_write(hammer2_xop_t *xop, void *scratch,
1824 				int clindex);
1825 
1826 extern hammer2_xop_desc_t hammer2_ipcluster_desc;
1827 extern hammer2_xop_desc_t hammer2_readdir_desc;
1828 extern hammer2_xop_desc_t hammer2_nresolve_desc;
1829 extern hammer2_xop_desc_t hammer2_unlink_desc;
1830 extern hammer2_xop_desc_t hammer2_nrename_desc;
1831 extern hammer2_xop_desc_t hammer2_scanlhc_desc;
1832 extern hammer2_xop_desc_t hammer2_scanall_desc;
1833 extern hammer2_xop_desc_t hammer2_lookup_desc;
1834 extern hammer2_xop_desc_t hammer2_delete_desc;
1835 extern hammer2_xop_desc_t hammer2_inode_mkdirent_desc;
1836 extern hammer2_xop_desc_t hammer2_inode_create_desc;
1837 extern hammer2_xop_desc_t hammer2_inode_create_det_desc;
1838 extern hammer2_xop_desc_t hammer2_inode_create_ins_desc;
1839 extern hammer2_xop_desc_t hammer2_inode_destroy_desc;
1840 extern hammer2_xop_desc_t hammer2_inode_chain_sync_desc;
1841 extern hammer2_xop_desc_t hammer2_inode_unlinkall_desc;
1842 extern hammer2_xop_desc_t hammer2_inode_connect_desc;
1843 extern hammer2_xop_desc_t hammer2_inode_flush_desc;
1844 extern hammer2_xop_desc_t hammer2_strategy_read_desc;
1845 extern hammer2_xop_desc_t hammer2_strategy_write_desc;
1846 
1847 /*
1848  * hammer2_msgops.c
1849  */
1850 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1851 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1852 
1853 /*
1854  * hammer2_vfsops.c
1855  */
1856 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1857 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx,
1858 				u_int flags);
1859 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1860 int hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor);
1861 int hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred);
1862 
1863 hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
1864 				const hammer2_inode_data_t *ripdata,
1865 				hammer2_tid_t modify_tid,
1866 				hammer2_dev_t *force_local);
1867 void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
1868 int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1869 				ino_t ino, struct vnode **vpp);
1870 
1871 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1872 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1873 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);
1874 
1875 /*
1876  * hammer2_freemap.c
1877  */
1878 int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
1879 void hammer2_freemap_adjust(hammer2_dev_t *hmp,
1880 				hammer2_blockref_t *bref, int how);
1881 
1882 /*
1883  * hammer2_cluster.c
1884  */
1885 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1886 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1887 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
1888 				hammer2_blockref_t *bref);
1889 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1890 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1891 void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1892 int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
1893 			int flags);
1894 void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
1895 void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
1896 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1897 
1898 void hammer2_bulkfree_init(hammer2_dev_t *hmp);
1899 void hammer2_bulkfree_uninit(hammer2_dev_t *hmp);
1900 int hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
1901 			struct hammer2_ioc_bulkfree *bfi);
1902 void hammer2_dummy_xop_from_chain(hammer2_xop_head_t *xop,
1903 			hammer2_chain_t *chain);
1904 
1905 /*
1906  * hammer2_iocom.c
1907  */
1908 void hammer2_iocom_init(hammer2_dev_t *hmp);
1909 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1910 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1911 
1912 /*
1913  * hammer2_strategy.c
1914  */
1915 int hammer2_vop_strategy(struct vop_strategy_args *ap);
1916 int hammer2_vop_bmap(struct vop_bmap_args *ap);
1917 void hammer2_write_thread(void *arg);
1918 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1919 void hammer2_dedup_clear(hammer2_dev_t *hmp);
1920 
1921 /*
1922  * More complex inlines: XOP focus-data accessors (see usage sketch below)
1923  */
1924 static __inline
1925 const hammer2_media_data_t *
1926 hammer2_xop_gdata(hammer2_xop_head_t *xop)
1927 {
1928 	hammer2_chain_t *focus;
1929 	const void *data;
1930 
1931 	focus = xop->cluster.focus;
1932 	if (focus->dio) {
1933 		lockmgr(&focus->diolk, LK_SHARED);
1934 		if ((xop->focus_dio = focus->dio) != NULL) {
1935 			hammer2_io_ref(xop->focus_dio);
1936 			hammer2_io_bkvasync(xop->focus_dio);
1937 		}
1938 		data = focus->data;
1939 		lockmgr(&focus->diolk, LK_RELEASE);
1940 	} else {
1941 		data = focus->data;
1942 	}
1943 
1944 	return data;
1945 }
1946 
1947 static __inline
1948 void
1949 hammer2_xop_pdata(hammer2_xop_head_t *xop)
1950 {
1951 	if (xop->focus_dio)
1952 		hammer2_io_putblk(&xop->focus_dio);
1953 }
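
/*
 * Usage sketch (illustrative): hammer2_xop_gdata() and hammer2_xop_pdata()
 * must be paired.  gdata returns the focus chain's media data and may take
 * a reference on the underlying dio; pdata releases that reference:
 *
 *	const hammer2_media_data_t *ripdata;
 *
 *	ripdata = hammer2_xop_gdata(&xop->head);
 *	...copy the needed fields out of ripdata->ipdata...
 *	hammer2_xop_pdata(&xop->head);
 */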
1954 
1955 #endif /* !_KERNEL */
1956 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */
1957