xref: /dragonfly/sys/vfs/hammer2/hammer2.h (revision 926ea7d7)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61 
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64 
65 #ifdef _KERNEL
66 #include <sys/param.h>
67 #endif
68 #include <sys/types.h>
69 #ifdef _KERNEL
70 #include <sys/kernel.h>
71 #endif
72 #include <sys/conf.h>
73 #ifdef _KERNEL
74 #include <sys/systm.h>
75 #endif
76 #include <sys/tree.h>
77 #include <sys/malloc.h>
78 #include <sys/mount.h>
79 #include <sys/vnode.h>
80 #include <sys/proc.h>
81 #include <sys/mountctl.h>
82 #include <sys/priv.h>
83 #include <sys/stat.h>
84 #include <sys/thread.h>
85 #include <sys/globaldata.h>
86 #include <sys/lockf.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/limits.h>
90 #include <sys/dmsg.h>
91 #include <sys/mutex.h>
92 #ifdef _KERNEL
93 #include <sys/kern_syscall.h>
94 #endif
95 
96 #ifdef _KERNEL
97 #include <sys/signal2.h>
98 #include <sys/buf2.h>
99 #include <sys/mutex2.h>
100 #include <sys/spinlock2.h>
101 #endif
102 
103 #include "hammer2_xxhash.h"
104 #include "hammer2_disk.h"
105 #include "hammer2_mount.h"
106 #include "hammer2_ioctl.h"
107 
108 struct hammer2_io;
109 struct hammer2_chain;
110 struct hammer2_cluster;
111 struct hammer2_inode;
112 struct hammer2_depend;
113 struct hammer2_dev;
114 struct hammer2_pfs;
115 struct hammer2_span;
116 struct hammer2_msg;
117 struct hammer2_thread;
118 union hammer2_xop;
119 
120 /*
121  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
122  * abortable locks, and both exclusive and shared spinlocks.  Normal
123  * synchronous non-abortable locks can be substituted for spinlocks.
124  */
125 typedef mtx_t				hammer2_mtx_t;
126 typedef mtx_link_t			hammer2_mtx_link_t;
127 typedef mtx_state_t			hammer2_mtx_state_t;
128 
129 typedef struct spinlock			hammer2_spin_t;
130 
131 #define hammer2_mtx_ex			mtx_lock_ex_quick
132 #define hammer2_mtx_ex_try		mtx_lock_ex_try
133 #define hammer2_mtx_sh			mtx_lock_sh_quick
134 #define hammer2_mtx_sh_again		mtx_lock_sh_again
135 #define hammer2_mtx_sh_try		mtx_lock_sh_try
136 #define hammer2_mtx_unlock		mtx_unlock
137 #define hammer2_mtx_downgrade		mtx_downgrade
138 #define hammer2_mtx_owned		mtx_owned
139 #define hammer2_mtx_init		mtx_init
140 #define hammer2_mtx_temp_release	mtx_lock_temp_release
141 #define hammer2_mtx_temp_restore	mtx_lock_temp_restore
142 #define hammer2_mtx_refs		mtx_lockrefs
143 
144 #define hammer2_spin_init		spin_init
145 #define hammer2_spin_sh			spin_lock_shared
146 #define hammer2_spin_ex			spin_lock
147 #define hammer2_spin_unsh		spin_unlock_shared
148 #define hammer2_spin_unex		spin_unlock
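
/*
 * Illustrative sketch (not compiled): the shims above are used exactly like
 * the underlying kernel mtx/spinlock APIs.  The local variables here are
 * purely hypothetical; real code operates on fields such as chain->lock and
 * chain->core.spin.
 */
#if 0
static void
hammer2_lock_shim_example(void)
{
	hammer2_mtx_t lk;
	hammer2_spin_t spin;

	hammer2_mtx_init(&lk, "h2exmp");	/* wraps mtx_init */
	hammer2_mtx_ex(&lk);			/* exclusive lock */
	hammer2_mtx_unlock(&lk);

	hammer2_spin_init(&spin, "h2exmp");	/* wraps spin_init */
	hammer2_spin_sh(&spin);			/* shared spinlock */
	hammer2_spin_unsh(&spin);
}
#endif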
149 
150 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
151 TAILQ_HEAD(hammer2_chain_list, hammer2_chain);
152 
153 typedef struct hammer2_xop_list	hammer2_xop_list_t;
154 
155 #ifdef _KERNEL
156 /*
157  * General lock support
158  */
159 static __inline
160 int
161 hammer2_mtx_upgrade_try(hammer2_mtx_t *mtx)
162 {
163 	return mtx_upgrade_try(mtx);
164 }
165 
166 #endif
167 
168 /*
169  * The xid tracks internal transactional updates.
170  *
171  * XXX fix-me, really needs to be 64-bits
172  */
173 typedef uint32_t hammer2_xid_t;
174 
175 #define HAMMER2_XID_MIN			0x00000000U
176 #define HAMMER2_XID_MAX			0x7FFFFFFFU
177 
178 /*
179  * Cap the dynamic calculation for the maximum number of dirty
180  * chains and dirty inodes allowed.
181  */
182 #define HAMMER2_LIMIT_DIRTY_CHAINS	(1024*1024)
183 #define HAMMER2_LIMIT_DIRTY_INODES	(65536)
184 
185 /*
186  * The chain structure tracks a portion of the media topology from the
187  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
188  * data blocks, and freemap nodes and leaves.
189  *
190  * The chain structure utilizes a simple singly-homed topology and the
191  * chain's in-memory topology will move around as the chains do, due mainly
192  * to renames and indirect block creation.
193  *
194  * Block Table Updates
195  *
196  *	Block table updates for insertions and updates are delayed until the
197  *	flush.  This allows us to avoid having to modify the parent chain
198  *	all the way to the root.
199  *
200  *	Block table deletions are performed immediately (modifying the parent
201  *	in the process) because the flush code uses the chain structure to
202  *	track delayed updates and the chain will be (likely) gone or moved to
203  *	another location in the topology after a deletion.
204  *
205  *	A prior iteration of the code tried to keep the relationship intact
206  *	on deletes by doing a delete-duplicate operation on the chain, but
207  *	it added way too much complexity to the codebase.
208  *
209  * Flush Synchronization
210  *
211  *	The flush code must flush modified chains bottom-up.  Because chain
212  *	structures can shift around and are NOT topologically stable,
213  *	modified chains are independently indexed for the flush.  As the flush
214  *	runs it modifies (or further modifies) and updates the parents,
215  *	propagating the flush all the way to the volume root.
216  *
217  *	Modifying front-end operations can occur during a flush but will block
218  *	in two cases: (1) when the front-end tries to operate on the inode
219  *	currently in the midst of being flushed and (2) if the front-end
220  *	crosses an inode currently being flushed (such as during a rename).
221  *	So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
222  *	the flusher is currently working on "a/b/c", the rename will block
223  *	temporarily in order to ensure that "x" exists in one place or the
224  *	other.
225  *
226  *	Meta-data statistics are updated by the flusher.  The front-end will
227  *	make estimates but meta-data must be fully synchronized only during a
228  *	flush in order to ensure that it remains correct across a crash.
229  *
230  *	Multiple flush synchronizations can theoretically be in-flight at the
231  *	same time but the implementation is not coded to handle the case and
232  *	currently serializes them.
233  *
234  * Snapshots:
235  *
236  *	Snapshots currently require the subdirectory tree being snapshotted
237  *	to be flushed.  The snapshot then creates a new super-root inode which
238  *	copies the flushed blockdata of the directory or file that was
239  *	snapshotted.
240  *
241  * RBTREE NOTES:
242  *
243  *	- Note that the radix tree runs in powers of 2 only so sub-trees
244  *	  cannot straddle edges.
245  */
246 RB_HEAD(hammer2_chain_tree, hammer2_chain);
247 
248 struct hammer2_reptrack {
249 	hammer2_spin_t	spin;
250 	struct hammer2_reptrack *next;
251 	struct hammer2_chain	*chain;
252 };
253 
254 /*
255  * Core topology for chain (embedded in chain).  Protected by a spinlock.
256  */
257 struct hammer2_chain_core {
258 	hammer2_spin_t	spin;
259 	struct hammer2_reptrack *reptrack;
260 	struct hammer2_chain_tree rbtree; /* sub-chains */
261 	int		live_zero;	/* blockref array opt */
262 	u_int		live_count;	/* live (not deleted) chains in tree */
263 	u_int		chain_count;	/* live + deleted chains under core */
264 	int		generation;	/* generation number (inserts only) */
265 };
266 
267 typedef struct hammer2_chain_core hammer2_chain_core_t;
268 
269 RB_HEAD(hammer2_io_tree, hammer2_io);
270 
271 /*
272  * DIO - Management structure wrapping system buffer cache.
273  *
274  * HAMMER2 uses an I/O abstraction that allows it to cache and manipulate
275  * fixed-sized filesystem buffers fronted by variable-sized hammer2_chain
276  * structures.
277  */
278 /* #define HAMMER2_IO_DEBUG */
279 
280 #ifdef HAMMER2_IO_DEBUG
281 #define HAMMER2_IO_DEBUG_ARGS	, const char *file, int line
282 #define HAMMER2_IO_DEBUG_CALL	, file, line
283 #define HAMMER2_IO_DEBUG_COUNT	2048
284 #define HAMMER2_IO_DEBUG_MASK	(HAMMER2_IO_DEBUG_COUNT - 1)
285 #else
286 #define HAMMER2_IO_DEBUG_ARGS
287 #define HAMMER2_IO_DEBUG_CALL
288 #endif
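
/*
 * Illustrative sketch (not compiled): when HAMMER2_IO_DEBUG is enabled the
 * DEBUG_ARGS/DEBUG_CALL macros splice a (file, line) pair through the DIO
 * call paths so each ref/unref can be recorded in the debug_* arrays below.
 * The function name here is hypothetical; the real DIO entry points follow
 * the same pattern.
 */
#if 0
void _hammer2_io_example_ref(struct hammer2_io *dio HAMMER2_IO_DEBUG_ARGS);

#ifdef HAMMER2_IO_DEBUG
#define hammer2_io_example_ref(dio)				\
	_hammer2_io_example_ref((dio), __FILE__, __LINE__)
#else
#define hammer2_io_example_ref(dio)				\
	_hammer2_io_example_ref((dio))
#endif
#endif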
289 
290 struct hammer2_io {
291 	RB_ENTRY(hammer2_io) rbnode;	/* indexed by device offset */
292 	struct hammer2_dev *hmp;
293 	struct buf	*bp;
294 	off_t		pbase;
295 	uint64_t	refs;
296 	int		psize;
297 	int		act;		/* activity */
298 	int		btype;		/* approximate BREF_TYPE_* */
299 	int		ticks;
300 	int		error;
301 #ifdef HAMMER2_IO_DEBUG
302 	int		debug_index;
303 #else
304 	int		unused01;
305 #endif
306 	uint64_t	dedup_valid;	/* valid for dedup operation */
307 	uint64_t	dedup_alloc;	/* allocated / de-dupable */
308 #ifdef HAMMER2_IO_DEBUG
309 	const char	*debug_file[HAMMER2_IO_DEBUG_COUNT];
310 	void		*debug_td[HAMMER2_IO_DEBUG_COUNT];
311 	int		debug_line[HAMMER2_IO_DEBUG_COUNT];
312 	uint64_t	debug_refs[HAMMER2_IO_DEBUG_COUNT];
313 #endif
314 };
315 
316 typedef struct hammer2_io hammer2_io_t;
317 
318 #define HAMMER2_DIO_INPROG	0x8000000000000000LLU	/* bio in progress */
319 #define HAMMER2_DIO_GOOD	0x4000000000000000LLU	/* dio->bp is stable */
320 #define HAMMER2_DIO_WAITING	0x2000000000000000LLU	/* wait on INPROG */
321 #define HAMMER2_DIO_DIRTY	0x1000000000000000LLU	/* flush last drop */
322 #define HAMMER2_DIO_FLUSH	0x0800000000000000LLU	/* immediate flush */
323 
324 #define HAMMER2_DIO_MASK	0x00FFFFFFFFFFFFFFLLU
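
/*
 * Illustrative sketch (not compiled): dio->refs packs the state bits above
 * into the top byte while the actual reference count lives under
 * HAMMER2_DIO_MASK, so both can be tested from a single atomic load.
 */
#if 0
static __inline int
hammer2_dio_example_isgood(hammer2_io_t *dio)
{
	uint64_t refs = dio->refs;

	return ((refs & HAMMER2_DIO_GOOD) != 0 &&	/* dio->bp is stable */
		(refs & HAMMER2_DIO_MASK) != 0);	/* still referenced */
}
#endif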
325 
326 /*
327  * Primary chain structure keeps track of the topology in-memory.
328  */
329 struct hammer2_chain {
330 	hammer2_mtx_t		lock;
331 	hammer2_chain_core_t	core;
332 	RB_ENTRY(hammer2_chain) rbnode;		/* live chain(s) */
333 	hammer2_blockref_t	bref;
334 	struct hammer2_chain	*parent;
335 	struct hammer2_dev	*hmp;
336 	struct hammer2_pfs	*pmp;		/* A PFS or super-root (spmp) */
337 
338 	struct lock	diolk;			/* xop focus interlock */
339 	hammer2_io_t	*dio;			/* physical data buffer */
340 	hammer2_media_data_t *data;		/* data pointer shortcut */
341 	u_int		bytes;			/* physical data size */
342 	u_int		flags;
343 	u_int		refs;
344 	u_int		lockcnt;
345 	int		error;			/* on-lock data error state */
346 	int		cache_index;		/* heur speeds up lookup */
347 
348 	TAILQ_ENTRY(hammer2_chain) lru_node;	/* 0-refs LRU */
349 };
350 
351 typedef struct hammer2_chain hammer2_chain_t;
352 
353 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
354 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
355 
356 /*
357  * Special notes on flags:
358  *
359  * INITIAL	- This flag allows a chain to be created and for storage to
360  *		  be allocated without having to immediately instantiate the
361  *		  related buffer.  The data is assumed to be all-zeros.  It
362  *		  is primarily used for indirect blocks.
363  *
364  * MODIFIED	- The chain's media data has been modified.  Prevents chain
365  *		  free on lastdrop if still in the topology.
366  *
367  * UPDATE	- Chain might not be modified but parent blocktable needs
368  *		  an update.  Prevents chain free on lastdrop if still in
369  *		  the topology.
370  *
371  * FICTITIOUS	- Faked chain as a placeholder for an error condition.  This
372  *		  chain is unsuitable for I/O.
373  *
374  * BMAPPED	- Indicates that the chain is present in the parent blockmap.
375  *
376  * BMAPUPD	- Indicates that the chain is present but needs to be updated
377  *		  in the parent blockmap.
378  */
379 #define HAMMER2_CHAIN_MODIFIED		0x00000001	/* dirty chain data */
380 #define HAMMER2_CHAIN_ALLOCATED		0x00000002	/* kmalloc'd chain */
381 #define HAMMER2_CHAIN_DESTROY		0x00000004
382 #define HAMMER2_CHAIN_DEDUPABLE		0x00000008	/* registered w/dedup */
383 #define HAMMER2_CHAIN_DELETED		0x00000010	/* deleted chain */
384 #define HAMMER2_CHAIN_INITIAL		0x00000020	/* initial create */
385 #define HAMMER2_CHAIN_UPDATE		0x00000040	/* need parent update */
386 #define HAMMER2_CHAIN_NOTTESTED		0x00000080	/* crc not generated */
387 #define HAMMER2_CHAIN_TESTEDGOOD	0x00000100	/* crc tested good */
388 #define HAMMER2_CHAIN_ONFLUSH		0x00000200	/* on a flush list */
389 #define HAMMER2_CHAIN_FICTITIOUS	0x00000400	/* unsuitable for I/O */
390 #define HAMMER2_CHAIN_VOLUMESYNC	0x00000800	/* needs volume sync */
391 #define HAMMER2_CHAIN_UNUSED1000	0x00001000
392 #define HAMMER2_CHAIN_COUNTEDBREFS	0x00002000	/* block table stats */
393 #define HAMMER2_CHAIN_ONRBTREE		0x00004000	/* on parent RB tree */
394 #define HAMMER2_CHAIN_ONLRU		0x00008000	/* on LRU list */
395 #define HAMMER2_CHAIN_EMBEDDED		0x00010000	/* embedded data */
396 #define HAMMER2_CHAIN_RELEASE		0x00020000	/* don't keep around */
397 #define HAMMER2_CHAIN_BMAPPED		0x00040000	/* present in blkmap */
398 #define HAMMER2_CHAIN_BMAPUPD		0x00080000	/* +needs updating */
399 #define HAMMER2_CHAIN_IOINPROG		0x00100000	/* I/O interlock */
400 #define HAMMER2_CHAIN_IOSIGNAL		0x00200000	/* I/O interlock */
401 #define HAMMER2_CHAIN_PFSBOUNDARY	0x00400000	/* super->pfs inode */
402 #define HAMMER2_CHAIN_HINT_LEAF_COUNT	0x00800000	/* redo leaf count */
403 #define HAMMER2_CHAIN_LRUHINT		0x01000000	/* was reused */
404 
405 #define HAMMER2_CHAIN_FLUSH_MASK	(HAMMER2_CHAIN_MODIFIED |	\
406 					 HAMMER2_CHAIN_UPDATE |		\
407 					 HAMMER2_CHAIN_ONFLUSH |	\
408 					 HAMMER2_CHAIN_DESTROY)
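
/*
 * Illustrative sketch (not compiled): a chain must be visited by the flusher
 * whenever any FLUSH_MASK bit is set (MODIFIED and UPDATE additionally
 * prevent a free on lastdrop while the chain is still in the topology).
 */
#if 0
static __inline int
hammer2_chain_example_needs_flush(hammer2_chain_t *chain)
{
	return ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) != 0);
}
#endif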
409 
410 /*
411  * Hammer2 error codes, used by chain->error and cluster->error.  The error
412  * code is typically set at lock time when the lock requests I/O, and is
413  * otherwise set when I/O actually occurs.  If set for a cluster it
414  * generally means the cluster code could not find a valid copy to present.
415  *
416  * All H2 error codes are flags and can be accumulated by ORing them
417  * together.
418  *
419  * IO		- An I/O error occurred
420  * CHECK	- I/O succeeded but did not match the check code
421  * INCOMPLETE	- A cluster is not complete enough to use, or
422  *		  a chain cannot be loaded because its parent has an error.
423  *
424  * NOTE: API allows callers to check zero/non-zero to determine if an error
425  *	 condition exists.
426  *
427  * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
428  *	 NULL on other errors.  Check chain->error, not chain->data.
429  */
430 #define HAMMER2_ERROR_NONE		0	/* no error (must be 0) */
431 #define HAMMER2_ERROR_EIO		0x00000001	/* device I/O error */
432 #define HAMMER2_ERROR_CHECK		0x00000002	/* check code error */
433 #define HAMMER2_ERROR_INCOMPLETE	0x00000004	/* incomplete cluster */
434 #define HAMMER2_ERROR_DEPTH		0x00000008	/* tmp depth limit */
435 #define HAMMER2_ERROR_BADBREF		0x00000010	/* illegal bref */
436 #define HAMMER2_ERROR_ENOSPC		0x00000020	/* allocation failure */
437 #define HAMMER2_ERROR_ENOENT		0x00000040	/* entry not found */
438 #define HAMMER2_ERROR_ENOTEMPTY		0x00000080	/* dir not empty */
439 #define HAMMER2_ERROR_EAGAIN		0x00000100	/* retry */
440 #define HAMMER2_ERROR_ENOTDIR		0x00000200	/* not directory */
441 #define HAMMER2_ERROR_EISDIR		0x00000400	/* is directory */
442 #define HAMMER2_ERROR_EINPROGRESS	0x00000800	/* already running */
443 #define HAMMER2_ERROR_ABORTED		0x00001000	/* aborted operation */
444 #define HAMMER2_ERROR_EOF		0x00002000	/* end of scan */
445 #define HAMMER2_ERROR_EINVAL		0x00004000	/* catch-all */
446 #define HAMMER2_ERROR_EEXIST		0x00008000	/* entry exists */
447 #define HAMMER2_ERROR_EDEADLK		0x00010000
448 #define HAMMER2_ERROR_ESRCH		0x00020000
449 #define HAMMER2_ERROR_ETIMEDOUT		0x00040000
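
/*
 * Illustrative sketch (not compiled): because the H2 error codes are flag
 * bits they can be ORed together as an operation proceeds and converted to
 * a single errno only at the syscall boundary via hammer2_error_to_errno()
 * (defined further below).  The condition shown is hypothetical.
 */
#if 0
	int error = HAMMER2_ERROR_NONE;

	error |= chain->error;			/* accumulate chain state */
	if (check_code_mismatch)		/* hypothetical condition */
		error |= HAMMER2_ERROR_CHECK;
	return (hammer2_error_to_errno(error));	/* 0 or a kernel errno */
#endif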
450 
451 /*
452  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
453  *
454  * NOTES:
455  *	NODATA	    - Asks that the chain->data not be resolved in order
456  *		      to avoid I/O.
457  *
458  *	NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
459  *		      the inode itself if the inode is in DIRECTDATA mode
460  *		      (i.e. file is <= 512 bytes).  Used by the synchronization
461  *		      code to prevent confusion.
462  *
463  *	SHARED	    - The input chain is expected to be locked shared,
464  *		      and the output chain is locked shared.
465  *
466  *	MATCHIND    - Allows an indirect block / freemap node to be returned
467  *		      when the passed key range matches the radix.  Remember
468  *		      that key_end is inclusive (e.g. {0x000,0xFFF},
469  *		      not {0x000,0x1000}).
470  *
471  *		      (Cannot be used for remote or cluster ops).
472  *
473  *	ALLNODES    - Allows NULL focus.
474  *
475  *	ALWAYS	    - Always resolve the data.  If ALWAYS and NODATA are both
476  *		      missing, bulk file data is not resolved but inodes and
477  *		      other meta-data will be.
478  */
479 #define HAMMER2_LOOKUP_UNUSED0001	0x00000001
480 #define HAMMER2_LOOKUP_NODATA		0x00000002	/* data left NULL */
481 #define HAMMER2_LOOKUP_NODIRECT		0x00000004	/* no offset=0 DD */
482 #define HAMMER2_LOOKUP_SHARED		0x00000100
483 #define HAMMER2_LOOKUP_MATCHIND		0x00000200	/* return all chains */
484 #define HAMMER2_LOOKUP_ALLNODES		0x00000400	/* allow NULL focus */
485 #define HAMMER2_LOOKUP_ALWAYS		0x00000800	/* resolve data */
486 #define HAMMER2_LOOKUP_UNUSED1000	0x00001000
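
/*
 * Illustrative sketch (not compiled): a typical read-only ranged scan
 * combines SHARED with NODATA (or with ALWAYS when the media data is
 * actually needed).  The lookup/next call shape is assumed here from
 * hammer2_chain.c and is not declared in this excerpt.
 */
#if 0
	int how = HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NODATA;

	chain = hammer2_chain_lookup(&parent, &key_next,
				     key_beg, key_end, &error, how);
	while (chain) {
		/* ... examine chain->bref without resolving bulk data ... */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, key_end, &error, how);
	}
#endif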
487 
488 /*
489  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
490  *
491  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
492  *	 blocks in the INITIAL-create state.
493  */
494 #define HAMMER2_MODIFY_OPTDATA		0x00000002	/* data can be NULL */
495 
496 /*
497  * Flags passed to hammer2_chain_lock()
498  *
499  * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
500  *	 will be made to either the cluster being locked or any underlying
501  *	 cluster.  It allows the cluster to lock and access data for a subset
502  *	 of available nodes instead of all available nodes.
503  *
504  * NOTE: NONBLOCK is only used for hammer2_chain_repparent() and getparent(),
505  *	 other functions (e.g. hammer2_chain_lookup(), etc) can't handle its
506  *	 operation.
507  */
508 #define HAMMER2_RESOLVE_NEVER		1
509 #define HAMMER2_RESOLVE_MAYBE		2
510 #define HAMMER2_RESOLVE_ALWAYS		3
511 #define HAMMER2_RESOLVE_MASK		0x0F
512 
513 #define HAMMER2_RESOLVE_SHARED		0x10	/* request shared lock */
514 #define HAMMER2_RESOLVE_LOCKAGAIN	0x20	/* another shared lock */
515 #define HAMMER2_RESOLVE_UNUSED40	0x40
516 #define HAMMER2_RESOLVE_NONBLOCK	0x80	/* non-blocking */
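
/*
 * Illustrative sketch (not compiled): a RESOLVE_* level is combined with the
 * modifier bits to form the 'how' argument of hammer2_chain_lock(), whose
 * prototype is assumed here from hammer2_chain.c.
 */
#if 0
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
				  HAMMER2_RESOLVE_SHARED);
	/* ... chain->data is resolved and may be inspected ... */
	hammer2_chain_unlock(chain);
#endif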
517 
518 /*
519  * Flags passed to hammer2_chain_delete()
520  */
521 #define HAMMER2_DELETE_PERMANENT	0x0001
522 
523 /*
524  * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
525  * or hammer2_chain_create().
526  */
527 #define HAMMER2_INSERT_PFSROOT		0x0004
528 #define HAMMER2_INSERT_SAMEPARENT	0x0008
529 
530 /*
531  * hammer2_freemap_adjust()
532  */
533 #define HAMMER2_FREEMAP_DORECOVER	1
534 #define HAMMER2_FREEMAP_DOMAYFREE	2
535 #define HAMMER2_FREEMAP_DOREALFREE	3
536 
537 /*
538  * HAMMER2 cluster - A set of chains representing the same entity.
539  *
540  * hammer2_cluster typically represents a temporary set of representative
541  * chains.  The one exception is that a hammer2_cluster is embedded in
542  * hammer2_inode.  This embedded cluster is ONLY used to track the
543  * representative chains and cannot be directly locked.
544  *
545  * A cluster is usually temporary (and thus per-thread) for locking purposes,
546  * allowing us to embed the asynchronous storage required for cluster
547  * operations in the cluster itself and adjust the state and status without
548  * having to worry too much about SMP issues.
549  *
550  * The exception is the cluster embedded in the hammer2_inode structure.
551  * This is used to cache the cluster state on an inode-by-inode basis.
552  * Individual hammer2_chain structures not incorporated into clusters might
553  * also stick around to cache miscellaneous elements.
554  *
555  * Because the cluster is a 'working copy' and is usually subject to cluster
556  * quorum rules, it is quite possible for us to end up with an insufficient
557  * number of live chains to execute an operation.  If an insufficient number
558  * of chains remain in a working copy, the operation may have to be
559  * downgraded, retried, or stalled until the requisite number of chains
560  * is available, or it may even error out depending on the mount type.
561  *
562  * A cluster's focus is set when it is locked.  The focus can only be set
563  * to a chain still part of the synchronized set.
564  */
565 #define HAMMER2_XOPFIFO		16
566 #define HAMMER2_XOPFIFO_MASK	(HAMMER2_XOPFIFO - 1)
567 #define HAMMER2_XOPGROUPS_MIN	32
568 
569 #define HAMMER2_MAXCLUSTER	8
570 #define HAMMER2_XOPMASK_CLUSTER	(uint64_t)((1LLU << HAMMER2_MAXCLUSTER) - 1)
571 #define HAMMER2_XOPMASK_VOP	(uint64_t)0x0000000080000000LLU
572 #define HAMMER2_XOPMASK_FIFOW	(uint64_t)0x0000000040000000LLU
573 #define HAMMER2_XOPMASK_WAIT	(uint64_t)0x0000000020000000LLU
574 #define HAMMER2_XOPMASK_FEED	(uint64_t)0x0000000100000000LLU
575 
576 #define HAMMER2_XOPMASK_ALLDONE	(HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER)
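
/*
 * Illustrative sketch (not compiled): run_mask/chk_mask in hammer2_xop_head
 * carry one CLUSTER bit per backend node plus the VOP bit owned by the
 * frontend; an XOP is fully retired once all ALLDONE bits have been cleared.
 * The mask construction below is only meant to show the bit layout.
 */
#if 0
	uint64_t mask;

	mask = HAMMER2_XOPMASK_VOP |			/* frontend reference */
	       (((uint64_t)1 << nchains) - 1);		/* one bit per backend */
#endif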
577 
578 struct hammer2_cluster_item {
579 	hammer2_chain_t		*chain;
580 	int			error;
581 	uint32_t		flags;
582 };
583 
584 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
585 
586 /*
587  * INVALID	- Invalid for focus, i.e. not part of synchronized set.
588  *		  Once set, this bit is sticky across operations.
589  *
590  * FEMOD	- Indicates that front-end modifying operations can
591  *		  mess with this entry and that MODSYNC copies will
592  *		  also affect it.
593  */
594 #define HAMMER2_CITEM_INVALID	0x00000001
595 #define HAMMER2_CITEM_FEMOD	0x00000002
596 #define HAMMER2_CITEM_NULL	0x00000004
597 
598 struct hammer2_cluster {
599 	int			refs;		/* track for deallocation */
600 	int			ddflag;
601 	struct hammer2_pfs	*pmp;
602 	uint32_t		flags;
603 	int			nchains;
604 	int			error;		/* error code valid on lock */
605 	int			focus_index;
606 	hammer2_chain_t		*focus;		/* current focus (or mod) */
607 	hammer2_cluster_item_t	array[HAMMER2_MAXCLUSTER];
608 };
609 
610 typedef struct hammer2_cluster	hammer2_cluster_t;
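
/*
 * Illustrative sketch (not compiled): consumers of a locked cluster walk
 * array[], skipping elements flagged INVALID or NULL, and normally operate
 * on cluster->focus (array[focus_index].chain).
 */
#if 0
	int i;

	for (i = 0; i < cluster->nchains; ++i) {
		if (cluster->array[i].flags &
		    (HAMMER2_CITEM_INVALID | HAMMER2_CITEM_NULL))
			continue;
		/* cluster->array[i].chain is usable for this operation */
	}
#endif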
611 
612 /*
613  * WRHARD	- Hard mounts can write fully synchronized
614  * RDHARD	- Hard mounts can read fully synchronized
615  * UNHARD	- Unsynchronized masters present
616  * NOHARD	- No masters visible
617  * WRSOFT	- Soft mounts can write to at least the SOFT_MASTER
618  * RDSOFT	- Soft mounts can read from at least a SOFT_SLAVE
619  * UNSOFT	- Unsynchronized slaves present
620  * NOSOFT	- No slaves visible
621  * RDSLAVE	- slaves are accessible (possibly unsynchronized or remote).
622  * MSYNCED	- All masters are fully synchronized
623  * SSYNCED	- All known local slaves are fully synchronized to masters
624  *
625  * All available masters are always incorporated.  All PFSs belonging to a
626  * cluster (master, slave, copy, whatever) always try to synchronize the
627  * total number of known masters in the PFS's root inode.
628  *
629  * A cluster might have access to many slaves, copies, or caches, but we
630  * have a limited number of cluster slots.  Any such elements which are
631  * directly mounted from block device(s) will always be incorporated.   Note
632  * that SSYNCED only applies to such elements which are directly mounted,
633  * not to any remote slaves, copies, or caches that could be available.  These
634  * bits are used to monitor and drive our synchronization threads.
635  *
636  * When asking the question 'is any data accessible at all', then a simple
637  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
638  * these bits are set the object can be read with certain caveats:
639  * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized.
640  * and RDSLAVE - not authoritative, has some data but it could be old or
641  * incomplete.
642  *
643  * When both soft and hard mounts are available, data will be read and written
644  * via the soft mount only.  But all might be in the cluster because
645  * background synchronization threads still need to do their work.
646  */
647 #define HAMMER2_CLUSTER_INODE	0x00000001	/* embedded in inode struct */
648 #define HAMMER2_CLUSTER_UNUSED2	0x00000002
649 #define HAMMER2_CLUSTER_LOCKED	0x00000004	/* cluster lks not recursive */
650 #define HAMMER2_CLUSTER_WRHARD	0x00000100	/* hard-mount can write */
651 #define HAMMER2_CLUSTER_RDHARD	0x00000200	/* hard-mount can read */
652 #define HAMMER2_CLUSTER_UNHARD	0x00000400	/* unsynchronized masters */
653 #define HAMMER2_CLUSTER_NOHARD	0x00000800	/* no masters visible */
654 #define HAMMER2_CLUSTER_WRSOFT	0x00001000	/* soft-mount can write */
655 #define HAMMER2_CLUSTER_RDSOFT	0x00002000	/* soft-mount can read */
656 #define HAMMER2_CLUSTER_UNSOFT	0x00004000	/* unsynchronized slaves */
657 #define HAMMER2_CLUSTER_NOSOFT	0x00008000	/* no slaves visible */
658 #define HAMMER2_CLUSTER_MSYNCED	0x00010000	/* all masters synchronized */
659 #define HAMMER2_CLUSTER_SSYNCED	0x00020000	/* known slaves synchronized */
660 
661 #define HAMMER2_CLUSTER_ANYDATA	( HAMMER2_CLUSTER_RDHARD |	\
662 				  HAMMER2_CLUSTER_RDSOFT |	\
663 				  HAMMER2_CLUSTER_RDSLAVE)
664 
665 #define HAMMER2_CLUSTER_RDOK	( HAMMER2_CLUSTER_RDHARD |	\
666 				  HAMMER2_CLUSTER_RDSOFT)
667 
668 #define HAMMER2_CLUSTER_WROK	( HAMMER2_CLUSTER_WRHARD |	\
669 				  HAMMER2_CLUSTER_WRSOFT)
670 
671 #define HAMMER2_CLUSTER_ZFLAGS	( HAMMER2_CLUSTER_WRHARD |	\
672 				  HAMMER2_CLUSTER_RDHARD |	\
673 				  HAMMER2_CLUSTER_WRSOFT |	\
674 				  HAMMER2_CLUSTER_RDSOFT |	\
675 				  HAMMER2_CLUSTER_MSYNCED |	\
676 				  HAMMER2_CLUSTER_SSYNCED)
677 
678 /*
679  * Helper functions (cluster must be locked for flags to be valid).
680  */
681 static __inline
682 int
683 hammer2_cluster_rdok(hammer2_cluster_t *cluster)
684 {
685 	return (cluster->flags & HAMMER2_CLUSTER_RDOK);
686 }
687 
688 static __inline
689 int
690 hammer2_cluster_wrok(hammer2_cluster_t *cluster)
691 {
692 	return (cluster->flags & HAMMER2_CLUSTER_WROK);
693 }
694 
695 RB_HEAD(hammer2_inode_tree, hammer2_inode);	/* ip->rbnode */
696 TAILQ_HEAD(inoq_head, hammer2_inode);		/* ip->entry */
697 TAILQ_HEAD(depq_head, hammer2_depend);		/* depend->entry */
698 
699 struct hammer2_depend {
700 	TAILQ_ENTRY(hammer2_depend) entry;
701 	struct inoq_head	sideq;
702 	long			count;
703 	int			pass2;
704 	int			unused01;
705 };
706 
707 typedef struct hammer2_depend hammer2_depend_t;
708 
709 /*
710  * A hammer2 inode.
711  *
712  * NOTE: The inode-embedded cluster is never used directly for I/O (since
713  *	 it may be shared).  Instead it will be replicated-in and synchronized
714  *	 back out if changed.
715  */
716 struct hammer2_inode {
717 	RB_ENTRY(hammer2_inode) rbnode;		/* inumber lookup (HL) */
718 	TAILQ_ENTRY(hammer2_inode) entry;	/* SYNCQ/SIDEQ */
719 	hammer2_depend_t	*depend;	/* non-NULL if SIDEQ */
720 	hammer2_depend_t	depend_static;	/* (in-place allocation) */
721 	hammer2_mtx_t		lock;		/* inode lock */
722 	hammer2_mtx_t		truncate_lock;	/* prevent truncates */
723 	struct hammer2_pfs	*pmp;		/* PFS mount */
724 	struct vnode		*vp;
725 	struct spinlock		cluster_spin;	/* update cluster */
726 	hammer2_cluster_t	cluster;
727 	struct lockf		advlock;
728 	u_int			flags;
729 	u_int			refs;		/* +vpref, +flushref */
730 	uint8_t			comp_heuristic;
731 	hammer2_inode_meta_t	meta;		/* copy of meta-data */
732 	hammer2_off_t		osize;
733 };
734 
735 typedef struct hammer2_inode hammer2_inode_t;
736 
737 /*
738  * MODIFIED	- Inode is in a modified state, ip->meta may have changes.
739  * RESIZED	- Inode truncated (any) or inode extended beyond
740  *		  EMBEDDED_BYTES.
741  *
742  * SYNCQ	- Inode is included in the current filesystem sync.  The
743  *		  DELETING and CREATING flags will be acted upon.
744  *
745  * SIDEQ	- Inode has likely been disconnected from the vnode topology
746  *		  and so is not visible to the vnode-based filesystem syncer
747  *		  code, but is dirty and must be included in the next
748  *		  filesystem sync.  These inodes are moved to the SYNCQ at
749  *		  the time the sync occurs.
750  *
751  *		  If a vnode is attached, inodes are not placed on this queue
752  *		  simply because they have become dirty.
753  *
754  * DELETING	- Inode is flagged for deletion during the next filesystem
755  *		  sync.  That is, the inode's chain is currently connected
756  *		  and must be deleted during the current or next fs sync.
757  *
758  * CREATING	- Inode is flagged for creation during the next filesystem
759  *		  sync.  That is, the inode's chain topology exists (so
760  *		  kernel buffer flushes can occur), but is currently
761  *		  disconnected and must be inserted during the current or
762  *		  next fs sync.  If the DELETING flag is also set, the
763  *		  topology can be thrown away instead.
764  *
765  * If an inode that is already part of the current filesystem sync is
766  * modified by the frontend, including by buffer flushes, the inode lock
767  * code detects the SYNCQ flag and moves the inode to the head of the
768  * flush-in-progress, then blocks until the flush has gotten past it.
769  */
770 #define HAMMER2_INODE_MODIFIED		0x0001
771 #define HAMMER2_INODE_SROOT		0x0002	/* kmalloc special case */
772 #define HAMMER2_INODE_RENAME_INPROG	0x0004
773 #define HAMMER2_INODE_ONRBTREE		0x0008
774 #define HAMMER2_INODE_RESIZED		0x0010	/* requires inode_fsync */
775 #define HAMMER2_INODE_UNUSED0020	0x0020
776 #define HAMMER2_INODE_ISUNLINKED	0x0040
777 #define HAMMER2_INODE_METAGOOD		0x0080	/* inode meta-data good */
778 #define HAMMER2_INODE_SIDEQ		0x0100	/* on side processing queue */
779 #define HAMMER2_INODE_NOSIDEQ		0x0200	/* disable sideq operation */
780 #define HAMMER2_INODE_DIRTYDATA		0x0400	/* interlocks inode flush */
781 #define HAMMER2_INODE_SYNCQ		0x0800	/* sync interlock, sequenced */
782 #define HAMMER2_INODE_DELETING		0x1000	/* sync interlock, chain topo */
783 #define HAMMER2_INODE_CREATING		0x2000	/* sync interlock, chain topo */
784 #define HAMMER2_INODE_SYNCQ_WAKEUP	0x4000	/* sync interlock wakeup */
785 #define HAMMER2_INODE_SYNCQ_PASS2	0x8000	/* force retry delay */
786 
787 #define HAMMER2_INODE_DIRTY		(HAMMER2_INODE_MODIFIED |	\
788 					 HAMMER2_INODE_DIRTYDATA |	\
789 					 HAMMER2_INODE_DELETING |	\
790 					 HAMMER2_INODE_CREATING)
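
/*
 * Illustrative sketch (not compiled): frontend code dirties an inode via
 * hammer2_inode_modify() (declared below), after which the INODE_DIRTY mask
 * tells the sync code that the inode must participate in the next
 * filesystem sync.
 */
#if 0
	hammer2_inode_modify(ip);	/* mark ip->meta about to change */
	ip->meta.mtime = new_mtime;	/* hypothetical metadata update */

	if (ip->flags & HAMMER2_INODE_DIRTY) {
		/* inode will be picked up by the SIDEQ/SYNCQ machinery */
	}
#endif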
791 
792 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
793 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
794 		hammer2_tid_t);
795 
796 /*
797  * Transaction management sub-structure under hammer2_pfs
798  */
799 struct hammer2_trans {
800 	uint32_t		flags;
801 	uint32_t		sync_wait;
802 };
803 
804 typedef struct hammer2_trans hammer2_trans_t;
805 
806 #define HAMMER2_TRANS_ISFLUSH		0x80000000	/* flush code */
807 #define HAMMER2_TRANS_BUFCACHE		0x40000000	/* bio strategy */
808 #define HAMMER2_TRANS_SIDEQ		0x20000000	/* run sideq */
809 #define HAMMER2_TRANS_UNUSED10		0x10000000
810 #define HAMMER2_TRANS_WAITING		0x08000000	/* someone waiting */
811 #define HAMMER2_TRANS_RESCAN		0x04000000	/* rescan sideq */
812 #define HAMMER2_TRANS_MASK		0x00FFFFFF	/* count mask */
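
/*
 * Illustrative sketch (not compiled): the low 24 bits of trans.flags count
 * active transactions while the high bits carry the state flags above, so
 * both can be inspected from a single atomic load.
 */
#if 0
	uint32_t oflags = pmp->trans.flags;

	if ((oflags & HAMMER2_TRANS_ISFLUSH) &&
	    (oflags & HAMMER2_TRANS_MASK) > 1) {
		/* a flush is running alongside other active transactions */
	}
#endif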
813 
814 #define HAMMER2_FREEMAP_HEUR_NRADIX	4	/* pwr 2 PBUFRADIX-LBUFRADIX */
815 #define HAMMER2_FREEMAP_HEUR_TYPES	8
816 #define HAMMER2_FREEMAP_HEUR_SIZE	(HAMMER2_FREEMAP_HEUR_NRADIX * \
817 					 HAMMER2_FREEMAP_HEUR_TYPES)
818 
819 #define HAMMER2_DEDUP_HEUR_SIZE		(65536 * 4)
820 #define HAMMER2_DEDUP_HEUR_MASK		(HAMMER2_DEDUP_HEUR_SIZE - 1)
821 
822 #define HAMMER2_FLUSH_TOP		0x0001
823 #define HAMMER2_FLUSH_ALL		0x0002
824 #define HAMMER2_FLUSH_INODE_STOP	0x0004	/* stop at sub-inode */
825 #define HAMMER2_FLUSH_FSSYNC		0x0008	/* part of filesystem sync */
826 
827 
828 /*
829  * Hammer2 support thread element.
830  *
831  * Potentially many support threads can hang off of hammer2, primarily
832  * off the hammer2_pfs structure.  Typically:
833  *
834  * td x Nodes		 	A synchronization thread for each node.
835  * td x Nodes x workers		Worker threads for frontend operations.
836  * td x 1			Bioq thread for logical buffer writes.
837  *
838  * In addition, the synchronization thread(s) associated with the
839  * super-root PFS (spmp) for a node is responsible for automatic bulkfree
840  * and dedup scans.
841  */
842 struct hammer2_thread {
843 	struct hammer2_pfs *pmp;
844 	struct hammer2_dev *hmp;
845 	hammer2_xop_list_t xopq;
846 	thread_t	td;
847 	uint32_t	flags;
848 	int		depth;
849 	int		clindex;	/* cluster element index */
850 	int		repidx;
851 	char		*scratch;	/* MAXPHYS */
852 };
853 
854 typedef struct hammer2_thread hammer2_thread_t;
855 
856 #define HAMMER2_THREAD_UNMOUNTING	0x0001	/* unmount request */
857 #define HAMMER2_THREAD_DEV		0x0002	/* related to dev, not pfs */
858 #define HAMMER2_THREAD_WAITING		0x0004	/* thread in idle tsleep */
859 #define HAMMER2_THREAD_REMASTER		0x0008	/* remaster request */
860 #define HAMMER2_THREAD_STOP		0x0010	/* exit request */
861 #define HAMMER2_THREAD_FREEZE		0x0020	/* force idle */
862 #define HAMMER2_THREAD_FROZEN		0x0040	/* thread is frozen */
863 #define HAMMER2_THREAD_XOPQ		0x0080	/* work pending */
864 #define HAMMER2_THREAD_STOPPED		0x0100	/* thread has stopped */
865 #define HAMMER2_THREAD_UNFREEZE		0x0200
866 
867 #define HAMMER2_THREAD_WAKEUP_MASK	(HAMMER2_THREAD_UNMOUNTING |	\
868 					 HAMMER2_THREAD_REMASTER |	\
869 					 HAMMER2_THREAD_STOP |		\
870 					 HAMMER2_THREAD_FREEZE |	\
871 					 HAMMER2_THREAD_XOPQ)
872 
873 /*
874  * Support structure for dedup heuristic.
875  */
876 struct hammer2_dedup {
877 	hammer2_off_t	data_off;
878 	uint64_t	data_crc;
879 	uint32_t	ticks;
880 	uint32_t	saved_error;
881 };
882 
883 typedef struct hammer2_dedup hammer2_dedup_t;
884 
885 /*
886  * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
887  *
888  * This structure is used to distribute a VOP operation across multiple
889  * nodes.  It provides a rendezvous for concurrent node execution and
890  * can be detached from the frontend operation to allow the frontend to
891  * return early.
892  *
893  * This structure also sequences operations on up to three inodes.
894  */
895 typedef void (*hammer2_xop_func_t)(union hammer2_xop *xop, void *scratch,
896 				   int clindex);
897 
898 struct hammer2_xop_desc {
899 	hammer2_xop_func_t	storage_func;	/* local storage function */
900 	hammer2_xop_func_t	dmsg_dispatch;	/* dmsg dispatch function */
901 	hammer2_xop_func_t	dmsg_process;	/* dmsg processing function */
902 	const char		*id;
903 };
904 
905 typedef struct hammer2_xop_desc hammer2_xop_desc_t;
906 
907 struct hammer2_xop_fifo {
908 	TAILQ_ENTRY(hammer2_xop_head) entry;
909 	hammer2_chain_t		*array[HAMMER2_XOPFIFO];
910 	int			errors[HAMMER2_XOPFIFO];
911 	int			ri;
912 	int			wi;
913 	int			flags;
914 	hammer2_thread_t	*thr;
915 };
916 
917 typedef struct hammer2_xop_fifo hammer2_xop_fifo_t;
918 
919 #define HAMMER2_XOP_FIFO_RUN	0x0001
920 #define HAMMER2_XOP_FIFO_STALL	0x0002
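
/*
 * Illustrative sketch (not compiled): ri/wi are free-running read/write
 * indices into the HAMMER2_XOPFIFO-sized ring, so slots are addressed by
 * masking with HAMMER2_XOPFIFO_MASK and the fifo is full when (wi - ri)
 * reaches HAMMER2_XOPFIFO.
 */
#if 0
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	++fifo->wi;
#endif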
921 
922 struct hammer2_xop_head {
923 	hammer2_xop_desc_t	*desc;
924 	hammer2_tid_t		mtid;
925 	struct hammer2_inode	*ip1;
926 	struct hammer2_inode	*ip2;
927 	struct hammer2_inode	*ip3;
928 	uint64_t		run_mask;
929 	uint64_t		chk_mask;
930 	int			flags;
931 	int			state;
932 	int			error;
933 	hammer2_key_t		collect_key;
934 	char			*name1;
935 	size_t			name1_len;
936 	char			*name2;
937 	size_t			name2_len;
938 	hammer2_xop_fifo_t	collect[HAMMER2_MAXCLUSTER];
939 	hammer2_cluster_t	cluster;	/* help collections */
940 	hammer2_io_t		*focus_dio;
941 };
942 
943 typedef struct hammer2_xop_head hammer2_xop_head_t;
944 
945 struct hammer2_xop_ipcluster {
946 	hammer2_xop_head_t	head;
947 };
948 
949 struct hammer2_xop_strategy {
950 	hammer2_xop_head_t	head;
951 	hammer2_key_t		lbase;
952 	int			finished;
953 	hammer2_mtx_t		lock;
954 	struct bio		*bio;
955 };
956 
957 struct hammer2_xop_readdir {
958 	hammer2_xop_head_t	head;
959 	hammer2_key_t		lkey;
960 };
961 
962 struct hammer2_xop_nresolve {
963 	hammer2_xop_head_t	head;
964 	hammer2_key_t		lhc;	/* if name is NULL, use lhc */
965 };
966 
967 struct hammer2_xop_unlink {
968 	hammer2_xop_head_t	head;
969 	int			isdir;
970 	int			dopermanent;
971 };
972 
973 #define H2DOPERM_PERMANENT	0x01
974 #define H2DOPERM_FORCE		0x02
975 #define H2DOPERM_IGNINO		0x04
976 
977 struct hammer2_xop_nrename {
978 	hammer2_xop_head_t	head;
979 	hammer2_tid_t		lhc;
980 	int			ip_key;
981 };
982 
983 struct hammer2_xop_scanlhc {
984 	hammer2_xop_head_t	head;
985 	hammer2_key_t		lhc;
986 };
987 
988 struct hammer2_xop_scanall {
989 	hammer2_xop_head_t	head;
990 	hammer2_key_t		key_beg;	/* inclusive */
991 	hammer2_key_t		key_end;	/* inclusive */
992 	int			resolve_flags;
993 	int			lookup_flags;
994 };
995 
996 struct hammer2_xop_lookup {
997 	hammer2_xop_head_t	head;
998 	hammer2_key_t		lhc;
999 };
1000 
1001 struct hammer2_xop_mkdirent {
1002 	hammer2_xop_head_t	head;
1003 	hammer2_dirent_head_t	dirent;
1004 	hammer2_key_t		lhc;
1005 };
1006 
1007 struct hammer2_xop_create {
1008 	hammer2_xop_head_t	head;
1009 	hammer2_inode_meta_t	meta;		/* initial metadata */
1010 	hammer2_key_t		lhc;
1011 	int			flags;
1012 };
1013 
1014 struct hammer2_xop_destroy {
1015 	hammer2_xop_head_t	head;
1016 };
1017 
1018 struct hammer2_xop_fsync {
1019 	hammer2_xop_head_t	head;
1020 	hammer2_inode_meta_t	meta;
1021 	hammer2_off_t		osize;
1022 	u_int			ipflags;
1023 	int			clear_directdata;
1024 };
1025 
1026 struct hammer2_xop_unlinkall {
1027 	hammer2_xop_head_t	head;
1028 	hammer2_key_t		key_beg;
1029 	hammer2_key_t		key_end;
1030 };
1031 
1032 struct hammer2_xop_connect {
1033 	hammer2_xop_head_t	head;
1034 	hammer2_key_t		lhc;
1035 };
1036 
1037 struct hammer2_xop_flush {
1038 	hammer2_xop_head_t	head;
1039 };
1040 
1041 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
1042 typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
1043 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
1044 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
1045 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
1046 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
1047 typedef struct hammer2_xop_mkdirent hammer2_xop_mkdirent_t;
1048 typedef struct hammer2_xop_create hammer2_xop_create_t;
1049 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
1050 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
1051 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
1052 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
1053 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
1054 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
1055 typedef struct hammer2_xop_connect hammer2_xop_connect_t;
1056 typedef struct hammer2_xop_flush hammer2_xop_flush_t;
1057 
1058 union hammer2_xop {
1059 	hammer2_xop_head_t	head;
1060 	hammer2_xop_ipcluster_t	xop_ipcluster;
1061 	hammer2_xop_readdir_t	xop_readdir;
1062 	hammer2_xop_nresolve_t	xop_nresolve;
1063 	hammer2_xop_unlink_t	xop_unlink;
1064 	hammer2_xop_nrename_t	xop_nrename;
1065 	hammer2_xop_strategy_t	xop_strategy;
1066 	hammer2_xop_mkdirent_t	xop_mkdirent;
1067 	hammer2_xop_create_t	xop_create;
1068 	hammer2_xop_destroy_t	xop_destroy;
1069 	hammer2_xop_fsync_t	xop_fsync;
1070 	hammer2_xop_unlinkall_t	xop_unlinkall;
1071 	hammer2_xop_scanlhc_t	xop_scanlhc;
1072 	hammer2_xop_scanall_t	xop_scanall;
1073 	hammer2_xop_lookup_t	xop_lookup;
1074 	hammer2_xop_flush_t	xop_flush;
1075 	hammer2_xop_connect_t	xop_connect;
1076 };
1077 
1078 typedef union hammer2_xop hammer2_xop_t;
1079 
1080 /*
1081  * hammer2_xop_group - Manage XOP support threads.
1082  */
1083 struct hammer2_xop_group {
1084 	hammer2_thread_t	thrs[HAMMER2_MAXCLUSTER];
1085 };
1086 
1087 typedef struct hammer2_xop_group hammer2_xop_group_t;
1088 
1089 /*
1090  * flags to hammer2_xop_collect()
1091  */
1092 #define HAMMER2_XOP_COLLECT_NOWAIT	0x00000001
1093 #define HAMMER2_XOP_COLLECT_WAITALL	0x00000002
1094 
1095 /*
1096  * flags to hammer2_xop_alloc()
1097  *
1098  * MODIFYING	- This is a modifying transaction, allocate a mtid.
1099  * RECURSE	- Recurse top-level inode (for root flushes)
1100  */
1101 #define HAMMER2_XOP_MODIFYING		0x00000001
1102 #define HAMMER2_XOP_STRATEGY		0x00000002
1103 #define HAMMER2_XOP_INODE_STOP		0x00000004
1104 #define HAMMER2_XOP_VOLHDR		0x00000008
1105 #define HAMMER2_XOP_FSSYNC		0x00000010
1106 #define HAMMER2_XOP_IROOT		0x00000020
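
/*
 * Illustrative sketch (not compiled): typical frontend life cycle of an XOP.
 * The alloc/setname/start/collect/retire entry points and the
 * hammer2_nresolve_desc descriptor are assumed here from the XOP support
 * code (hammer2_admin.c / hammer2_xops.c); they are not declared in this
 * excerpt.
 */
#if 0
	hammer2_xop_nresolve_t *xop;

	xop = hammer2_xop_alloc(dip, 0);	/* MODIFYING for write ops */
	hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
	error = hammer2_xop_collect(&xop->head, 0);
	/* ... consume xop->head.cluster / focus ... */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif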
1107 
1108 /*
1109  * Global (per partition) management structure, represents a hard block
1110  * device.  Typically referenced by hammer2_chain structures when applicable.
1111  * Typically not used for network-managed elements.
1112  *
1113  * Note that a single hammer2_dev can be indirectly tied to multiple system
1114  * mount points.  There is no direct relationship.  System mounts are
1115  * per-cluster-id, not per-block-device, and a single hard mount might contain
1116  * many PFSs and those PFSs might combine together in various ways to form
1117  * the set of available clusters.
1118  */
1119 struct hammer2_dev {
1120 	struct vnode	*devvp;		/* device vnode */
1121 	int		ronly;		/* read-only mount */
1122 	int		mount_count;	/* number of actively mounted PFSs */
1123 	TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
1124 
1125 	struct malloc_type *mchain;
1126 	int		nipstacks;
1127 	int		maxipstacks;
1128 	kdmsg_iocom_t	iocom;		/* volume-level dmsg interface */
1129 	struct spinlock	io_spin;	/* iotree, iolruq access */
1130 	struct hammer2_io_tree iotree;
1131 	int		iofree_count;
1132 	int		freemap_relaxed;
1133 	hammer2_chain_t vchain;		/* anchor chain (topology) */
1134 	hammer2_chain_t fchain;		/* anchor chain (freemap) */
1135 	struct spinlock	list_spin;
1136 	struct hammer2_pfs *spmp;	/* super-root pmp for transactions */
1137 	struct lock	vollk;		/* lockmgr lock */
1138 	struct lock	bulklk;		/* bulkfree operation lock */
1139 	struct lock	bflock;		/* bulk-free manual function lock */
1140 	hammer2_off_t	heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
1141 	hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
1142 	int		volhdrno;	/* last volhdrno written */
1143 	uint32_t	hflags;		/* HMNT2 flags applicable to device */
1144 	hammer2_off_t	free_reserved;	/* nominal free reserved */
1145 	hammer2_thread_t bfthr;		/* bulk-free thread */
1146 	char		devrepname[64];	/* for kprintf */
1147 	hammer2_ioc_bulkfree_t bflast;	/* stats for last bulkfree run */
1148 	hammer2_volume_data_t voldata;
1149 	hammer2_volume_data_t volsync;	/* synchronized voldata */
1150 };
1151 
1152 typedef struct hammer2_dev hammer2_dev_t;
1153 
1154 /*
1155  * Helper functions (chain must be locked for the error state to be valid).
1156  */
1157 static __inline
1158 int
1159 hammer2_chain_rdok(hammer2_chain_t *chain)
1160 {
1161 	return (chain->error == 0);
1162 }
1163 
1164 static __inline
1165 int
1166 hammer2_chain_wrok(hammer2_chain_t *chain)
1167 {
1168 	return (chain->error == 0 && chain->hmp->ronly == 0);
1169 }
1170 
1171 /*
1172  * Per-cluster management structure.  This structure will be tied to a
1173  * system mount point if the system is mounting the PFS, but is also used
1174  * to manage clusters encountered during the super-root scan or received
1175  * via LNK_SPANs that might not be mounted.
1176  *
1177  * This structure is also used to represent the super-root that hangs off
1178  * of a hard mount point.  The super-root is not really a cluster element.
1179  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
1180  * this than to special case super-root manipulation in the hammer2_chain*
1181  * code as being only hammer2_dev-related.
1182  *
1183  * pfs_mode and pfs_nmasters are rollup fields which critically describe
1184  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
1185  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
1186  * how many masters have been configured for a cluster and is always
1187  * applicable.  pfs_types[] is an array with 1:1 correspondence to the
1188  * iroot cluster and describes the PFS types of the nodes making up the
1189  * cluster.
1190  *
1191  * WARNING! Portions of this structure have deferred initialization.  In
1192  *	    particular, if not mounted there will be no wthread.
1193  *	    Unmounted network PFSs will also be missing iroot and numerous
1194  *	    other fields will not be initialized prior to mount.
1195  *
1196  *	    Synchronization threads are chain-specific and only applicable
1197  *	    to local hard PFS entries.  A hammer2_pfs structure may contain
1198  *	    more than one when multiple hard PFSs are present on the local
1199  *	    machine which require synchronization monitoring.  Most PFSs
1200  *	    (such as snapshots) are 1xMASTER PFSs which do not need a
1201  *	    synchronization thread.
1202  *
1203  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
1204  *	    hammer2_dev->mount_count when the pfs is associated with a mount
1205  *	    point.
1206  */
1207 struct hammer2_pfs {
1208 	struct mount		*mp;
1209 	TAILQ_ENTRY(hammer2_pfs) mntentry;	/* hammer2_pfslist */
1210 	uuid_t			pfs_clid;
1211 	hammer2_dev_t		*spmp_hmp;	/* only if super-root pmp */
1212 	hammer2_dev_t		*force_local;	/* only if 'local' mount */
1213 	hammer2_inode_t		*iroot;		/* PFS root inode */
1214 	uint8_t			pfs_types[HAMMER2_MAXCLUSTER];
1215 	char			*pfs_names[HAMMER2_MAXCLUSTER];
1216 	hammer2_dev_t		*pfs_hmps[HAMMER2_MAXCLUSTER];
1217 	hammer2_blockset_t	pfs_iroot_blocksets[HAMMER2_MAXCLUSTER];
1218 	hammer2_trans_t		trans;
1219 	struct lock		lock;		/* PFS lock for certain ops */
1220 	struct lock		lock_nlink;	/* rename and nlink lock */
1221 	struct netexport	export;		/* nfs export */
1222 	int			unused00;
1223 	int			ronly;		/* read-only mount */
1224 	int			hflags;		/* pfs-specific mount flags */
1225 	struct malloc_type	*minode;
1226 	struct malloc_type	*mmsg;
1227 	struct spinlock		inum_spin;	/* inumber lookup */
1228 	struct hammer2_inode_tree inum_tree;	/* (not applicable to spmp) */
1229 	long			inum_count;	/* #of inodes in inum_tree */
1230 	struct spinlock		lru_spin;	/* lru_list access */
1231 	struct hammer2_chain_list lru_list;	/* basis for LRU tests */
1232 	int			lru_count;	/* #of chains on LRU */
1233 	int			flags;
1234 	hammer2_tid_t		modify_tid;	/* modify transaction id */
1235 	hammer2_tid_t		inode_tid;	/* inode allocator */
1236 	uint8_t			pfs_nmasters;	/* total masters */
1237 	uint8_t			pfs_mode;	/* operating mode PFSMODE */
1238 	uint8_t			unused01;
1239 	uint8_t			unused02;
1240 	int			free_ticks;	/* free_* calculations */
1241 	long			inmem_inodes;
1242 	hammer2_off_t		free_reserved;
1243 	hammer2_off_t		free_nominal;
1244 	uint32_t		inmem_dirty_chains;
1245 	int			count_lwinprog;	/* logical write in prog */
1246 	struct spinlock		list_spin;
1247 	struct inoq_head	syncq;		/* SYNCQ flagged inodes */
1248 	struct depq_head	depq;		/* SIDEQ flagged inodes */
1249 	long			sideq_count;	/* total inodes on depq */
1250 	hammer2_thread_t	sync_thrs[HAMMER2_MAXCLUSTER];
1251 	uint32_t		cluster_flags;	/* cached cluster flags */
1252 	int			has_xop_threads;
1253 	struct spinlock		xop_spin;	/* xop sequencer */
1254 	hammer2_xop_group_t	*xop_groups;
1255 };
1256 
1257 typedef struct hammer2_pfs hammer2_pfs_t;
1258 
1259 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
1260 
1261 /*
1262  * pmp->flags
1263  */
1264 #define HAMMER2_PMPF_SPMP	0x00000001
1265 #define HAMMER2_PMPF_EMERG	0x00000002	/* Emergency delete mode */
1266 
1267 /*
1268  * NOTE: The LRU list contains at least all the chains with refs == 0
1269  *	 that can be recycled, and may contain additional chains which
1270  *	 cannot.
1271  */
1272 #define HAMMER2_LRU_LIMIT		4096
1273 
1274 #define HAMMER2_DIRTYCHAIN_WAITING	0x80000000
1275 #define HAMMER2_DIRTYCHAIN_MASK		0x7FFFFFFF
1276 
1277 #define HAMMER2_LWINPROG_WAITING	0x80000000
1278 #define HAMMER2_LWINPROG_WAITING0	0x40000000
1279 #define HAMMER2_LWINPROG_MASK		0x3FFFFFFF
1280 
1281 /*
1282  * hammer2_cluster_check
1283  */
1284 #define HAMMER2_CHECK_NULL	0x00000001
1285 
1286 /*
1287  * Misc
1288  */
1289 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
1290 #define VTOI(vp)	((hammer2_inode_t *)(vp)->v_data)
1291 #endif
1292 
1293 #if defined(_KERNEL)
1294 
1295 #ifdef MALLOC_DECLARE
1296 MALLOC_DECLARE(M_HAMMER2);
1297 #endif
1298 
1299 #define ITOV(ip)	((ip)->vp)
1300 
1301 /*
1302  * Currently locked chains retain the locked buffer cache buffer for
1303  * indirect blocks, and indirect blocks can be one of two sizes.  The
1304  * device buffer has to match the case to avoid deadlocking recursive
1305  * chains that might otherwise try to access different offsets within
1306  * the same device buffer.
1307  */
1308 static __inline
1309 int
1310 hammer2_devblkradix(int radix)
1311 {
1312 #if 0
1313 	if (radix <= HAMMER2_LBUFRADIX) {
1314 		return (HAMMER2_LBUFRADIX);
1315 	} else {
1316 		return (HAMMER2_PBUFRADIX);
1317 	}
1318 #endif
1319 	return (HAMMER2_PBUFRADIX);
1320 }
1321 
1322 /*
1323  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
1324  */
1325 static __inline
1326 size_t
1327 hammer2_devblksize(size_t bytes)
1328 {
1329 #if 0
1330 	if (bytes <= HAMMER2_LBUFSIZE) {
1331 		return(HAMMER2_LBUFSIZE);
1332 	} else {
1333 		KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
1334 			 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
1335 		return (HAMMER2_PBUFSIZE);
1336 	}
1337 #endif
1338 	return (HAMMER2_PBUFSIZE);
1339 }
1340 
1341 
1342 static __inline
1343 hammer2_pfs_t *
1344 MPTOPMP(struct mount *mp)
1345 {
1346 	return ((hammer2_pfs_t *)mp->mnt_data);
1347 }
1348 
1349 #define HAMMER2_DEDUP_FRAG      (HAMMER2_PBUFSIZE / 64)
1350 #define HAMMER2_DEDUP_FRAGRADIX (HAMMER2_PBUFRADIX - 6)
1351 
1352 static __inline
1353 uint64_t
1354 hammer2_dedup_mask(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes)
1355 {
1356 	int bbeg;
1357 	int bits;
1358 	uint64_t mask;
1359 
1360 	bbeg = (int)((data_off & ~HAMMER2_OFF_MASK_RADIX) - dio->pbase) >>
1361 	       HAMMER2_DEDUP_FRAGRADIX;
1362 	bits = (int)((bytes + (HAMMER2_DEDUP_FRAG - 1)) >>
1363 	       HAMMER2_DEDUP_FRAGRADIX);
1364 	mask = ((uint64_t)1 << bbeg) - 1;
1365 	if (bbeg + bits == 64)
1366 		mask = (uint64_t)-1;
1367 	else
1368 		mask = ((uint64_t)1 << (bbeg + bits)) - 1;
1369 
1370 	mask &= ~(((uint64_t)1 << bbeg) - 1);
1371 
1372 	return mask;
1373 }
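
/*
 * Worked example (illustrative): with HAMMER2_PBUFSIZE = 64KB the dedup
 * bitmap covers the device buffer in 64 x 1KB (HAMMER2_DEDUP_FRAG) slices.
 * A 4KB block starting 8KB past dio->pbase therefore yields bbeg = 8 and
 * bits = 4, i.e. a returned mask of 0x0000000000000F00.
 */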
1374 
1375 static __inline
1376 int
1377 hammer2_error_to_errno(int error)
1378 {
1379 	if (error) {
1380 		if (error & HAMMER2_ERROR_EIO)
1381 			error = EIO;
1382 		else if (error & HAMMER2_ERROR_CHECK)
1383 			error = EDOM;
1384 		else if (error & HAMMER2_ERROR_ABORTED)
1385 			error = EINTR;
1386 		else if (error & HAMMER2_ERROR_BADBREF)
1387 			error = EIO;
1388 		else if (error & HAMMER2_ERROR_ENOSPC)
1389 			error = ENOSPC;
1390 		else if (error & HAMMER2_ERROR_ENOENT)
1391 			error = ENOENT;
1392 		else if (error & HAMMER2_ERROR_ENOTEMPTY)
1393 			error = ENOTEMPTY;
1394 		else if (error & HAMMER2_ERROR_EAGAIN)
1395 			error = EAGAIN;
1396 		else if (error & HAMMER2_ERROR_ENOTDIR)
1397 			error = ENOTDIR;
1398 		else if (error & HAMMER2_ERROR_EISDIR)
1399 			error = EISDIR;
1400 		else if (error & HAMMER2_ERROR_EINPROGRESS)
1401 			error = EINPROGRESS;
1402 		else if (error & HAMMER2_ERROR_EEXIST)
1403 			error = EEXIST;
1404 		else
1405 			error = EDOM;
1406 	}
1407 	return error;
1408 }
1409 
1410 static __inline
1411 int
1412 hammer2_errno_to_error(int error)
1413 {
1414 	switch(error) {
1415 	case 0:
1416 		return 0;
1417 	case EIO:
1418 		return HAMMER2_ERROR_EIO;
1419 	case EINVAL:
1420 	default:
1421 		return HAMMER2_ERROR_EINVAL;
1422 	}
1423 }
1424 
1425 
1426 extern struct vop_ops hammer2_vnode_vops;
1427 extern struct vop_ops hammer2_spec_vops;
1428 extern struct vop_ops hammer2_fifo_vops;
1429 extern struct hammer2_pfslist hammer2_pfslist;
1430 extern struct lock hammer2_mntlk;
1431 
1432 
1433 extern int hammer2_debug;
1434 extern int hammer2_xopgroups;
1435 extern long hammer2_debug_inode;
1436 extern int hammer2_cluster_meta_read;
1437 extern int hammer2_cluster_data_read;
1438 extern int hammer2_cluster_write;
1439 extern int hammer2_dedup_enable;
1440 extern int hammer2_always_compress;
1441 extern int hammer2_inval_enable;
1442 extern int hammer2_flush_pipe;
1443 extern int hammer2_dio_count;
1444 extern int hammer2_dio_limit;
1445 extern int hammer2_bulkfree_tps;
1446 extern int hammer2_worker_rmask;
1447 extern long hammer2_chain_allocs;
1448 extern long hammer2_chain_frees;
1449 extern long hammer2_limit_dirty_chains;
1450 extern long hammer2_limit_dirty_inodes;
1451 extern long hammer2_count_modified_chains;
1452 extern long hammer2_iod_invals;
1453 extern long hammer2_iod_file_read;
1454 extern long hammer2_iod_meta_read;
1455 extern long hammer2_iod_indr_read;
1456 extern long hammer2_iod_fmap_read;
1457 extern long hammer2_iod_volu_read;
1458 extern long hammer2_iod_file_write;
1459 extern long hammer2_iod_file_wembed;
1460 extern long hammer2_iod_file_wzero;
1461 extern long hammer2_iod_file_wdedup;
1462 extern long hammer2_iod_meta_write;
1463 extern long hammer2_iod_indr_write;
1464 extern long hammer2_iod_fmap_write;
1465 extern long hammer2_iod_volu_write;
1466 
1467 extern long hammer2_process_xxhash64;
1468 extern long hammer2_process_icrc32;
1469 
1470 extern struct objcache *cache_buffer_read;
1471 extern struct objcache *cache_buffer_write;
1472 extern struct objcache *cache_xops;
1473 
1474 /*
1475  * hammer2_subr.c
1476  */
1477 #define hammer2_icrc32(buf, size)	iscsi_crc32((buf), (size))
1478 #define hammer2_icrc32c(buf, size, crc)	iscsi_crc32_ext((buf), (size), (crc))
1479 
1480 int hammer2_signal_check(time_t *timep);
1481 const char *hammer2_error_str(int error);
1482 const char *hammer2_bref_type_str(hammer2_blockref_t *bref);
1483 
1484 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1485 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1486 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1487 
1488 int hammer2_get_dtype(uint8_t type);
1489 int hammer2_get_vtype(uint8_t type);
1490 uint8_t hammer2_get_obj_type(enum vtype vtype);
1491 void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
1492 uint64_t hammer2_timespec_to_time(const struct timespec *ts);
1493 uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
1494 void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);
1495 
1496 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1497 int hammer2_getradix(size_t bytes);
1498 
1499 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1500 			hammer2_key_t *lbasep, hammer2_key_t *leofp);
1501 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1502 void hammer2_update_time(uint64_t *timep);
1503 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
1504 
1505 /*
1506  * hammer2_inode.c
1507  */
1508 struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
1509 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1510 			hammer2_tid_t inum);
1511 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp,
1512 			hammer2_xop_head_t *xop, hammer2_tid_t inum, int idx);
1513 void hammer2_inode_ref(hammer2_inode_t *ip);
1514 void hammer2_inode_drop(hammer2_inode_t *ip);
1515 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1516 			hammer2_cluster_t *cluster);
1517 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1518 			int idx);
1519 void hammer2_inode_modify(hammer2_inode_t *ip);
1520 void hammer2_inode_delayed_sideq(hammer2_inode_t *ip);
1521 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1522 void hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
1523 			hammer2_inode_t *ip3, hammer2_inode_t *ip4);
1524 void hammer2_inode_unlock(hammer2_inode_t *ip);
1525 void hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
1526 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1527 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
1528 			int clindex, hammer2_chain_t **parentp, int how);
1529 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1530 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1531 			hammer2_mtx_state_t ostate);
1532 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1533 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int ostate);
1534 
1535 hammer2_inode_t *hammer2_inode_create_normal(hammer2_inode_t *pip,
1536 			struct vattr *vap, struct ucred *cred,
1537 			hammer2_key_t inum, int *errorp);
1538 hammer2_inode_t *hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
1539 			const uint8_t *name, size_t name_len,
1540 			int *errorp);
1541 int hammer2_inode_chain_ins(hammer2_inode_t *ip);
1542 int hammer2_inode_chain_des(hammer2_inode_t *ip);
1543 int hammer2_inode_chain_sync(hammer2_inode_t *ip);
1544 int hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags);
1545 int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
1546 int hammer2_dirent_create(hammer2_inode_t *dip, const char *name,
1547 			size_t name_len, hammer2_key_t inum, uint8_t type);
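
/*
 * Illustrative sketch only (variable names hypothetical): in-memory inode
 * updates are normally made under an exclusive inode lock, calling
 * hammer2_inode_modify() before touching ip->meta so the inode is marked
 * modified and queued for a later sync, roughly:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = ...;
 *	hammer2_inode_unlock(ip);
 */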
1548 
1549 /*
1550  * hammer2_chain.c
1551  */
1552 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1553 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1554 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1555 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1556 				hammer2_pfs_t *pmp,
1557 				hammer2_blockref_t *bref);
1558 void hammer2_chain_core_init(hammer2_chain_t *chain);
1559 void hammer2_chain_ref(hammer2_chain_t *chain);
1560 void hammer2_chain_ref_hold(hammer2_chain_t *chain);
1561 void hammer2_chain_drop(hammer2_chain_t *chain);
1562 void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
1563 void hammer2_chain_unhold(hammer2_chain_t *chain);
1564 void hammer2_chain_rehold(hammer2_chain_t *chain);
1565 int hammer2_chain_lock(hammer2_chain_t *chain, int how);
1566 void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
1567 void hammer2_chain_load_data(hammer2_chain_t *chain);
1568 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1569 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1570 
1571 int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
1572 				int clindex, int flags,
1573 				hammer2_chain_t **parentp,
1574 				hammer2_chain_t **chainp);
1575 int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
1576 				hammer2_off_t dedup_off, int flags);
1577 int hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
1578 				hammer2_tid_t mtid, int flags);
1579 int hammer2_chain_resize(hammer2_chain_t *chain,
1580 				hammer2_tid_t mtid, hammer2_off_t dedup_off,
1581 				int nradix, int flags);
1582 void hammer2_chain_unlock(hammer2_chain_t *chain);
1583 void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
1584 void hammer2_chain_wait(hammer2_chain_t *chain);
1585 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1586 				hammer2_blockref_t *bref, int how);
1587 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1588 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1589 hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t *chain, int flags);
1590 hammer2_chain_t *hammer2_chain_repparent(hammer2_chain_t **chainp, int flags);
1591 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1592 				hammer2_key_t *key_nextp,
1593 				hammer2_key_t key_beg, hammer2_key_t key_end,
1594 				int *errorp, int flags);
1595 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1596 				hammer2_chain_t *chain,
1597 				hammer2_key_t *key_nextp,
1598 				hammer2_key_t key_beg, hammer2_key_t key_end,
1599 				int *errorp, int flags);
1600 int hammer2_chain_scan(hammer2_chain_t *parent,
1601 				hammer2_chain_t **chainp,
1602 				hammer2_blockref_t *bref,
1603 				int *firstp, int flags);
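
/*
 * Illustrative sketch only (variables and flags hypothetical): ranged
 * scans are typically written as a lookup/next loop bracketed by
 * hammer2_chain_lookup_init()/hammer2_chain_lookup_done(), roughly:
 *
 *	parent = hammer2_chain_lookup_init(base_chain, 0);
 *	chain = hammer2_chain_lookup(&parent, &key_next,
 *				     key_beg, key_end, &error, flags);
 *	while (chain) {
 *		(examine chain)
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end, &error, flags);
 *	}
 *	hammer2_chain_lookup_done(parent);
 */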
1604 
1605 int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
1606 				hammer2_dev_t *hmp, hammer2_pfs_t *pmp,
1607 				int methods, hammer2_key_t key, int keybits,
1608 				int type, size_t bytes, hammer2_tid_t mtid,
1609 				hammer2_off_t dedup_off, int flags);
1610 void hammer2_chain_rename(hammer2_chain_t **parentp,
1611 				hammer2_chain_t *chain,
1612 				hammer2_tid_t mtid, int flags);
1613 int hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
1614 				hammer2_tid_t mtid, int flags);
1615 int hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
1616 				hammer2_chain_t *chain);
1617 void hammer2_chain_setflush(hammer2_chain_t *chain);
1618 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1619 				hammer2_blockref_t *base, int count);
1620 hammer2_chain_t *hammer2_chain_bulksnap(hammer2_dev_t *hmp);
1621 void hammer2_chain_bulkdrop(hammer2_chain_t *copy);
1622 
1623 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1624 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1625 int hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
1626 				size_t name_len);
1627 
1628 void hammer2_base_delete(hammer2_chain_t *parent,
1629 				hammer2_blockref_t *base, int count,
1630 				hammer2_chain_t *chain,
1631 				hammer2_blockref_t *obref);
1632 void hammer2_base_insert(hammer2_chain_t *parent,
1633 				hammer2_blockref_t *base, int count,
1634 				hammer2_chain_t *chain,
1635 				hammer2_blockref_t *elm);
1636 
1637 /*
1638  * hammer2_flush.c
1639  */
1640 void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
1641 int hammer2_flush(hammer2_chain_t *chain, int istop);
1642 void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
1643 void hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags);
1644 void hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags);
1645 hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
1646 void hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags);
1647 hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
1648 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
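
/*
 * Illustrative sketch only: modifying operations are bracketed by a
 * transaction on the PFS, with hammer2_trans_sub() supplying the
 * sub-transaction id (mtid) used to tag individual modifications,
 * roughly:
 *
 *	hammer2_trans_init(pmp, 0);
 *	mtid = hammer2_trans_sub(pmp);
 *	... chain/inode modifications using mtid ...
 *	hammer2_trans_done(pmp, 0);
 */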
1649 
1650 /*
1651  * hammer2_ioctl.c
1652  */
1653 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1654 				int fflag, struct ucred *cred);
1655 
1656 /*
1657  * hammer2_io.c
1658  */
1659 void hammer2_io_inval(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes);
1660 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1661 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1662 void hammer2_io_bkvasync(hammer2_io_t *dio);
1663 void hammer2_io_dedup_set(hammer2_dev_t *hmp, hammer2_blockref_t *bref);
1664 void hammer2_io_dedup_delete(hammer2_dev_t *hmp, uint8_t btype,
1665 				hammer2_off_t data_off, u_int bytes);
1666 void hammer2_io_dedup_assert(hammer2_dev_t *hmp, hammer2_off_t data_off,
1667 				u_int bytes);
1668 int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1669 				hammer2_io_t **diop);
1670 int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1671 				hammer2_io_t **diop);
1672 int _hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1673 				hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1674 void hammer2_io_setdirty(hammer2_io_t *dio);
1675 
1676 hammer2_io_t *_hammer2_io_getblk(hammer2_dev_t *hmp, int btype, off_t lbase,
1677 				int lsize, int op HAMMER2_IO_DEBUG_ARGS);
1678 hammer2_io_t *_hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase,
1679 				int lsize HAMMER2_IO_DEBUG_ARGS);
1680 void _hammer2_io_putblk(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1681 int _hammer2_io_bwrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1682 void _hammer2_io_bawrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1683 void _hammer2_io_bdwrite(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1684 void _hammer2_io_brelse(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1685 void _hammer2_io_bqrelse(hammer2_io_t **diop HAMMER2_IO_DEBUG_ARGS);
1686 void _hammer2_io_ref(hammer2_io_t *dio HAMMER2_IO_DEBUG_ARGS);
1687 
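/*
 * The hammer2_io_*() wrappers below select between the plain and the
 * debug-instrumented forms of the _hammer2_io_*() functions.  When
 * HAMMER2_IO_DEBUG is defined the wrappers append the caller's
 * __FILE__/__LINE__, which the HAMMER2_IO_DEBUG_ARGS/_CALL glue threads
 * through to the implementations; otherwise the extra arguments compile
 * away.
 */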
1688 #ifndef HAMMER2_IO_DEBUG
1689 
1690 #define hammer2_io_getblk(hmp, btype, lbase, lsize, op)			\
1691 	_hammer2_io_getblk((hmp), (btype), (lbase), (lsize), (op))
1692 #define hammer2_io_getquick(hmp, lbase, lsize)				\
1693 	_hammer2_io_getquick((hmp), (lbase), (lsize))
1694 #define hammer2_io_putblk(diop)						\
1695 	_hammer2_io_putblk(diop)
1696 #define hammer2_io_bwrite(diop)						\
1697 	_hammer2_io_bwrite((diop))
1698 #define hammer2_io_bawrite(diop)					\
1699 	_hammer2_io_bawrite((diop))
1700 #define hammer2_io_bdwrite(diop)					\
1701 	_hammer2_io_bdwrite((diop))
1702 #define hammer2_io_brelse(diop)						\
1703 	_hammer2_io_brelse((diop))
1704 #define hammer2_io_bqrelse(diop)					\
1705 	_hammer2_io_bqrelse((diop))
1706 #define hammer2_io_ref(dio)						\
1707 	_hammer2_io_ref((dio))
1708 
1709 #define hammer2_io_bread(hmp, btype, lbase, lsize, diop)		\
1710 	_hammer2_io_bread((hmp), (btype), (lbase), (lsize), (diop))
1711 
1712 #else
1713 
1714 #define hammer2_io_getblk(hmp, btype, lbase, lsize, op)			\
1715 	_hammer2_io_getblk((hmp), (btype), (lbase), (lsize), (op),	\
1716 	__FILE__, __LINE__)
1717 
1718 #define hammer2_io_getquick(hmp, lbase, lsize)				\
1719 	_hammer2_io_getquick((hmp), (lbase), (lsize), __FILE__, __LINE__)
1720 
1721 #define hammer2_io_putblk(diop)						\
1722 	_hammer2_io_putblk(diop, __FILE__, __LINE__)
1723 
1724 #define hammer2_io_bwrite(diop)						\
1725 	_hammer2_io_bwrite((diop), __FILE__, __LINE__)
1726 #define hammer2_io_bawrite(diop)					\
1727 	_hammer2_io_bawrite((diop), __FILE__, __LINE__)
1728 #define hammer2_io_bdwrite(diop)					\
1729 	_hammer2_io_bdwrite((diop), __FILE__, __LINE__)
1730 #define hammer2_io_brelse(diop)						\
1731 	_hammer2_io_brelse((diop), __FILE__, __LINE__)
1732 #define hammer2_io_bqrelse(diop)					\
1733 	_hammer2_io_bqrelse((diop), __FILE__, __LINE__)
1734 #define hammer2_io_ref(dio)						\
1735 	_hammer2_io_ref((dio), __FILE__, __LINE__)
1736 
1737 #define hammer2_io_bread(hmp, btype, lbase, lsize, diop)		\
1738 	_hammer2_io_bread((hmp), (btype), (lbase), (lsize), (diop),	\
1739 			  __FILE__, __LINE__)
1740 
1741 #endif
1742 
1743 /*
1744  * hammer2_admin.c
1745  */
1746 void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
1747 void hammer2_thr_signal2(hammer2_thread_t *thr,
1748 			uint32_t pflags, uint32_t nflags);
1749 void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
1750 void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
1751 int hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo);
1752 void hammer2_thr_create(hammer2_thread_t *thr,
1753 			hammer2_pfs_t *pmp, hammer2_dev_t *hmp,
1754 			const char *id, int clindex, int repidx,
1755 			void (*func)(void *arg));
1756 void hammer2_thr_delete(hammer2_thread_t *thr);
1757 void hammer2_thr_remaster(hammer2_thread_t *thr);
1758 void hammer2_thr_freeze_async(hammer2_thread_t *thr);
1759 void hammer2_thr_freeze(hammer2_thread_t *thr);
1760 void hammer2_thr_unfreeze(hammer2_thread_t *thr);
1761 int hammer2_thr_break(hammer2_thread_t *thr);
1762 void hammer2_primary_xops_thread(void *arg);
1763 
1764 /*
1765  * hammer2_thread.c (XOP API)
1766  */
1767 void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
1768 void hammer2_xop_setname(hammer2_xop_head_t *xop,
1769 				const char *name, size_t name_len);
1770 void hammer2_xop_setname2(hammer2_xop_head_t *xop,
1771 				const char *name, size_t name_len);
1772 size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
1773 void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
1774 void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
1775 void hammer2_xop_reinit(hammer2_xop_head_t *xop);
1776 void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
1777 void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
1778 void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc);
1779 void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
1780 				int notidx);
1781 int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
1782 void hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask);
1783 int hammer2_xop_active(hammer2_xop_head_t *xop);
1784 int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
1785 				int clindex, int error);
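
/*
 * Illustrative sketch only (per-operation xop structure and flags
 * hypothetical): a front-end (VNOPS) caller typically drives an XOP as
 * allocate -> parameterize -> start -> collect -> retire, roughly:
 *
 *	xop = hammer2_xop_alloc(ip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	...
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */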
1786 
1787 /*
1788  * hammer2_synchro.c
1789  */
1790 void hammer2_primary_sync_thread(void *arg);
1791 
1792 /*
1793  * XOP backends in hammer2_xops.c, primarily for VNOPS.  Other XOP backends
1794  * may be integrated into other source files.
1795  */
1796 void hammer2_xop_ipcluster(hammer2_xop_t *xop, void *scratch, int clindex);
1797 void hammer2_xop_readdir(hammer2_xop_t *xop, void *scratch, int clindex);
1798 void hammer2_xop_nresolve(hammer2_xop_t *xop, void *scratch, int clindex);
1799 void hammer2_xop_unlink(hammer2_xop_t *xop, void *scratch, int clindex);
1800 void hammer2_xop_nrename(hammer2_xop_t *xop, void *scratch, int clindex);
1801 void hammer2_xop_scanlhc(hammer2_xop_t *xop, void *scratch, int clindex);
1802 void hammer2_xop_scanall(hammer2_xop_t *xop, void *scratch, int clindex);
1803 void hammer2_xop_lookup(hammer2_xop_t *xop, void *scratch, int clindex);
1804 void hammer2_xop_delete(hammer2_xop_t *xop, void *scratch, int clindex);
1805 void hammer2_xop_inode_mkdirent(hammer2_xop_t *xop, void *scratch, int clindex);
1806 void hammer2_xop_inode_create(hammer2_xop_t *xop, void *scratch, int clindex);
1807 void hammer2_xop_inode_create_det(hammer2_xop_t *xop,
1808 				void *scratch, int clindex);
1809 void hammer2_xop_inode_create_ins(hammer2_xop_t *xop,
1810 				void *scratch, int clindex);
1811 void hammer2_xop_inode_destroy(hammer2_xop_t *xop, void *scratch, int clindex);
1812 void hammer2_xop_inode_chain_sync(hammer2_xop_t *xop, void *scratch,
1813 				int clindex);
1814 void hammer2_xop_inode_unlinkall(hammer2_xop_t *xop, void *scratch,
1815 				int clindex);
1816 void hammer2_xop_inode_connect(hammer2_xop_t *xop, void *scratch, int clindex);
1817 void hammer2_xop_inode_flush(hammer2_xop_t *xop, void *scratch, int clindex);
1818 void hammer2_xop_strategy_read(hammer2_xop_t *xop, void *scratch, int clindex);
1819 void hammer2_xop_strategy_write(hammer2_xop_t *xop, void *scratch, int clindex);
1820 
1865 extern hammer2_xop_desc_t hammer2_ipcluster_desc;
1866 extern hammer2_xop_desc_t hammer2_readdir_desc;
1867 extern hammer2_xop_desc_t hammer2_nresolve_desc;
1868 extern hammer2_xop_desc_t hammer2_unlink_desc;
1869 extern hammer2_xop_desc_t hammer2_nrename_desc;
1870 extern hammer2_xop_desc_t hammer2_scanlhc_desc;
1871 extern hammer2_xop_desc_t hammer2_scanall_desc;
1872 extern hammer2_xop_desc_t hammer2_lookup_desc;
1873 extern hammer2_xop_desc_t hammer2_delete_desc;
1874 extern hammer2_xop_desc_t hammer2_inode_mkdirent_desc;
1875 extern hammer2_xop_desc_t hammer2_inode_create_desc;
1876 extern hammer2_xop_desc_t hammer2_inode_create_det_desc;
1877 extern hammer2_xop_desc_t hammer2_inode_create_ins_desc;
1878 extern hammer2_xop_desc_t hammer2_inode_destroy_desc;
1879 extern hammer2_xop_desc_t hammer2_inode_chain_sync_desc;
1880 extern hammer2_xop_desc_t hammer2_inode_unlinkall_desc;
1881 extern hammer2_xop_desc_t hammer2_inode_connect_desc;
1882 extern hammer2_xop_desc_t hammer2_inode_flush_desc;
1883 extern hammer2_xop_desc_t hammer2_strategy_read_desc;
1884 extern hammer2_xop_desc_t hammer2_strategy_write_desc;
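
/*
 * Each descriptor above associates one of the backend functions with a
 * name for dispatch: front-ends hand the descriptor to hammer2_xop_start()
 * and the per-node worker threads invoke the backend with their clindex.
 */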
1885 
1886 /*
1887  * hammer2_msgops.c
1888  */
1889 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1890 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1891 
1892 /*
1893  * hammer2_vfsops.c
1894  */
1895 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx,
1896 				u_int flags);
1897 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1898 int hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor);
1899 int hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred);
1900 
1901 hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
1902 				const hammer2_inode_data_t *ripdata,
1903 				hammer2_tid_t modify_tid,
1904 				hammer2_dev_t *force_local);
1905 void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
1906 int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1907 				ino_t ino, struct vnode **vpp);
1908 
1909 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1910 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1911 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);
1912 
1913 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1914 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1915 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp, int count);
1916 
1917 /*
1918  * hammer2_freemap.c
1919  */
1920 int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
1921 void hammer2_freemap_adjust(hammer2_dev_t *hmp,
1922 				hammer2_blockref_t *bref, int how);
1923 
1924 /*
1925  * hammer2_cluster.c
1926  */
1927 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1928 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1929 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1930 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1931 void hammer2_cluster_unhold(hammer2_cluster_t *cluster);
1932 void hammer2_cluster_rehold(hammer2_cluster_t *cluster);
1933 void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1934 int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
1935 			int flags);
1936 void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
1937 void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
1938 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1939 
1940 void hammer2_bulkfree_init(hammer2_dev_t *hmp);
1941 void hammer2_bulkfree_uninit(hammer2_dev_t *hmp);
1942 int hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
1943 			struct hammer2_ioc_bulkfree *bfi);
1944 void hammer2_dummy_xop_from_chain(hammer2_xop_head_t *xop,
1945 			hammer2_chain_t *chain);
1946 
1947 /*
1948  * hammer2_iocom.c
1949  */
1950 void hammer2_iocom_init(hammer2_dev_t *hmp);
1951 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1952 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1953 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1954 
1955 /*
1956  * hammer2_strategy.c
1957  */
1958 int hammer2_vop_strategy(struct vop_strategy_args *ap);
1959 int hammer2_vop_bmap(struct vop_bmap_args *ap);
1960 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1961 void hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
1962 				const char *data);
1963 void hammer2_dedup_clear(hammer2_dev_t *hmp);
1964 
1965 /*
1966  * More complex inlines
1967  */
1968 
1969 #define hammer2_xop_gdata(xop)	_hammer2_xop_gdata((xop), __FILE__, __LINE__)
1970 
1971 static __inline
1972 const hammer2_media_data_t *
1973 _hammer2_xop_gdata(hammer2_xop_head_t *xop, const char *file, int line)
1974 {
1975 	hammer2_chain_t *focus;
1976 	const void *data;
1977 
1978 	focus = xop->cluster.focus;
1979 	if (focus->dio) {
1980 		lockmgr(&focus->diolk, LK_SHARED);
1981 		if ((xop->focus_dio = focus->dio) != NULL) {
1982 			_hammer2_io_ref(xop->focus_dio HAMMER2_IO_DEBUG_CALL);
1983 			hammer2_io_bkvasync(xop->focus_dio);
1984 		}
1985 		data = focus->data;
1986 		lockmgr(&focus->diolk, LK_RELEASE);
1987 	} else {
1988 		data = focus->data;
1989 	}
1990 
1991 	return data;
1992 }
1993 
1994 #define hammer2_xop_pdata(xop)	_hammer2_xop_pdata((xop), __FILE__, __LINE__)
1995 
1996 static __inline
1997 void
1998 _hammer2_xop_pdata(hammer2_xop_head_t *xop, const char *file, int line)
1999 {
2000 	if (xop->focus_dio)
2001 		_hammer2_io_putblk(&xop->focus_dio HAMMER2_IO_DEBUG_CALL);
2002 }
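
/*
 * Illustrative sketch only: access to the focus data must be bracketed so
 * the DIO reference taken by hammer2_xop_gdata() is released again,
 * roughly:
 *
 *	ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
 *	... copy out whatever fields are needed ...
 *	hammer2_xop_pdata(&xop->head);
 */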
2003 
2004 #endif /* !_KERNEL */
2005 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */
2006