xref: /dragonfly/sys/vfs/hammer2/hammer2.h (revision 0dbf214d)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61 
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64 
65 #ifdef _KERNEL
66 #include <sys/param.h>
67 #endif
68 #include <sys/types.h>
69 #ifdef _KERNEL
70 #include <sys/kernel.h>
71 #endif
72 #include <sys/conf.h>
73 #ifdef _KERNEL
74 #include <sys/systm.h>
75 #endif
76 #include <sys/tree.h>
77 #include <sys/malloc.h>
78 #include <sys/mount.h>
79 #include <sys/vnode.h>
80 #include <sys/proc.h>
81 #include <sys/mountctl.h>
82 #include <sys/priv.h>
83 #include <sys/stat.h>
84 #include <sys/thread.h>
85 #include <sys/globaldata.h>
86 #include <sys/lockf.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/limits.h>
90 #include <sys/dmsg.h>
91 #include <sys/mutex.h>
92 #ifdef _KERNEL
93 #include <sys/kern_syscall.h>
94 #endif
95 
96 #ifdef _KERNEL
97 #include <sys/signal2.h>
98 #include <sys/buf2.h>
99 #include <sys/mutex2.h>
100 #include <sys/thread2.h>
101 #endif
102 
103 #include "hammer2_xxhash.h"
104 #include "hammer2_disk.h"
105 #include "hammer2_mount.h"
106 #include "hammer2_ioctl.h"
107 
108 struct hammer2_io;
109 struct hammer2_chain;
110 struct hammer2_cluster;
111 struct hammer2_inode;
112 struct hammer2_dev;
113 struct hammer2_pfs;
114 struct hammer2_span;
115 struct hammer2_msg;
116 struct hammer2_thread;
117 union hammer2_xop;
118 
119 /*
120  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
121  * abortable locks, and both exclusive and shared spinlocks.  Normal
122  * synchronous non-abortable locks can be substituted for spinlocks.
123  */
124 typedef mtx_t				hammer2_mtx_t;
125 typedef mtx_link_t			hammer2_mtx_link_t;
126 typedef mtx_state_t			hammer2_mtx_state_t;
127 
128 typedef struct spinlock			hammer2_spin_t;
129 
130 #define hammer2_mtx_ex			mtx_lock_ex_quick
131 #define hammer2_mtx_ex_try		mtx_lock_ex_try
132 #define hammer2_mtx_sh			mtx_lock_sh_quick
133 #define hammer2_mtx_sh_again		mtx_lock_sh_again
134 #define hammer2_mtx_sh_try		mtx_lock_sh_try
135 #define hammer2_mtx_unlock		mtx_unlock
136 #define hammer2_mtx_downgrade		mtx_downgrade
137 #define hammer2_mtx_owned		mtx_owned
138 #define hammer2_mtx_init		mtx_init
139 #define hammer2_mtx_temp_release	mtx_lock_temp_release
140 #define hammer2_mtx_temp_restore	mtx_lock_temp_restore
141 #define hammer2_mtx_refs		mtx_lockrefs
142 
143 #define hammer2_spin_init		spin_init
144 #define hammer2_spin_sh			spin_lock_shared
145 #define hammer2_spin_ex			spin_lock
146 #define hammer2_spin_unsh		spin_unlock_shared
147 #define hammer2_spin_unex		spin_unlock
148 
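/*
 * Illustrative sketch (not part of the API): typical use of the mutex and
 * spinlock shims above.  The "example_lock"/"example_spin" names are
 * hypothetical.
 *
 *	hammer2_mtx_t	example_lock;
 *	hammer2_spin_t	example_spin;
 *
 *	hammer2_mtx_init(&example_lock, "h2exmtx");
 *	hammer2_mtx_ex(&example_lock);		(exclusive critical section)
 *	hammer2_mtx_unlock(&example_lock);
 *
 *	hammer2_spin_init(&example_spin, "h2exspin");
 *	hammer2_spin_sh(&example_spin);		(shared spinlock section)
 *	hammer2_spin_unsh(&example_spin);
 */
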
149 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
150 TAILQ_HEAD(hammer2_chain_list, hammer2_chain);
151 
152 typedef struct hammer2_xop_list	hammer2_xop_list_t;
153 
154 #ifdef _KERNEL
155 /*
156  * General lock support
157  */
158 static __inline
159 int
160 hammer2_mtx_upgrade_try(hammer2_mtx_t *mtx)
161 {
162 	return mtx_upgrade_try(mtx);
163 }
164 
165 #endif
166 
167 /*
168  * The xid tracks internal transactional updates.
169  *
170  * XXX fix-me, really needs to be 64-bits
171  */
172 typedef uint32_t hammer2_xid_t;
173 
174 #define HAMMER2_XID_MIN			0x00000000U
175 #define HAMMER2_XID_MAX			0x7FFFFFFFU
176 
177 #define HAMMER2_LIMIT_DIRTY_CHAINS	(65536)
178 #define HAMMER2_LIMIT_DIRTY_INODES	(16384)
179 
180 /*
181  * The chain structure tracks a portion of the media topology from the
182  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
183  * data blocks, and freemap nodes and leafs.
184  *
185  * The chain structure utilizes a simple singly-homed topology and the
186  * chain's in-memory topology will move around as the chains do, due mainly
187  * to renames and indirect block creation.
188  *
189  * Block Table Updates
190  *
191  *	Block table updates for insertions and updates are delayed until the
192  *	flush.  This allows us to avoid having to modify the parent chain
193  *	all the way to the root.
194  *
195  *	Block table deletions are performed immediately (modifying the parent
196  *	in the process) because the flush code uses the chain structure to
197  *	track delayed updates and the chain will be (likely) gone or moved to
198  *	another location in the topology after a deletion.
199  *
200  *	A prior iteration of the code tried to keep the relationship intact
201  *	on deletes by doing a delete-duplicate operation on the chain, but
202  *	it added way too much complexity to the codebase.
203  *
204  * Flush Synchronization
205  *
206  *	The flush code must flush modified chains bottom-up.  Because chain
207  *	structures can shift around and are NOT topologically stable,
208  *	modified chains are independently indexed for the flush.  As the flush
209  *	runs it modifies (or further modifies) and updates the parents,
210  *	propagating the flush all the way to the volume root.
211  *
212  *	Modifying front-end operations can occur during a flush but will block
213  *	in two cases: (1) when the front-end tries to operate on the inode
214  *	currently in the midst of being flushed and (2) if the front-end
215  *	crosses an inode currently being flushed (such as during a rename).
216  *	So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
217  *	the flusher is currently working on "a/b/c", the rename will block
218  *	temporarily in order to ensure that "x" exists in one place or the
219  *	other.
220  *
221  *	Meta-data statistics are updated by the flusher.  The front-end will
222  *	make estimates but meta-data must be fully synchronized only during a
223  *	flush in order to ensure that it remains correct across a crash.
224  *
225  *	Multiple flush synchronizations can theoretically be in-flight at the
226  *	same time but the implementation is not coded to handle the case and
227  *	currently serializes them.
228  *
229  * Snapshots:
230  *
231  *	Snapshots currently require the subdirectory tree being snapshotted
232  *	to be flushed.  The snapshot then creates a new super-root inode which
233  *	copies the flushed blockdata of the directory or file that was
234  *	snapshotted.
235  *
236  * RBTREE NOTES:
237  *
238  *	- Note that the radix tree runs in powers of 2 only so sub-trees
239  *	  cannot straddle edges.
240  */
241 RB_HEAD(hammer2_chain_tree, hammer2_chain);
242 TAILQ_HEAD(h2_flush_list, hammer2_chain);
243 TAILQ_HEAD(h2_core_list, hammer2_chain);
244 
245 #define CHAIN_CORE_DELETE_BMAP_ENTRIES	\
246 	(HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
247 
248 struct hammer2_reptrack {
249 	hammer2_spin_t	spin;
250 	struct hammer2_reptrack *next;
251 	struct hammer2_chain	*chain;
252 };
253 
254 /*
255  * Core topology for chain (embedded in chain).  Protected by a spinlock.
256  */
257 struct hammer2_chain_core {
258 	hammer2_spin_t	spin;
259 	struct hammer2_reptrack *reptrack;
260 	struct hammer2_chain_tree rbtree; /* sub-chains */
261 	int		live_zero;	/* blockref array opt */
262 	u_int		live_count;	/* live (not deleted) chains in tree */
263 	u_int		chain_count;	/* live + deleted chains under core */
264 	int		generation;	/* generation number (inserts only) */
265 };
266 
267 typedef struct hammer2_chain_core hammer2_chain_core_t;
268 
269 RB_HEAD(hammer2_io_tree, hammer2_io);
270 
271 /*
272  * DIO - Management structure wrapping system buffer cache.
273  *
274  * HAMMER2 uses an I/O abstraction that allows it to cache and manipulate
275  * fixed-sized filesystem buffers fronted by variable-sized hammer2_chain
276  * structures.
277  */
278 struct hammer2_io {
279 	RB_ENTRY(hammer2_io) rbnode;	/* indexed by device offset */
280 	struct hammer2_dev *hmp;
281 	struct buf	*bp;
282 	off_t		pbase;
283 	uint64_t	refs;
284 	int		psize;
285 	int		act;		/* activity */
286 	int		btype;		/* approximate BREF_TYPE_* */
287 	int		ticks;
288 	int		error;
289 	int		unused01;
290 	uint64_t	dedup_valid;	/* valid for dedup operation */
291 	uint64_t	dedup_alloc;	/* allocated / de-dupable */
292 };
293 
294 typedef struct hammer2_io hammer2_io_t;
295 
296 #define HAMMER2_DIO_INPROG	0x8000000000000000LLU	/* bio in progress */
297 #define HAMMER2_DIO_GOOD	0x4000000000000000LLU	/* dio->bp is stable */
298 #define HAMMER2_DIO_WAITING	0x2000000000000000LLU	/* wait on INPROG */
299 #define HAMMER2_DIO_DIRTY	0x1000000000000000LLU	/* flush last drop */
300 
301 #define HAMMER2_DIO_MASK	0x00FFFFFFFFFFFFFFLLU
302 
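/*
 * The DIO flags above occupy the high bits of dio->refs while
 * HAMMER2_DIO_MASK covers the low-order reference count.  Illustrative
 * sketch of decoding a refs snapshot (variable names are hypothetical):
 *
 *	uint64_t refs = dio->refs;
 *
 *	if (refs & HAMMER2_DIO_GOOD)
 *		;				(dio->bp is stable)
 *	count = refs & HAMMER2_DIO_MASK;	(active references)
 */
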
303 /*
304  * Primary chain structure keeps track of the topology in-memory.
305  */
306 struct hammer2_chain {
307 	hammer2_mtx_t		lock;
308 	hammer2_chain_core_t	core;
309 	RB_ENTRY(hammer2_chain) rbnode;		/* live chain(s) */
310 	hammer2_blockref_t	bref;
311 	struct hammer2_chain	*parent;
312 	struct hammer2_dev	*hmp;
313 	struct hammer2_pfs	*pmp;		/* A PFS or super-root (spmp) */
314 
315 	struct lock	diolk;			/* xop focus interlock */
316 	hammer2_io_t	*dio;			/* physical data buffer */
317 	hammer2_media_data_t *data;		/* data pointer shortcut */
318 	u_int		bytes;			/* physical data size */
319 	u_int		flags;
320 	u_int		refs;
321 	u_int		lockcnt;
322 	int		error;			/* on-lock data error state */
323 	int		cache_index;		/* heur speeds up lookup */
324 
325 	TAILQ_ENTRY(hammer2_chain) flush_node;	/* flush list */
326 	TAILQ_ENTRY(hammer2_chain) lru_node;	/* 0-refs LRU */
327 };
328 
329 typedef struct hammer2_chain hammer2_chain_t;
330 
331 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
332 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
333 
334 /*
335  * Special notes on flags:
336  *
337  * INITIAL	- This flag allows a chain to be created and for storage to
338  *		  be allocated without having to immediately instantiate the
339  *		  related buffer.  The data is assumed to be all-zeros.  It
340  *		  is primarily used for indirect blocks.
341  *
342  * MODIFIED	- The chain's media data has been modified.  Prevents chain
343  *		  free on lastdrop if still in the topology.
344  *
345  * UPDATE	- Chain might not be modified but parent blocktable needs
346  *		  an update.  Prevents chain free on lastdrop if still in
347  *		  the topology.
348  *
349  * FICTITIOUS	- Faked chain as a placeholder for an error condition.  This
350  *		  chain is unsuitable for I/O.
351  *
352  * BMAPPED	- Indicates that the chain is present in the parent blockmap.
353  *
354  * BMAPUPD	- Indicates that the chain is present but needs to be updated
355  *		  in the parent blockmap.
356  */
357 #define HAMMER2_CHAIN_MODIFIED		0x00000001	/* dirty chain data */
358 #define HAMMER2_CHAIN_ALLOCATED		0x00000002	/* kmalloc'd chain */
359 #define HAMMER2_CHAIN_DESTROY		0x00000004
360 #define HAMMER2_CHAIN_DEDUPABLE		0x00000008	/* registered w/dedup */
361 #define HAMMER2_CHAIN_DELETED		0x00000010	/* deleted chain */
362 #define HAMMER2_CHAIN_INITIAL		0x00000020	/* initial create */
363 #define HAMMER2_CHAIN_UPDATE		0x00000040	/* need parent update */
364 #define HAMMER2_CHAIN_DEFERRED		0x00000080	/* flush depth defer */
365 #define HAMMER2_CHAIN_TESTEDGOOD	0x00000100	/* crc tested good */
366 #define HAMMER2_CHAIN_ONFLUSH		0x00000200	/* on a flush list */
367 #define HAMMER2_CHAIN_FICTITIOUS	0x00000400	/* unsuitable for I/O */
368 #define HAMMER2_CHAIN_VOLUMESYNC	0x00000800	/* needs volume sync */
369 #define HAMMER2_CHAIN_DELAYED		0x00001000	/* delayed flush */
370 #define HAMMER2_CHAIN_COUNTEDBREFS	0x00002000	/* block table stats */
371 #define HAMMER2_CHAIN_ONRBTREE		0x00004000	/* on parent RB tree */
372 #define HAMMER2_CHAIN_ONLRU		0x00008000	/* on LRU list */
373 #define HAMMER2_CHAIN_EMBEDDED		0x00010000	/* embedded data */
374 #define HAMMER2_CHAIN_RELEASE		0x00020000	/* don't keep around */
375 #define HAMMER2_CHAIN_BMAPPED		0x00040000	/* present in blkmap */
376 #define HAMMER2_CHAIN_BMAPUPD		0x00080000	/* +needs updating */
377 #define HAMMER2_CHAIN_IOINPROG		0x00100000	/* I/O interlock */
378 #define HAMMER2_CHAIN_IOSIGNAL		0x00200000	/* I/O interlock */
379 #define HAMMER2_CHAIN_PFSBOUNDARY	0x00400000	/* super->pfs inode */
380 #define HAMMER2_CHAIN_HINT_LEAF_COUNT	0x00800000	/* redo leaf count */
381 #define HAMMER2_CHAIN_LRUHINT		0x01000000	/* was reused */
382 
383 #define HAMMER2_CHAIN_FLUSH_MASK	(HAMMER2_CHAIN_MODIFIED |	\
384 					 HAMMER2_CHAIN_UPDATE |		\
385 					 HAMMER2_CHAIN_ONFLUSH |	\
386 					 HAMMER2_CHAIN_DESTROY)
387 
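/*
 * Illustrative sketch: HAMMER2_CHAIN_FLUSH_MASK collects the flags which
 * require flusher attention, so a quick test (hedged example, not a
 * definitive recipe) looks like:
 *
 *	if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK)
 *		;		(chain or something under it needs flushing)
 */
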
388 /*
389  * Hammer2 error codes, used by chain->error and cluster->error.  The error
390  * code is typically set on-lock unless no I/O was requested, and set on
391  * I/O otherwise.  If set for a cluster it generally means that the cluster
392  * code could not find a valid copy to present.
393  *
394  * All H2 error codes are flags and can be accumulated by ORing them
395  * together.
396  *
397  * IO		- An I/O error occurred
398  * CHECK	- I/O succeeded but did not match the check code
399  * INCOMPLETE	- A cluster is not complete enough to use, or
400  *		  a chain cannot be loaded because its parent has an error.
401  *
402  * NOTE: API allows callers to check zero/non-zero to determine if an error
403  *	 condition exists.
404  *
405  * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
406  *	 NULL on other errors.  Check chain->error, not chain->data.
407  */
408 #define HAMMER2_ERROR_NONE		0	/* no error (must be 0) */
409 #define HAMMER2_ERROR_EIO		0x00000001	/* device I/O error */
410 #define HAMMER2_ERROR_CHECK		0x00000002	/* check code error */
411 #define HAMMER2_ERROR_INCOMPLETE	0x00000004	/* incomplete cluster */
412 #define HAMMER2_ERROR_DEPTH		0x00000008	/* tmp depth limit */
413 #define HAMMER2_ERROR_BADBREF		0x00000010	/* illegal bref */
414 #define HAMMER2_ERROR_ENOSPC		0x00000020	/* allocation failure */
415 #define HAMMER2_ERROR_ENOENT		0x00000040	/* entry not found */
416 #define HAMMER2_ERROR_ENOTEMPTY		0x00000080	/* dir not empty */
417 #define HAMMER2_ERROR_EAGAIN		0x00000100	/* retry */
418 #define HAMMER2_ERROR_ENOTDIR		0x00000200	/* not directory */
419 #define HAMMER2_ERROR_EISDIR		0x00000400	/* is directory */
420 #define HAMMER2_ERROR_EINPROGRESS	0x00000800	/* already running */
421 #define HAMMER2_ERROR_ABORTED		0x00001000	/* aborted operation */
422 #define HAMMER2_ERROR_EOF		0x00002000	/* end of scan */
423 #define HAMMER2_ERROR_EINVAL		0x00004000	/* catch-all */
424 #define HAMMER2_ERROR_EEXIST		0x00008000	/* entry exists */
425 #define HAMMER2_ERROR_EDEADLK		0x00010000
426 #define HAMMER2_ERROR_ESRCH		0x00020000
427 #define HAMMER2_ERROR_ETIMEDOUT		0x00040000
428 
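/*
 * Because H2 error codes are flag bits they can be accumulated across
 * several sub-operations and translated once at the end.  Illustrative
 * sketch using hammer2_error_to_errno() (defined later in this header);
 * the sub_op_*() calls are hypothetical:
 *
 *	int error = 0;
 *
 *	error |= sub_op_a();		(each returns HAMMER2_ERROR_* bits)
 *	error |= sub_op_b();
 *	if (error)
 *		return (hammer2_error_to_errno(error));
 */
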
429 /*
430  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
431  *
432  * NOTES:
433  *	NODATA	    - Asks that the chain->data not be resolved in order
434  *		      to avoid I/O.
435  *
436  *	NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
437  *		      the inode itself if the inode is in DIRECTDATA mode
438  *		      (i.e. file is <= 512 bytes).  Used by the synchronization
439  *		      code to prevent confusion.
440  *
441  *	SHARED	    - The input chain is expected to be locked shared,
442  *		      and the output chain is locked shared.
443  *
444  *	MATCHIND    - Allows an indirect block / freemap node to be returned
445  *		      when the passed key range matches the radix.  Remember
446  *		      that key_end is inclusive (e.g. {0x000,0xFFF},
447  *		      not {0x000,0x1000}).
448  *
449  *		      (Cannot be used for remote or cluster ops).
450  *
451  *	ALLNODES    - Allows NULL focus.
452  *
453  *	ALWAYS	    - Always resolve the data.  If ALWAYS and NODATA are both
454  *		      missing, bulk file data is not resolved but inodes and
455  *		      other meta-data will be.
456  */
457 #define HAMMER2_LOOKUP_UNUSED0001	0x00000001
458 #define HAMMER2_LOOKUP_NODATA		0x00000002	/* data left NULL */
459 #define HAMMER2_LOOKUP_NODIRECT		0x00000004	/* no offset=0 DD */
460 #define HAMMER2_LOOKUP_SHARED		0x00000100
461 #define HAMMER2_LOOKUP_MATCHIND		0x00000200	/* return all chains */
462 #define HAMMER2_LOOKUP_ALLNODES		0x00000400	/* allow NULL focus */
463 #define HAMMER2_LOOKUP_ALWAYS		0x00000800	/* resolve data */
464 #define HAMMER2_LOOKUP_UNUSED1000	0x00001000
465 
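/*
 * Illustrative sketch of a ranged scan using the lookup flags above and
 * the hammer2_chain_lookup()/hammer2_chain_next() prototypes declared
 * later in this header (variable names are hypothetical; parent would
 * normally come from hammer2_chain_lookup_init()):
 *
 *	chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
 *				     &error, HAMMER2_LOOKUP_NODATA);
 *	while (chain) {
 *		(examine chain->bref, chain->error, etc)
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end,
 *					   &error, HAMMER2_LOOKUP_NODATA);
 *	}
 *	hammer2_chain_lookup_done(parent);
 */
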
466 /*
467  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
468  *
469  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
470  *	 blocks in the INITIAL-create state.
471  */
472 #define HAMMER2_MODIFY_OPTDATA		0x00000002	/* data can be NULL */
473 #define HAMMER2_MODIFY_NO_MODIFY_TID	0x00000004
474 #define HAMMER2_MODIFY_UNUSED0008	0x00000008
475 
476 /*
477  * Flags passed to hammer2_chain_lock()
478  *
479  * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
480  *	 will be made to either the cluster being locked or any underlying
481  *	 cluster.  It allows the cluster to lock and access data for a subset
482  *	 of available nodes instead of all available nodes.
483  *
484  * NOTE: NONBLOCK is only used for hammer2_chain_repparent() and getparent(),
485  *	 other functions (e.g. hammer2_chain_lookup(), etc) can't handle its
486  *	 operation.
487  */
488 #define HAMMER2_RESOLVE_NEVER		1
489 #define HAMMER2_RESOLVE_MAYBE		2
490 #define HAMMER2_RESOLVE_ALWAYS		3
491 #define HAMMER2_RESOLVE_MASK		0x0F
492 
493 #define HAMMER2_RESOLVE_SHARED		0x10	/* request shared lock */
494 #define HAMMER2_RESOLVE_LOCKAGAIN	0x20	/* another shared lock */
495 #define HAMMER2_RESOLVE_RDONLY		0x40	/* higher level op flag */
496 #define HAMMER2_RESOLVE_NONBLOCK	0x80	/* non-blocking */
497 
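/*
 * Illustrative sketch of the common lock/unlock pairing using the RESOLVE
 * flags above (see the hammer2_chain_lock()/hammer2_chain_unlock()
 * prototypes later in this header):
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
 *				  HAMMER2_RESOLVE_SHARED);
 *	(read chain->data)
 *	hammer2_chain_unlock(chain);
 */
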
498 /*
499  * Flags passed to hammer2_chain_delete()
500  */
501 #define HAMMER2_DELETE_PERMANENT	0x0001
502 
503 /*
504  * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
505  * or hammer2_chain_create().
506  */
507 #define HAMMER2_INSERT_PFSROOT		0x0004
508 #define HAMMER2_INSERT_SAMEPARENT	0x0008
509 
510 /*
511  * Flags passed to hammer2_chain_delete_duplicate()
512  */
513 #define HAMMER2_DELDUP_RECORE		0x0001
514 
515 /*
516  * Cluster different types of storage together for allocations
517  */
518 #define HAMMER2_FREECACHE_INODE		0
519 #define HAMMER2_FREECACHE_INDIR		1
520 #define HAMMER2_FREECACHE_DATA		2
521 #define HAMMER2_FREECACHE_UNUSED3	3
522 #define HAMMER2_FREECACHE_TYPES		4
523 
524 /*
525  * hammer2_freemap_alloc() block preference
526  */
527 #define HAMMER2_OFF_NOPREF		((hammer2_off_t)-1)
528 
529 /*
530  * BMAP read-ahead maximum parameters
531  */
532 #define HAMMER2_BMAP_COUNT		16	/* max bmap read-ahead */
533 #define HAMMER2_BMAP_BYTES		(HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
534 
535 /*
536  * hammer2_freemap_adjust()
537  */
538 #define HAMMER2_FREEMAP_DORECOVER	1
539 #define HAMMER2_FREEMAP_DOMAYFREE	2
540 #define HAMMER2_FREEMAP_DOREALFREE	3
541 
542 /*
543  * HAMMER2 cluster - A set of chains representing the same entity.
544  *
545  * hammer2_cluster typically represents a temporary set of representative
546  * chains.  The one exception is that a hammer2_cluster is embedded in
547  * hammer2_inode.  This embedded cluster is ONLY used to track the
548  * representative chains and cannot be directly locked.
549  *
550  * A cluster is usually temporary (and thus per-thread) for locking purposes,
551  * allowing us to embed the asynchronous storage required for cluster
552  * operations in the cluster itself and adjust the state and status without
553  * having to worry too much about SMP issues.
554  *
555  * The exception is the cluster embedded in the hammer2_inode structure.
556  * This is used to cache the cluster state on an inode-by-inode basis.
557  * Individual hammer2_chain structures not incorporated into clusters might
558  * also stick around to cache miscellaneous elements.
559  *
560  * Because the cluster is a 'working copy' and is usually subject to cluster
561  * quorum rules, it is quite possible for us to end up with an insufficient
562  * number of live chains to execute an operation.  If an insufficient number
563  * of chains remain in a working copy, the operation may have to be
564  * downgraded, retried, stalled until the requisite number of chains is
565  * available, or possibly even error out depending on the mount type.
566  *
567  * A cluster's focus is set when it is locked.  The focus can only be set
568  * to a chain still part of the synchronized set.
569  */
570 #define HAMMER2_XOPFIFO		16
571 #define HAMMER2_XOPFIFO_MASK	(HAMMER2_XOPFIFO - 1)
572 #define HAMMER2_XOPGROUPS	32
573 #define HAMMER2_XOPGROUPS_MASK	(HAMMER2_XOPGROUPS - 1)
574 
575 #define HAMMER2_MAXCLUSTER	8
576 #define HAMMER2_XOPMASK_CLUSTER	(uint64_t)((1LLU << HAMMER2_MAXCLUSTER) - 1)
577 #define HAMMER2_XOPMASK_VOP	(uint64_t)0x0000000080000000LLU
578 #define HAMMER2_XOPMASK_FIFOW	(uint64_t)0x0000000040000000LLU
579 #define HAMMER2_XOPMASK_WAIT	(uint64_t)0x0000000020000000LLU
580 #define HAMMER2_XOPMASK_FEED	(uint64_t)0x0000000100000000LLU
581 
582 #define HAMMER2_XOPMASK_ALLDONE	(HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER)
583 
584 #define HAMMER2_SPECTHREADS	1	/* sync */
585 
586 struct hammer2_cluster_item {
587 	hammer2_chain_t		*chain;
588 	int			error;
589 	uint32_t		flags;
590 };
591 
592 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
593 
594 /*
595  * INVALID	- Invalid for focus, i.e. not part of synchronized set.
596  *		  Once set, this bit is sticky across operations.
597  *
598  * FEMOD	- Indicates that front-end modifying operations can
599  *		  mess with this entry and that MODSYNC copies will
600  *		  also affect it.
601  */
602 #define HAMMER2_CITEM_INVALID	0x00000001
603 #define HAMMER2_CITEM_FEMOD	0x00000002
604 #define HAMMER2_CITEM_NULL	0x00000004
605 
606 struct hammer2_cluster {
607 	int			refs;		/* track for deallocation */
608 	int			ddflag;
609 	struct hammer2_pfs	*pmp;
610 	uint32_t		flags;
611 	int			nchains;
612 	int			error;		/* error code valid on lock */
613 	int			focus_index;
614 	hammer2_chain_t		*focus;		/* current focus (or mod) */
615 	hammer2_cluster_item_t	array[HAMMER2_MAXCLUSTER];
616 };
617 
618 typedef struct hammer2_cluster	hammer2_cluster_t;
619 
620 /*
621  * WRHARD	- Hard mounts can write fully synchronized
622  * RDHARD	- Hard mounts can read fully synchronized
623  * UNHARD	- Unsynchronized masters present
624  * NOHARD	- No masters visible
625  * WRSOFT	- Soft mounts can write to at least the SOFT_MASTER
626  * RDSOFT	- Soft mounts can read from at least a SOFT_SLAVE
627  * UNSOFT	- Unsynchronized slaves present
628  * NOSOFT	- No slaves visible
629  * RDSLAVE	- slaves are accessible (possibly unsynchronized or remote).
630  * MSYNCED	- All masters are fully synchronized
631  * SSYNCED	- All known local slaves are fully synchronized to masters
632  *
633  * All available masters are always incorporated.  All PFSs belonging to a
634  * cluster (master, slave, copy, whatever) always try to synchronize the
635  * total number of known masters in the PFSs root inode.
636  *
637  * A cluster might have access to many slaves, copies, or caches, but we
638  * have a limited number of cluster slots.  Any such elements which are
639  * directly mounted from block device(s) will always be incorporated.   Note
640  * that SSYNCED only applies to such elements which are directly mounted,
641  * not to any remote slaves, copies, or caches that could be available.  These
642  * bits are used to monitor and drive our synchronization threads.
643  *
644  * When asking the question 'is any data accessible at all', then a simple
645  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
646  * these bits are set the object can be read with certain caveats:
647  * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized.
648  * and RDSLAVE - not authoritative, has some data but it could be old or
649  * incomplete.
650  *
651  * When both soft and hard mounts are available, data will be read and written
652  * via the soft mount only.  But all might be in the cluster because
653  * background synchronization threads still need to do their work.
654  */
655 #define HAMMER2_CLUSTER_INODE	0x00000001	/* embedded in inode struct */
656 #define HAMMER2_CLUSTER_UNUSED2	0x00000002
657 #define HAMMER2_CLUSTER_LOCKED	0x00000004	/* cluster lks not recursive */
658 #define HAMMER2_CLUSTER_WRHARD	0x00000100	/* hard-mount can write */
659 #define HAMMER2_CLUSTER_RDHARD	0x00000200	/* hard-mount can read */
660 #define HAMMER2_CLUSTER_UNHARD	0x00000400	/* unsynchronized masters */
661 #define HAMMER2_CLUSTER_NOHARD	0x00000800	/* no masters visible */
662 #define HAMMER2_CLUSTER_WRSOFT	0x00001000	/* soft-mount can write */
663 #define HAMMER2_CLUSTER_RDSOFT	0x00002000	/* soft-mount can read */
664 #define HAMMER2_CLUSTER_UNSOFT	0x00004000	/* unsynchronized slaves */
665 #define HAMMER2_CLUSTER_NOSOFT	0x00008000	/* no slaves visible */
666 #define HAMMER2_CLUSTER_MSYNCED	0x00010000	/* all masters synchronized */
667 #define HAMMER2_CLUSTER_SSYNCED	0x00020000	/* known slaves synchronized */
668 
669 #define HAMMER2_CLUSTER_ANYDATA	( HAMMER2_CLUSTER_RDHARD |	\
670 				  HAMMER2_CLUSTER_RDSOFT |	\
671 				  HAMMER2_CLUSTER_RDSLAVE)
672 
673 #define HAMMER2_CLUSTER_RDOK	( HAMMER2_CLUSTER_RDHARD |	\
674 				  HAMMER2_CLUSTER_RDSOFT)
675 
676 #define HAMMER2_CLUSTER_WROK	( HAMMER2_CLUSTER_WRHARD |	\
677 				  HAMMER2_CLUSTER_WRSOFT)
678 
679 #define HAMMER2_CLUSTER_ZFLAGS	( HAMMER2_CLUSTER_WRHARD |	\
680 				  HAMMER2_CLUSTER_RDHARD |	\
681 				  HAMMER2_CLUSTER_WRSOFT |	\
682 				  HAMMER2_CLUSTER_RDSOFT |	\
683 				  HAMMER2_CLUSTER_MSYNCED |	\
684 				  HAMMER2_CLUSTER_SSYNCED)
685 
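/*
 * Illustrative sketch of the 'is any data accessible at all' test
 * described above (the cluster must be locked for its flags to be valid):
 *
 *	if (cluster->flags & HAMMER2_CLUSTER_ANYDATA)
 *		;		(at least one readable element is present)
 */
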
686 /*
687  * Helper functions (cluster must be locked for flags to be valid).
688  */
689 static __inline
690 int
691 hammer2_cluster_rdok(hammer2_cluster_t *cluster)
692 {
693 	return (cluster->flags & HAMMER2_CLUSTER_RDOK);
694 }
695 
696 static __inline
697 int
698 hammer2_cluster_wrok(hammer2_cluster_t *cluster)
699 {
700 	return (cluster->flags & HAMMER2_CLUSTER_WROK);
701 }
702 
703 RB_HEAD(hammer2_inode_tree, hammer2_inode);
704 
705 /*
706  * A hammer2 inode.
707  *
708  * NOTE: The inode-embedded cluster is never used directly for I/O (since
709  *	 it may be shared).  Instead it will be replicated-in and synchronized
710  *	 back out if changed.
711  */
712 struct hammer2_inode {
713 	RB_ENTRY(hammer2_inode) rbnode;		/* inumber lookup (HL) */
714 	hammer2_mtx_t		lock;		/* inode lock */
715 	hammer2_mtx_t		truncate_lock;	/* prevent truncates */
716 	struct hammer2_pfs	*pmp;		/* PFS mount */
717 	struct vnode		*vp;
718 	struct spinlock		cluster_spin;	/* update cluster */
719 	hammer2_cluster_t	cluster;
720 	struct lockf		advlock;
721 	u_int			flags;
722 	u_int			refs;		/* +vpref, +flushref */
723 	uint8_t			comp_heuristic;
724 	hammer2_inode_meta_t	meta;		/* copy of meta-data */
725 	hammer2_off_t		osize;
726 };
727 
728 typedef struct hammer2_inode hammer2_inode_t;
729 
730 /*
731  * MODIFIED	- Inode is in a modified state, ip->meta may have changes.
732  * RESIZED	- Inode truncated (any) or inode extended beyond
733  *		  EMBEDDED_BYTES.
734  */
735 #define HAMMER2_INODE_MODIFIED		0x0001
736 #define HAMMER2_INODE_SROOT		0x0002	/* kmalloc special case */
737 #define HAMMER2_INODE_RENAME_INPROG	0x0004
738 #define HAMMER2_INODE_ONRBTREE		0x0008
739 #define HAMMER2_INODE_RESIZED		0x0010	/* requires inode_fsync */
740 #define HAMMER2_INODE_ISDELETED		0x0020	/* deleted */
741 #define HAMMER2_INODE_ISUNLINKED	0x0040
742 #define HAMMER2_INODE_METAGOOD		0x0080	/* inode meta-data good */
743 #define HAMMER2_INODE_ONSIDEQ		0x0100	/* on side processing queue */
744 #define HAMMER2_INODE_NOSIDEQ		0x0200	/* disable sideq operation */
745 #define HAMMER2_INODE_DIRTYDATA		0x0400	/* interlocks inode flush */
746 
747 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
748 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
749 		hammer2_tid_t);
750 
751 /*
752  * inode-unlink side-structure
753  */
754 struct hammer2_inode_sideq {
755 	TAILQ_ENTRY(hammer2_inode_sideq) entry;
756 	hammer2_inode_t	*ip;
757 };
758 TAILQ_HEAD(h2_sideq_list, hammer2_inode_sideq);
759 
760 typedef struct hammer2_inode_sideq hammer2_inode_sideq_t;
761 
762 /*
763  * Transaction management sub-structure under hammer2_pfs
764  */
765 struct hammer2_trans {
766 	uint32_t		flags;
767 	uint32_t		sync_wait;
768 	int			fticks;			/* FPENDING start */
769 };
770 
771 typedef struct hammer2_trans hammer2_trans_t;
772 
773 #define HAMMER2_TRANS_ISFLUSH		0x80000000	/* flush code */
774 #define HAMMER2_TRANS_BUFCACHE		0x40000000	/* bio strategy */
775 #define HAMMER2_TRANS_UNUSED20		0x20000000
776 #define HAMMER2_TRANS_FPENDING		0x10000000	/* flush pending */
777 #define HAMMER2_TRANS_WAITING		0x08000000	/* someone waiting */
778 #define HAMMER2_TRANS_MASK		0x00FFFFFF	/* count mask */
779 
780 #define HAMMER2_FREEMAP_HEUR_NRADIX	4	/* pwr 2 PBUFRADIX-MINIORADIX */
781 #define HAMMER2_FREEMAP_HEUR_TYPES	8
782 #define HAMMER2_FREEMAP_HEUR_SIZE	(HAMMER2_FREEMAP_HEUR_NRADIX * \
783 					 HAMMER2_FREEMAP_HEUR_TYPES)
784 
785 #define HAMMER2_DEDUP_HEUR_SIZE		(65536 * 4)
786 #define HAMMER2_DEDUP_HEUR_MASK		(HAMMER2_DEDUP_HEUR_SIZE - 1)
787 
788 #define HAMMER2_FLUSH_TOP		0x0001
789 #define HAMMER2_FLUSH_ALL		0x0002
790 #define HAMMER2_FLUSH_INODE_STOP	0x0004	/* stop at sub-inode */
791 
792 
793 /*
794  * Hammer2 support thread element.
795  *
796  * Potentially many support threads can hang off of hammer2, primarily
797  * off the hammer2_pfs structure.  Typically:
798  *
799  * td x Nodes		 	A synchronization thread for each node.
800  * td x Nodes x workers		Worker threads for frontend operations.
801  * td x 1			Bioq thread for logical buffer writes.
802  *
803  * In addition, the synchronization thread(s) associated with the
804  * super-root PFS (spmp) for a node is responsible for automatic bulkfree
805  * and dedup scans.
806  */
807 struct hammer2_thread {
808 	struct hammer2_pfs *pmp;
809 	struct hammer2_dev *hmp;
810 	hammer2_xop_list_t xopq;
811 	thread_t	td;
812 	uint32_t	flags;
813 	int		depth;
814 	int		clindex;	/* cluster element index */
815 	int		repidx;
816 	char		*scratch;	/* MAXPHYS */
817 };
818 
819 typedef struct hammer2_thread hammer2_thread_t;
820 
821 #define HAMMER2_THREAD_UNMOUNTING	0x0001	/* unmount request */
822 #define HAMMER2_THREAD_DEV		0x0002	/* related to dev, not pfs */
823 #define HAMMER2_THREAD_WAITING		0x0004	/* thread in idle tsleep */
824 #define HAMMER2_THREAD_REMASTER		0x0008	/* remaster request */
825 #define HAMMER2_THREAD_STOP		0x0010	/* exit request */
826 #define HAMMER2_THREAD_FREEZE		0x0020	/* force idle */
827 #define HAMMER2_THREAD_FROZEN		0x0040	/* thread is frozen */
828 #define HAMMER2_THREAD_XOPQ		0x0080	/* work pending */
829 #define HAMMER2_THREAD_STOPPED		0x0100	/* thread has stopped */
830 #define HAMMER2_THREAD_UNFREEZE		0x0200
831 
832 #define HAMMER2_THREAD_WAKEUP_MASK	(HAMMER2_THREAD_UNMOUNTING |	\
833 					 HAMMER2_THREAD_REMASTER |	\
834 					 HAMMER2_THREAD_STOP |		\
835 					 HAMMER2_THREAD_FREEZE |	\
836 					 HAMMER2_THREAD_XOPQ)
837 
838 /*
839  * Support structure for dedup heuristic.
840  */
841 struct hammer2_dedup {
842 	hammer2_off_t	data_off;
843 	uint64_t	data_crc;
844 	uint32_t	ticks;
845 	uint32_t	unused03;
846 };
847 
848 typedef struct hammer2_dedup hammer2_dedup_t;
849 
850 /*
851  * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
852  *
853  * This structure is used to distribute a VOP operation across multiple
854  * nodes.  It provides a rendezvous for concurrent node execution and
855  * can be detached from the frontend operation to allow the frontend to
856  * return early.
857  *
858  * This structure also sequences operations on up to three inodes.
859  */
860 typedef void (*hammer2_xop_func_t)(hammer2_thread_t *thr,
861 				   union hammer2_xop *xop);
862 
863 struct hammer2_xop_fifo {
864 	TAILQ_ENTRY(hammer2_xop_head) entry;
865 	hammer2_chain_t		*array[HAMMER2_XOPFIFO];
866 	int			errors[HAMMER2_XOPFIFO];
867 	int			ri;
868 	int			wi;
869 	int			flags;
870 	hammer2_thread_t	*thr;
871 };
872 
873 typedef struct hammer2_xop_fifo hammer2_xop_fifo_t;
874 
875 #define HAMMER2_XOP_FIFO_RUN	0x0001
876 #define HAMMER2_XOP_FIFO_STALL	0x0002
877 
878 struct hammer2_xop_head {
879 	hammer2_xop_func_t	func;
880 	hammer2_tid_t		mtid;
881 	struct hammer2_inode	*ip1;
882 	struct hammer2_inode	*ip2;
883 	struct hammer2_inode	*ip3;
884 	uint64_t		run_mask;
885 	uint64_t		chk_mask;
886 	int			flags;
887 	int			state;
888 	int			error;
889 	hammer2_key_t		collect_key;
890 	char			*name1;
891 	size_t			name1_len;
892 	char			*name2;
893 	size_t			name2_len;
894 	hammer2_xop_fifo_t	collect[HAMMER2_MAXCLUSTER];
895 	hammer2_cluster_t	cluster;	/* help collections */
896 	hammer2_io_t		*focus_dio;
897 };
898 
899 typedef struct hammer2_xop_head hammer2_xop_head_t;
900 
901 struct hammer2_xop_ipcluster {
902 	hammer2_xop_head_t	head;
903 };
904 
905 struct hammer2_xop_strategy {
906 	hammer2_xop_head_t	head;
907 	hammer2_key_t		lbase;
908 	int			finished;
909 	hammer2_mtx_t		lock;
910 	struct bio		*bio;
911 };
912 
913 struct hammer2_xop_readdir {
914 	hammer2_xop_head_t	head;
915 	hammer2_key_t		lkey;
916 };
917 
918 struct hammer2_xop_nresolve {
919 	hammer2_xop_head_t	head;
920 	hammer2_key_t		lhc;	/* if name is NULL, use lhc */
921 };
922 
923 struct hammer2_xop_unlink {
924 	hammer2_xop_head_t	head;
925 	int			isdir;
926 	int			dopermanent;
927 };
928 
929 #define H2DOPERM_PERMANENT	0x01
930 #define H2DOPERM_FORCE		0x02
931 #define H2DOPERM_IGNINO		0x04
932 
933 struct hammer2_xop_nrename {
934 	hammer2_xop_head_t	head;
935 	hammer2_tid_t		lhc;
936 	int			ip_key;
937 };
938 
939 struct hammer2_xop_scanlhc {
940 	hammer2_xop_head_t	head;
941 	hammer2_key_t		lhc;
942 };
943 
944 struct hammer2_xop_scanall {
945 	hammer2_xop_head_t	head;
946 	hammer2_key_t		key_beg;	/* inclusive */
947 	hammer2_key_t		key_end;	/* inclusive */
948 	int			resolve_flags;
949 	int			lookup_flags;
950 };
951 
952 struct hammer2_xop_lookup {
953 	hammer2_xop_head_t	head;
954 	hammer2_key_t		lhc;
955 };
956 
957 struct hammer2_xop_mkdirent {
958 	hammer2_xop_head_t	head;
959 	hammer2_dirent_head_t	dirent;
960 	hammer2_key_t		lhc;
961 };
962 
963 struct hammer2_xop_create {
964 	hammer2_xop_head_t	head;
965 	hammer2_inode_meta_t	meta;		/* initial metadata */
966 	hammer2_key_t		lhc;
967 	int			flags;
968 };
969 
970 struct hammer2_xop_destroy {
971 	hammer2_xop_head_t	head;
972 };
973 
974 struct hammer2_xop_fsync {
975 	hammer2_xop_head_t	head;
976 	hammer2_inode_meta_t	meta;
977 	hammer2_off_t		osize;
978 	u_int			ipflags;
979 	int			clear_directdata;
980 };
981 
982 struct hammer2_xop_unlinkall {
983 	hammer2_xop_head_t	head;
984 	hammer2_key_t		key_beg;
985 	hammer2_key_t		key_end;
986 };
987 
988 struct hammer2_xop_connect {
989 	hammer2_xop_head_t	head;
990 	hammer2_key_t		lhc;
991 };
992 
993 struct hammer2_xop_flush {
994 	hammer2_xop_head_t	head;
995 };
996 
997 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
998 typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
999 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
1000 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
1001 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
1002 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
1003 typedef struct hammer2_xop_mkdirent hammer2_xop_mkdirent_t;
1004 typedef struct hammer2_xop_create hammer2_xop_create_t;
1005 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
1006 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
1007 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
1008 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
1009 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
1010 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
1011 typedef struct hammer2_xop_connect hammer2_xop_connect_t;
1012 typedef struct hammer2_xop_flush hammer2_xop_flush_t;
1013 
1014 union hammer2_xop {
1015 	hammer2_xop_head_t	head;
1016 	hammer2_xop_ipcluster_t	xop_ipcluster;
1017 	hammer2_xop_readdir_t	xop_readdir;
1018 	hammer2_xop_nresolve_t	xop_nresolve;
1019 	hammer2_xop_unlink_t	xop_unlink;
1020 	hammer2_xop_nrename_t	xop_nrename;
1021 	hammer2_xop_strategy_t	xop_strategy;
1022 	hammer2_xop_mkdirent_t	xop_mkdirent;
1023 	hammer2_xop_create_t	xop_create;
1024 	hammer2_xop_destroy_t	xop_destroy;
1025 	hammer2_xop_fsync_t	xop_fsync;
1026 	hammer2_xop_unlinkall_t	xop_unlinkall;
1027 	hammer2_xop_scanlhc_t	xop_scanlhc;
1028 	hammer2_xop_scanall_t	xop_scanall;
1029 	hammer2_xop_lookup_t	xop_lookup;
1030 	hammer2_xop_flush_t	xop_flush;
1031 	hammer2_xop_connect_t	xop_connect;
1032 };
1033 
1034 typedef union hammer2_xop hammer2_xop_t;
1035 
1036 /*
1037  * hammer2_xop_group - Manage XOP support threads.
1038  */
1039 struct hammer2_xop_group {
1040 	hammer2_thread_t	thrs[HAMMER2_MAXCLUSTER];
1041 };
1042 
1043 typedef struct hammer2_xop_group hammer2_xop_group_t;
1044 
1045 /*
1046  * flags to hammer2_xop_collect()
1047  */
1048 #define HAMMER2_XOP_COLLECT_NOWAIT	0x00000001
1049 #define HAMMER2_XOP_COLLECT_WAITALL	0x00000002
1050 
1051 /*
1052  * flags to hammer2_xop_alloc()
1053  *
1054  * MODIFYING	- This is a modifying transaction, allocate a mtid.
1055  * RECURSE	- Recurse top-level inode (for root flushes)
1056  */
1057 #define HAMMER2_XOP_MODIFYING		0x00000001
1058 #define HAMMER2_XOP_STRATEGY		0x00000002
1059 #define HAMMER2_XOP_INODE_STOP		0x00000004
1060 #define HAMMER2_XOP_VOLHDR		0x00000008
1061 
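/*
 * Illustrative sketch of a frontend XOP dispatch, assuming the
 * hammer2_xop_alloc(), hammer2_xop_start(), hammer2_xop_collect() and
 * hammer2_xop_retire() helpers declared elsewhere in this header; the
 * exact sequence is a hedged example and hammer2_xop_func is a
 * placeholder for a backend function:
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *	(fill in operation-specific fields)
 *	hammer2_xop_start(&xop->head, hammer2_xop_func);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */
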
1062 /*
1063  * Global (per partition) management structure, represents a hard block
1064  * device.  Typically referenced by hammer2_chain structures when applicable.
1065  * Typically not used for network-managed elements.
1066  *
1067  * Note that a single hammer2_dev can be indirectly tied to multiple system
1068  * mount points.  There is no direct relationship.  System mounts are
1069  * per-cluster-id, not per-block-device, and a single hard mount might contain
1070  * many PFSs and those PFSs might combine together in various ways to form
1071  * the set of available clusters.
1072  */
1073 struct hammer2_dev {
1074 	struct vnode	*devvp;		/* device vnode */
1075 	int		ronly;		/* read-only mount */
1076 	int		mount_count;	/* number of actively mounted PFSs */
1077 	TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
1078 
1079 	struct malloc_type *mchain;
1080 	int		nipstacks;
1081 	int		maxipstacks;
1082 	kdmsg_iocom_t	iocom;		/* volume-level dmsg interface */
1083 	struct spinlock	io_spin;	/* iotree, iolruq access */
1084 	struct hammer2_io_tree iotree;
1085 	int		iofree_count;
1086 	int		freemap_relaxed;
1087 	hammer2_chain_t vchain;		/* anchor chain (topology) */
1088 	hammer2_chain_t fchain;		/* anchor chain (freemap) */
1089 	struct spinlock	list_spin;
1090 	struct h2_flush_list flushq;	/* flush seeds */
1091 	struct hammer2_pfs *spmp;	/* super-root pmp for transactions */
1092 	struct lock	vollk;		/* lockmgr lock */
1093 	struct lock	bulklk;		/* bulkfree operation lock */
1094 	struct lock	bflock;		/* bulk-free manual function lock */
1095 	hammer2_off_t	heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
1096 	hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
1097 	int		volhdrno;	/* last volhdrno written */
1098 	uint32_t	hflags;		/* HMNT2 flags applicable to device */
1099 	hammer2_off_t	free_reserved;	/* nominal free reserved */
1100 	hammer2_thread_t bfthr;		/* bulk-free thread */
1101 	char		devrepname[64];	/* for kprintf */
1102 	hammer2_ioc_bulkfree_t bflast;	/* stats for last bulkfree run */
1103 	hammer2_volume_data_t voldata;
1104 	hammer2_volume_data_t volsync;	/* synchronized voldata */
1105 };
1106 
1107 typedef struct hammer2_dev hammer2_dev_t;
1108 
1109 /*
1110  * Helper functions (cluster must be locked for flags to be valid).
1111  */
1112 static __inline
1113 int
1114 hammer2_chain_rdok(hammer2_chain_t *chain)
1115 {
1116 	return (chain->error == 0);
1117 }
1118 
1119 static __inline
1120 int
1121 hammer2_chain_wrok(hammer2_chain_t *chain)
1122 {
1123 	return (chain->error == 0 && chain->hmp->ronly == 0);
1124 }
1125 
1126 /*
1127  * Per-cluster management structure.  This structure will be tied to a
1128  * system mount point if the system is mounting the PFS, but is also used
1129  * to manage clusters encountered during the super-root scan or received
1130  * via LNK_SPANs that might not be mounted.
1131  *
1132  * This structure is also used to represent the super-root that hangs off
1133  * of a hard mount point.  The super-root is not really a cluster element.
1134  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
1135  * this than to special case super-root manipulation in the hammer2_chain*
1136  * code as being only hammer2_dev-related.
1137  *
1138  * pfs_mode and pfs_nmasters are rollup fields which critically describe
1139  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
1140  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
1141  * how many masters have been configured for a cluster and is always
1142  * applicable.  pfs_types[] is an array with 1:1 correspondence to the
1143  * iroot cluster and describes the PFS types of the nodes making up the
1144  * cluster.
1145  *
1146  * WARNING! Portions of this structure have deferred initialization.  In
1147  *	    particular, if not mounted there will be no wthread.
1148  *	    Unmounted network PFSs will also be missing iroot, and numerous
1149  *	    other fields will not be initialized prior to mount.
1150  *
1151  *	    Synchronization threads are chain-specific and only applicable
1152  *	    to local hard PFS entries.  A hammer2_pfs structure may contain
1153  *	    more than one when multiple hard PFSs are present on the local
1154  *	    machine which require synchronization monitoring.  Most PFSs
1155  *	    (such as snapshots) are 1xMASTER PFSs which do not need a
1156  *	    synchronization thread.
1157  *
1158  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
1159  *	    hammer2_dev->mount_count when the pfs is associated with a mount
1160  *	    point.
1161  */
1162 struct hammer2_pfs {
1163 	struct mount		*mp;
1164 	TAILQ_ENTRY(hammer2_pfs) mntentry;	/* hammer2_pfslist */
1165 	uuid_t			pfs_clid;
1166 	hammer2_dev_t		*spmp_hmp;	/* only if super-root pmp */
1167 	hammer2_dev_t		*force_local;	/* only if 'local' mount */
1168 	hammer2_inode_t		*iroot;		/* PFS root inode */
1169 	uint8_t			pfs_types[HAMMER2_MAXCLUSTER];
1170 	char			*pfs_names[HAMMER2_MAXCLUSTER];
1171 	hammer2_dev_t		*pfs_hmps[HAMMER2_MAXCLUSTER];
1172 	hammer2_trans_t		trans;
1173 	struct lock		lock;		/* PFS lock for certain ops */
1174 	struct lock		lock_nlink;	/* rename and nlink lock */
1175 	struct netexport	export;		/* nfs export */
1176 	int			speedup_ticks;	/* speedup_syncer() helper */
1177 	int			ronly;		/* read-only mount */
1178 	int			hflags;		/* pfs-specific mount flags */
1179 	struct malloc_type	*minode;
1180 	struct malloc_type	*mmsg;
1181 	struct spinlock		inum_spin;	/* inumber lookup */
1182 	struct hammer2_inode_tree inum_tree;	/* (not applicable to spmp) */
1183 	long			inum_count;	/* #of inodes in inum_tree */
1184 	struct spinlock		lru_spin;	/* inumber lookup */
1185 	struct hammer2_chain_list lru_list;	/* basis for LRU tests */
1186 	int			lru_count;	/* #of chains on LRU */
1187 	int			flags;
1188 	hammer2_tid_t		modify_tid;	/* modify transaction id */
1189 	hammer2_tid_t		inode_tid;	/* inode allocator */
1190 	uint8_t			pfs_nmasters;	/* total masters */
1191 	uint8_t			pfs_mode;	/* operating mode PFSMODE */
1192 	uint8_t			unused01;
1193 	uint8_t			unused02;
1194 	int			free_ticks;	/* free_* calculations */
1195 	long			inmem_inodes;
1196 	hammer2_off_t		free_reserved;
1197 	hammer2_off_t		free_nominal;
1198 	uint32_t		inmem_dirty_chains;
1199 	int			count_lwinprog;	/* logical write in prog */
1200 	struct spinlock		list_spin;
1201 	struct h2_sideq_list	sideq;		/* last-close dirty/unlink */
1202 	long			sideq_count;
1203 	hammer2_thread_t	sync_thrs[HAMMER2_MAXCLUSTER];
1204 	uint32_t		cluster_flags;	/* cached cluster flags */
1205 	int			has_xop_threads;
1206 	struct spinlock		xop_spin;	/* xop sequencer */
1207 	hammer2_xop_group_t	xop_groups[HAMMER2_XOPGROUPS];
1208 };
1209 
1210 typedef struct hammer2_pfs hammer2_pfs_t;
1211 
1212 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
1213 
1214 #define HAMMER2_PMPF_SPMP	0x00000001
1215 
1216 /*
1217  * NOTE: The LRU list contains at least all the chains with refs == 0
1218  *	 that can be recycled, and may contain additional chains which
1219  *	 cannot.
1220  */
1221 #define HAMMER2_LRU_LIMIT		4096
1222 
1223 #define HAMMER2_DIRTYCHAIN_WAITING	0x80000000
1224 #define HAMMER2_DIRTYCHAIN_MASK		0x7FFFFFFF
1225 
1226 #define HAMMER2_LWINPROG_WAITING	0x80000000
1227 #define HAMMER2_LWINPROG_WAITING0	0x40000000
1228 #define HAMMER2_LWINPROG_MASK		0x3FFFFFFF
1229 
1230 /*
1231  * hammer2_cluster_check
1232  */
1233 #define HAMMER2_CHECK_NULL	0x00000001
1234 
1235 /*
1236  * Misc
1237  */
1238 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
1239 #define VTOI(vp)	((hammer2_inode_t *)(vp)->v_data)
1240 #endif
1241 
1242 #if defined(_KERNEL)
1243 
1244 MALLOC_DECLARE(M_HAMMER2);
1245 
1246 #define ITOV(ip)	((ip)->vp)
1247 
1248 /*
1249  * Currently locked chains retain the locked buffer cache buffer for
1250  * indirect blocks, and indirect blocks can be one of two sizes.  The
1251  * device buffer has to match the case to avoid deadlocking recursive
1252  * chains that might otherwise try to access different offsets within
1253  * the same device buffer.
1254  */
1255 static __inline
1256 int
1257 hammer2_devblkradix(int radix)
1258 {
1259 #if 0
1260 	if (radix <= HAMMER2_LBUFRADIX) {
1261 		return (HAMMER2_LBUFRADIX);
1262 	} else {
1263 		return (HAMMER2_PBUFRADIX);
1264 	}
1265 #endif
1266 	return (HAMMER2_PBUFRADIX);
1267 }
1268 
1269 /*
1270  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
1271  */
1272 static __inline
1273 size_t
1274 hammer2_devblksize(size_t bytes)
1275 {
1276 #if 0
1277 	if (bytes <= HAMMER2_LBUFSIZE) {
1278 		return(HAMMER2_LBUFSIZE);
1279 	} else {
1280 		KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
1281 			 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
1282 		return (HAMMER2_PBUFSIZE);
1283 	}
1284 #endif
1285 	return (HAMMER2_PBUFSIZE);
1286 }
1287 
1288 
1289 static __inline
1290 hammer2_pfs_t *
1291 MPTOPMP(struct mount *mp)
1292 {
1293 	return ((hammer2_pfs_t *)mp->mnt_data);
1294 }
1295 
1296 #define HAMMER2_DEDUP_FRAG      (HAMMER2_PBUFSIZE / 64)
1297 #define HAMMER2_DEDUP_FRAGRADIX (HAMMER2_PBUFRADIX - 6)
1298 
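/*
 * hammer2_dedup_mask() - Returns a 64-bit mask with one bit set for each
 * HAMMER2_DEDUP_FRAG-sized fragment of the dio covered by the range
 * (data_off, bytes), presumably matched against the 64-bit dedup_valid
 * and dedup_alloc fields in hammer2_io_t.
 */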
1299 static __inline
1300 uint64_t
1301 hammer2_dedup_mask(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes)
1302 {
1303 	int bbeg;
1304 	int bits;
1305 	uint64_t mask;
1306 
1307 	bbeg = (int)((data_off & ~HAMMER2_OFF_MASK_RADIX) - dio->pbase) >>
1308 	       HAMMER2_DEDUP_FRAGRADIX;
1309 	bits = (int)((bytes + (HAMMER2_DEDUP_FRAG - 1)) >>
1310 	       HAMMER2_DEDUP_FRAGRADIX);
1311 	mask = ((uint64_t)1 << bbeg) - 1;
1312 	if (bbeg + bits == 64)
1313 		mask = (uint64_t)-1;
1314 	else
1315 		mask = ((uint64_t)1 << (bbeg + bits)) - 1;
1316 
1317 	mask &= ~(((uint64_t)1 << bbeg) - 1);
1318 
1319 	return mask;
1320 }
1321 
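/*
 * Translate accumulated HAMMER2_ERROR_* flag bits into a single kernel
 * errno, testing the more specific conditions first and falling back to
 * EDOM as a catch-all.
 */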
1322 static __inline
1323 int
1324 hammer2_error_to_errno(int error)
1325 {
1326 	if (error) {
1327 		if (error & HAMMER2_ERROR_EIO)
1328 			error = EIO;
1329 		else if (error & HAMMER2_ERROR_CHECK)
1330 			error = EDOM;
1331 		else if (error & HAMMER2_ERROR_ABORTED)
1332 			error = EINTR;
1333 		else if (error & HAMMER2_ERROR_BADBREF)
1334 			error = EIO;
1335 		else if (error & HAMMER2_ERROR_ENOSPC)
1336 			error = ENOSPC;
1337 		else if (error & HAMMER2_ERROR_ENOENT)
1338 			error = ENOENT;
1339 		else if (error & HAMMER2_ERROR_ENOTEMPTY)
1340 			error = ENOTEMPTY;
1341 		else if (error & HAMMER2_ERROR_EAGAIN)
1342 			error = EAGAIN;
1343 		else if (error & HAMMER2_ERROR_ENOTDIR)
1344 			error = ENOTDIR;
1345 		else if (error & HAMMER2_ERROR_EISDIR)
1346 			error = EISDIR;
1347 		else if (error & HAMMER2_ERROR_EINPROGRESS)
1348 			error = EINPROGRESS;
1349 		else if (error & HAMMER2_ERROR_EEXIST)
1350 			error = EEXIST;
1351 		else
1352 			error = EDOM;
1353 	}
1354 	return error;
1355 }
1356 
1357 static __inline
1358 int
1359 hammer2_errno_to_error(int error)
1360 {
1361 	switch(error) {
1362 	case 0:
1363 		return 0;
1364 	case EIO:
1365 		return HAMMER2_ERROR_EIO;
1366 	case EINVAL:
1367 	default:
1368 		return HAMMER2_ERROR_EINVAL;
1369 	}
1370 }
1371 
1372 
1373 extern struct vop_ops hammer2_vnode_vops;
1374 extern struct vop_ops hammer2_spec_vops;
1375 extern struct vop_ops hammer2_fifo_vops;
1376 extern struct hammer2_pfslist hammer2_pfslist;
1377 extern struct lock hammer2_mntlk;
1378 
1379 
1380 extern int hammer2_debug;
1381 extern int hammer2_cluster_meta_read;
1382 extern int hammer2_cluster_data_read;
1383 extern int hammer2_dedup_enable;
1384 extern int hammer2_always_compress;
1385 extern int hammer2_inval_enable;
1386 extern int hammer2_flush_pipe;
1387 extern int hammer2_dio_count;
1388 extern int hammer2_dio_limit;
1389 extern int hammer2_bulkfree_tps;
1390 extern long hammer2_chain_allocs;
1391 extern long hammer2_chain_frees;
1392 extern long hammer2_limit_dirty_chains;
1393 extern long hammer2_count_modified_chains;
1394 extern long hammer2_iod_invals;
1395 extern long hammer2_iod_file_read;
1396 extern long hammer2_iod_meta_read;
1397 extern long hammer2_iod_indr_read;
1398 extern long hammer2_iod_fmap_read;
1399 extern long hammer2_iod_volu_read;
1400 extern long hammer2_iod_file_write;
1401 extern long hammer2_iod_file_wembed;
1402 extern long hammer2_iod_file_wzero;
1403 extern long hammer2_iod_file_wdedup;
1404 extern long hammer2_iod_meta_write;
1405 extern long hammer2_iod_indr_write;
1406 extern long hammer2_iod_fmap_write;
1407 extern long hammer2_iod_volu_write;
1408 
1409 extern long hammer2_process_xxhash64;
1410 extern long hammer2_process_icrc32;
1411 
1412 extern struct objcache *cache_buffer_read;
1413 extern struct objcache *cache_buffer_write;
1414 extern struct objcache *cache_xops;
1415 
1416 /*
1417  * hammer2_subr.c
1418  */
1419 #define hammer2_icrc32(buf, size)	iscsi_crc32((buf), (size))
1420 #define hammer2_icrc32c(buf, size, crc)	iscsi_crc32_ext((buf), (size), (crc))
1421 
1422 int hammer2_signal_check(time_t *timep);
1423 const char *hammer2_error_str(int error);
1424 
1425 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1426 void hammer2_inode_unlock(hammer2_inode_t *ip);
1427 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1428 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
1429 			int clindex, hammer2_chain_t **parentp, int how);
1430 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1431 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1432 			hammer2_mtx_state_t ostate);
1433 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1434 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
1435 
1436 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1437 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1438 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1439 
1440 int hammer2_get_dtype(uint8_t type);
1441 int hammer2_get_vtype(uint8_t type);
1442 uint8_t hammer2_get_obj_type(enum vtype vtype);
1443 void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
1444 uint64_t hammer2_timespec_to_time(const struct timespec *ts);
1445 uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
1446 void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);
1447 void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
1448 
1449 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1450 int hammer2_getradix(size_t bytes);
1451 
1452 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1453 			hammer2_key_t *lbasep, hammer2_key_t *leofp);
1454 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1455 void hammer2_update_time(uint64_t *timep);
1456 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
1457 
1458 /*
1459  * hammer2_inode.c
1460  */
1461 struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
1462 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1463 			hammer2_tid_t inum);
1464 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
1465 			hammer2_xop_head_t *xop, int idx);
1466 void hammer2_inode_free(hammer2_inode_t *ip);
1467 void hammer2_inode_ref(hammer2_inode_t *ip);
1468 void hammer2_inode_drop(hammer2_inode_t *ip);
1469 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1470 			hammer2_cluster_t *cluster);
1471 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1472 			int idx);
1473 void hammer2_inode_modify(hammer2_inode_t *ip);
1474 void hammer2_inode_run_sideq(hammer2_pfs_t *pmp, int doall);
1475 
1476 hammer2_inode_t *hammer2_inode_create(hammer2_inode_t *dip,
1477 			hammer2_inode_t *pip,
1478 			struct vattr *vap, struct ucred *cred,
1479 			const uint8_t *name, size_t name_len, hammer2_key_t lhc,
1480 			hammer2_key_t inum, uint8_t type, uint8_t target_type,
1481 			int flags, int *errorp);
1482 int hammer2_inode_chain_sync(hammer2_inode_t *ip);
1483 int hammer2_inode_chain_flush(hammer2_inode_t *ip);
1484 int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
1485 int hammer2_dirent_create(hammer2_inode_t *dip, const char *name,
1486 			size_t name_len, hammer2_key_t inum, uint8_t type);
1487 
1488 /*
1489  * hammer2_chain.c
1490  */
1491 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1492 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1493 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1494 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1495 				hammer2_pfs_t *pmp,
1496 				hammer2_blockref_t *bref);
1497 void hammer2_chain_core_init(hammer2_chain_t *chain);
1498 void hammer2_chain_ref(hammer2_chain_t *chain);
1499 void hammer2_chain_ref_hold(hammer2_chain_t *chain);
1500 void hammer2_chain_drop(hammer2_chain_t *chain);
1501 void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
1502 int hammer2_chain_lock(hammer2_chain_t *chain, int how);
1503 void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
1504 void hammer2_chain_load_data(hammer2_chain_t *chain);
1505 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1506 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
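/*
 * Usage sketch (illustrative only): read-only access to a chain's media
 * data.  HAMMER2_RESOLVE_ALWAYS (defined earlier in this header) is the
 * assumed lock mode; ref/lock must be paired with unlock/drop.
 *
 *	hammer2_chain_ref(chain);
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
 *	data = hammer2_chain_rdata(chain);
 *	... read *data ...
 *	hammer2_chain_unlock(chain);
 *	hammer2_chain_drop(chain);
 */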
1507 
1508 int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
1509 				int clindex, int flags,
1510 				hammer2_chain_t **parentp,
1511 				hammer2_chain_t **chainp);
1512 int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
1513 				hammer2_off_t dedup_off, int flags);
1514 int hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
1515 				hammer2_tid_t mtid, int flags);
1516 int hammer2_chain_resize(hammer2_chain_t *chain,
1517 				hammer2_tid_t mtid, hammer2_off_t dedup_off,
1518 				int nradix, int flags);
1519 void hammer2_chain_unlock(hammer2_chain_t *chain);
1520 void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
1521 void hammer2_chain_wait(hammer2_chain_t *chain);
1522 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1523 				hammer2_blockref_t *bref, int how);
1524 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1525 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1526 hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t *chain, int flags);
1527 hammer2_chain_t *hammer2_chain_repparent(hammer2_chain_t **chainp, int flags);
1528 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1529 				hammer2_key_t *key_nextp,
1530 				hammer2_key_t key_beg, hammer2_key_t key_end,
1531 				int *errorp, int flags);
1532 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1533 				hammer2_chain_t *chain,
1534 				hammer2_key_t *key_nextp,
1535 				hammer2_key_t key_beg, hammer2_key_t key_end,
1536 				int *errorp, int flags);
1537 int hammer2_chain_scan(hammer2_chain_t *parent,
1538 				hammer2_chain_t **chainp,
1539 				hammer2_blockref_t *bref,
1540 				int *firstp, int flags);
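/*
 * Illustrative iteration pattern (sketch): a ranged scan under a parent
 * chain.  Returned chains come back locked and referenced and the parent
 * may be ridden up or down by the lookup routines; the flags value shown
 * as 0 would normally be one of the HAMMER2_LOOKUP_* flags defined
 * earlier in this header.
 *
 *	chain = hammer2_chain_lookup(&parent, &key_next,
 *				     key_beg, key_end, &error, 0);
 *	while (chain) {
 *		... process chain, possibly break early ...
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end, &error, 0);
 *	}
 *	if (chain) {
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */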
1541 
1542 int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
1543 				hammer2_pfs_t *pmp, int methods,
1544 				hammer2_key_t key, int keybits,
1545 				int type, size_t bytes, hammer2_tid_t mtid,
1546 				hammer2_off_t dedup_off, int flags);
1547 void hammer2_chain_rename(hammer2_chain_t **parentp,
1548 				hammer2_chain_t *chain,
1549 				hammer2_tid_t mtid, int flags);
1550 int hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
1551 				hammer2_tid_t mtid, int flags);
1552 int hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
1553 				hammer2_chain_t *chain);
1554 void hammer2_chain_setflush(hammer2_chain_t *chain);
1555 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1556 				hammer2_blockref_t *base, int count);
1557 hammer2_chain_t *hammer2_chain_bulksnap(hammer2_dev_t *hmp);
1558 void hammer2_chain_bulkdrop(hammer2_chain_t *copy);
1559 
1560 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1561 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1562 int hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
1563 				size_t name_len);
1564 
1565 void hammer2_pfs_memory_wait(hammer2_inode_t *ip, int always_moderate);
1566 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1567 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1568 
1569 void hammer2_base_delete(hammer2_chain_t *parent,
1570 				hammer2_blockref_t *base, int count,
1571 				hammer2_chain_t *chain);
1572 void hammer2_base_insert(hammer2_chain_t *parent,
1573 				hammer2_blockref_t *base, int count,
1574 				hammer2_chain_t *chain,
1575 				hammer2_blockref_t *elm);
1576 
1577 /*
1578  * hammer2_flush.c
1579  */
1580 int hammer2_flush(hammer2_chain_t *chain, int istop);
1581 void hammer2_delayed_flush(hammer2_chain_t *chain);
1582 
1583 /*
1584  * hammer2_trans.c
1585  */
1586 void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
1587 hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
1588 void hammer2_trans_done(hammer2_pfs_t *pmp, int quicksideq);
1589 hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
1590 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
1591 void hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
1592 				const char *data);
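/*
 * Usage sketch (illustrative only): modifying operations bracket their
 * work in a transaction and obtain a sub-transaction id for their
 * modifications.  The argument values shown (0 and 1) are assumptions,
 * not prescriptions.
 *
 *	hammer2_trans_init(pmp, 0);
 *	mtid = hammer2_trans_sub(pmp);
 *	... chain/inode modifications using mtid ...
 *	hammer2_trans_done(pmp, 1);
 */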
1593 
1594 /*
1595  * hammer2_ioctl.c
1596  */
1597 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1598 				int fflag, struct ucred *cred);
1599 
1600 /*
1601  * hammer2_io.c
1602  */
1603 void hammer2_io_putblk(hammer2_io_t **diop);
1604 void hammer2_io_inval(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes);
1605 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1606 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1607 void hammer2_io_bkvasync(hammer2_io_t *dio);
1608 hammer2_io_t *hammer2_io_getblk(hammer2_dev_t *hmp, int btype, off_t lbase,
1609 				int lsize, int op);
1610 void hammer2_io_dedup_set(hammer2_dev_t *hmp, hammer2_blockref_t *bref);
1611 void hammer2_io_dedup_delete(hammer2_dev_t *hmp, uint8_t btype,
1612 				hammer2_off_t data_off, u_int bytes);
1613 void hammer2_io_dedup_assert(hammer2_dev_t *hmp, hammer2_off_t data_off,
1614 				u_int bytes);
1615 void hammer2_io_callback(struct bio *bio);
1616 int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1617 				hammer2_io_t **diop);
1618 int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1619 				hammer2_io_t **diop);
1620 int hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1621 				hammer2_io_t **diop);
1622 hammer2_io_t *hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase, int lsize);
1623 void hammer2_io_bawrite(hammer2_io_t **diop);
1624 void hammer2_io_bdwrite(hammer2_io_t **diop);
1625 int hammer2_io_bwrite(hammer2_io_t **diop);
1626 void hammer2_io_setdirty(hammer2_io_t *dio);
1627 void hammer2_io_brelse(hammer2_io_t **diop);
1628 void hammer2_io_bqrelse(hammer2_io_t **diop);
1629 void hammer2_io_ref(hammer2_io_t *dio);
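/*
 * Typical read access through the hammer2_io layer (sketch; error
 * handling omitted).  A dio obtained via hammer2_io_bread() must be
 * released with hammer2_io_putblk().
 *
 *	error = hammer2_io_bread(hmp, btype, lbase, lsize, &dio);
 *	data = hammer2_io_data(dio, lbase);
 *	... use data ...
 *	hammer2_io_putblk(&dio);
 */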
1630 
1631 /*
1632  * hammer2_thread.c
1633  */
1634 void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
1635 void hammer2_thr_signal2(hammer2_thread_t *thr,
1636 			uint32_t pflags, uint32_t nflags);
1637 void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
1638 void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
1639 int hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo);
1640 void hammer2_thr_create(hammer2_thread_t *thr,
1641 			hammer2_pfs_t *pmp, hammer2_dev_t *hmp,
1642 			const char *id, int clindex, int repidx,
1643 			void (*func)(void *arg));
1644 void hammer2_thr_delete(hammer2_thread_t *thr);
1645 void hammer2_thr_remaster(hammer2_thread_t *thr);
1646 void hammer2_thr_freeze_async(hammer2_thread_t *thr);
1647 void hammer2_thr_freeze(hammer2_thread_t *thr);
1648 void hammer2_thr_unfreeze(hammer2_thread_t *thr);
1649 int hammer2_thr_break(hammer2_thread_t *thr);
1650 void hammer2_primary_xops_thread(void *arg);
1651 
1652 /*
1653  * hammer2_thread.c (XOP API)
1654  */
1655 void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
1656 void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
1657 void hammer2_xop_setname(hammer2_xop_head_t *xop,
1658 				const char *name, size_t name_len);
1659 void hammer2_xop_setname2(hammer2_xop_head_t *xop,
1660 				const char *name, size_t name_len);
1661 size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
1662 void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
1663 void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
1664 void hammer2_xop_reinit(hammer2_xop_head_t *xop);
1665 void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
1666 void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
1667 void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func);
1668 void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
1669 				int notidx);
1670 int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
1671 void hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask);
1672 int hammer2_xop_active(hammer2_xop_head_t *xop);
1673 int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
1674 				int clindex, int error);
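/*
 * Front-end side of an XOP (illustrative sketch): the alloc flags,
 * collect flags, and retire mask shown are assumptions; the backend
 * function passed to hammer2_xop_start() is one of the backends
 * declared below.
 *
 *	xop = hammer2_xop_alloc(dip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	... consume the collected cluster/focus ...
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */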
1675 
1676 /*
1677  * hammer2_synchro.c
1678  */
1679 void hammer2_primary_sync_thread(void *arg);
1680 
1681 /*
1682  * XOP backends in hammer2_xops.c, primarily for VNOPS.  Other XOP backends
1683  * may be integrated into other source files.
1684  */
1685 void hammer2_xop_ipcluster(hammer2_thread_t *thr, hammer2_xop_t *xop);
1686 void hammer2_xop_readdir(hammer2_thread_t *thr, hammer2_xop_t *xop);
1687 void hammer2_xop_nresolve(hammer2_thread_t *thr, hammer2_xop_t *xop);
1688 void hammer2_xop_unlink(hammer2_thread_t *thr, hammer2_xop_t *xop);
1689 void hammer2_xop_nrename(hammer2_thread_t *thr, hammer2_xop_t *xop);
1690 void hammer2_xop_scanlhc(hammer2_thread_t *thr, hammer2_xop_t *xop);
1691 void hammer2_xop_scanall(hammer2_thread_t *thr, hammer2_xop_t *xop);
1692 void hammer2_xop_lookup(hammer2_thread_t *thr, hammer2_xop_t *xop);
1693 void hammer2_inode_xop_mkdirent(hammer2_thread_t *thr, hammer2_xop_t *xop);
1694 void hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *xop);
1695 void hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *xop);
1696 void hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *xop);
1697 void hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *xop);
1698 void hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *xop);
1699 void hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *xop);
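/*
 * Back-end side of an XOP (illustrative sketch): each node thread runs
 * a backend for its cluster index and feeds results to the front-end;
 * a final feed with a NULL chain is assumed to deliver the termination
 * status.
 *
 *	error = hammer2_xop_feed(&xop->head, chain, thr->clindex,
 *				 chain->error);
 *	...
 *	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
 */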
1700 
1701 /*
1702  * hammer2_msgops.c
1703  */
1704 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1705 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1706 
1707 /*
1708  * hammer2_vfsops.c
1709  */
1710 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1711 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx,
1712 				u_int flags);
1713 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1714 int hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred);
1715 
1716 hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
1717 				const hammer2_inode_data_t *ripdata,
1718 				hammer2_tid_t modify_tid,
1719 				hammer2_dev_t *force_local);
1720 void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
1721 int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1722 				ino_t ino, struct vnode **vpp);
1723 
1724 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1725 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1726 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);
1727 
1728 /*
1729  * hammer2_freemap.c
1730  */
1731 int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
1732 void hammer2_freemap_adjust(hammer2_dev_t *hmp,
1733 				hammer2_blockref_t *bref, int how);
1734 
1735 /*
1736  * hammer2_cluster.c
1737  */
1738 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1739 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1740 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
1741 				hammer2_blockref_t *bref);
1742 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1743 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1744 void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1745 int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
1746 			int flags);
1747 void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
1748 void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
1749 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1750 
1751 void hammer2_bulkfree_init(hammer2_dev_t *hmp);
1752 void hammer2_bulkfree_uninit(hammer2_dev_t *hmp);
1753 int hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
1754 			struct hammer2_ioc_bulkfree *bfi);
1755 void hammer2_dummy_xop_from_chain(hammer2_xop_head_t *xop,
1756 			hammer2_chain_t *chain);
1757 
1758 /*
1759  * hammer2_iocom.c
1760  */
1761 void hammer2_iocom_init(hammer2_dev_t *hmp);
1762 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1763 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1764 
1765 /*
1766  * hammer2_strategy.c
1767  */
1768 int hammer2_vop_strategy(struct vop_strategy_args *ap);
1769 int hammer2_vop_bmap(struct vop_bmap_args *ap);
1770 void hammer2_write_thread(void *arg);
1771 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1772 void hammer2_dedup_clear(hammer2_dev_t *hmp);
1773 
1774 /*
1775  * More complex inlines
1776  */
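/*
 * Return a pointer to the media data backing the cluster focus.  If the
 * focus is backed by a dio, the dio is referenced and its kva
 * synchronized so the data remains stable; the caller must release the
 * reference with hammer2_xop_pdata().
 */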
1777 static __inline
1778 const hammer2_media_data_t *
1779 hammer2_xop_gdata(hammer2_xop_head_t *xop)
1780 {
1781 	hammer2_chain_t *focus;
1782 	const void *data;
1783 
1784 	focus = xop->cluster.focus;
1785 	if (focus->dio) {
1786 		lockmgr(&focus->diolk, LK_SHARED);
1787 		if ((xop->focus_dio = focus->dio) != NULL) {
1788 			hammer2_io_ref(xop->focus_dio);
1789 			hammer2_io_bkvasync(xop->focus_dio);
1790 		}
1791 		data = focus->data;
1792 		lockmgr(&focus->diolk, LK_RELEASE);
1793 	} else {
1794 		data = focus->data;
1795 	}
1796 
1797 	return data;
1798 }
1799 
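/*
 * Release the dio reference (if any) acquired by hammer2_xop_gdata().
 */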
1800 static __inline
1801 void
1802 hammer2_xop_pdata(hammer2_xop_head_t *xop)
1803 {
1804 	if (xop->focus_dio)
1805 		hammer2_io_putblk(&xop->focus_dio);
1806 }
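/*
 * Usage sketch (illustrative only): hammer2_xop_gdata() and
 * hammer2_xop_pdata() bracket access to the focus data, e.g. to copy
 * fields out of an inode:
 *
 *	ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
 *	... copy what is needed out of *ripdata ...
 *	hammer2_xop_pdata(&xop->head);
 */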
1807 
1808 #endif /* _KERNEL */
1809 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */
1810