1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow for different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61 
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64 
65 #include <sys/param.h>
66 #include <sys/types.h>
67 #include <sys/kernel.h>
68 #include <sys/conf.h>
69 #include <sys/systm.h>
70 #include <sys/tree.h>
71 #include <sys/malloc.h>
72 #include <sys/mount.h>
73 #include <sys/vnode.h>
74 #include <sys/proc.h>
75 #include <sys/mountctl.h>
76 #include <sys/priv.h>
77 #include <sys/stat.h>
78 #include <sys/thread.h>
79 #include <sys/globaldata.h>
80 #include <sys/lockf.h>
81 #include <sys/buf.h>
82 #include <sys/queue.h>
83 #include <sys/limits.h>
84 #include <sys/dmsg.h>
85 #include <sys/mutex.h>
86 #include <sys/kern_syscall.h>
87 
88 #include <sys/signal2.h>
89 #include <sys/buf2.h>
90 #include <sys/mutex2.h>
91 #include <sys/thread2.h>
92 
93 #include "hammer2_disk.h"
94 #include "hammer2_mount.h"
95 #include "hammer2_ioctl.h"
96 
97 struct hammer2_io;
98 struct hammer2_iocb;
99 struct hammer2_chain;
100 struct hammer2_cluster;
101 struct hammer2_inode;
102 struct hammer2_dev;
103 struct hammer2_pfs;
104 struct hammer2_span;
105 struct hammer2_state;
106 struct hammer2_msg;
107 struct hammer2_thread;
108 union hammer2_xop;
109 
110 /*
111  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
112  * abortable locks, and both exclusive and shared spinlocks.  Normal
113  * synchronous non-abortable locks can be substituted for spinlocks.
114  */
115 typedef mtx_t				hammer2_mtx_t;
116 typedef mtx_link_t			hammer2_mtx_link_t;
117 typedef mtx_state_t			hammer2_mtx_state_t;
118 
119 typedef struct spinlock			hammer2_spin_t;
120 
121 #define hammer2_mtx_ex			mtx_lock_ex_quick
122 #define hammer2_mtx_sh			mtx_lock_sh_quick
123 #define hammer2_mtx_unlock		mtx_unlock
124 #define hammer2_mtx_owned		mtx_owned
125 #define hammer2_mtx_init		mtx_init
126 #define hammer2_mtx_temp_release	mtx_lock_temp_release
127 #define hammer2_mtx_temp_restore	mtx_lock_temp_restore
128 #define hammer2_mtx_refs		mtx_lockrefs
129 
130 #define hammer2_spin_init		spin_init
131 #define hammer2_spin_sh			spin_lock_shared
132 #define hammer2_spin_ex			spin_lock
133 #define hammer2_spin_unsh		spin_unlock_shared
134 #define hammer2_spin_unex		spin_unlock
135 
136 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
137 
138 typedef struct hammer2_xop_list	hammer2_xop_list_t;
139 
140 
141 /*
142  * General lock support
143  */
144 static __inline
145 int
146 hammer2_mtx_upgrade(hammer2_mtx_t *mtx)
147 {
148 	int wasexclusive;
149 
150 	if (mtx_islocked_ex(mtx)) {
151 		wasexclusive = 1;
152 	} else {
153 		mtx_unlock(mtx);
154 		mtx_lock_ex_quick(mtx);
155 		wasexclusive = 0;
156 	}
157 	return wasexclusive;
158 }
159 
160 /*
161  * Downgrade an inode lock from exclusive to shared only if the inode
162  * lock was previously shared.  If the inode lock was previously exclusive,
163  * this is a NOP.
164  */
165 static __inline
166 void
167 hammer2_mtx_downgrade(hammer2_mtx_t *mtx, int wasexclusive)
168 {
169 	if (wasexclusive == 0)
170 		mtx_downgrade(mtx);
171 }
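/*
 * Illustrative sketch only (the helper name below is hypothetical, not part
 * of the API): the two inlines above are meant to be paired.  A caller
 * holding a possibly-shared mtx temporarily forces it exclusive, does its
 * work, and then restores the original lock state.
 *
 *	static void
 *	example_force_exclusive(hammer2_mtx_t *mtx)
 *	{
 *		int wasexclusive;
 *
 *		wasexclusive = hammer2_mtx_upgrade(mtx);
 *		... modify state protected by the mtx ...
 *		hammer2_mtx_downgrade(mtx, wasexclusive);
 *	}
 */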
172 
173 /*
174  * The xid tracks internal transactional updates.
175  *
176  * XXX fix-me, really needs to be 64-bits
177  */
178 typedef uint32_t hammer2_xid_t;
179 
180 #define HAMMER2_XID_MIN	0x00000000U
181 #define HAMMER2_XID_MAX 0x7FFFFFFFU
182 
183 /*
184  * The chain structure tracks a portion of the media topology from the
185  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
186  * data blocks, and freemap nodes and leafs.
187  *
188  * The chain structure utilizes a simple singly-homed topology and the
189  * chain's in-memory topology will move around as the chains do, due mainly
190  * to renames and indirect block creation.
191  *
192  * Block Table Updates
193  *
194  *	Block table updates for insertions and updates are delayed until the
195  *	flush.  This allows us to avoid having to modify the parent chain
196  *	all the way to the root.
197  *
198  *	Block table deletions are performed immediately (modifying the parent
199  *	in the process) because the flush code uses the chain structure to
200  *	track delayed updates and the chain will be (likely) gone or moved to
201  *	another location in the topology after a deletion.
202  *
203  *	A prior iteration of the code tried to keep the relationship intact
204  *	on deletes by doing a delete-duplicate operation on the chain, but
205  *	it added way too much complexity to the codebase.
206  *
207  * Flush Synchronization
208  *
209  *	The flush code must flush modified chains bottom-up.  Because chain
210  *	structures can shift around and are NOT topologically stable,
211  *	modified chains are independently indexed for the flush.  As the flush
212  *	runs it modifies (or further modifies) and updates the parents,
213  *	propagating the flush all the way to the volume root.
214  *
215  *	Modifying front-end operations can occur during a flush but will block
216  *	in two cases: (1) when the front-end tries to operate on the inode
217  *	currently in the midst of being flushed and (2) if the front-end
218  *	crosses an inode currently being flushed (such as during a rename).
219  *	So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
220  *	the flusher is currently working on "a/b/c", the rename will block
221  *	temporarily in order to ensure that "x" exists in one place or the
222  *	other.
223  *
224  *	Meta-data statistics are updated by the flusher.  The front-end will
225  *	make estimates but meta-data must be fully synchronized only during a
226  *	flush in order to ensure that it remains correct across a crash.
227  *
228  *	Multiple flush synchronizations can theoretically be in-flight at the
229  *	same time but the implementation is not coded to handle the case and
230  *	currently serializes them.
231  *
232  * Snapshots:
233  *
234  *	Snapshots currently require the subdirectory tree being snapshotted
235  *	to be flushed.  The snapshot then creates a new super-root inode which
236  *	copies the flushed blockdata of the directory or file that was
237  *	snapshotted.
238  *
239  * RBTREE NOTES:
240  *
241  *	- Note that the radix tree runs in powers of 2 only so sub-trees
242  *	  cannot straddle edges.
243  */
244 RB_HEAD(hammer2_chain_tree, hammer2_chain);
245 TAILQ_HEAD(h2_flush_list, hammer2_chain);
246 TAILQ_HEAD(h2_core_list, hammer2_chain);
247 TAILQ_HEAD(h2_iocb_list, hammer2_iocb);
248 
249 #define CHAIN_CORE_DELETE_BMAP_ENTRIES	\
250 	(HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
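/*
 * For reference, assuming the usual 64KB HAMMER2_PBUFSIZE, 128-byte
 * hammer2_blockref_t, and 32-bit words, this works out to
 * 65536 / 128 / 4 = 128 entries.
 */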
251 
252 /*
253  * Core topology for chain (embedded in chain).  Protected by a spinlock.
254  */
255 struct hammer2_chain_core {
256 	hammer2_spin_t	spin;
257 	struct hammer2_chain_tree rbtree; /* sub-chains */
258 	int		live_zero;	/* blockref array opt */
259 	u_int		live_count;	/* live (not deleted) chains in tree */
260 	u_int		chain_count;	/* live + deleted chains under core */
261 	int		generation;	/* generation number (inserts only) */
262 };
263 
264 typedef struct hammer2_chain_core hammer2_chain_core_t;
265 
266 RB_HEAD(hammer2_io_tree, hammer2_io);
267 
268 /*
269  * IOCB - IO callback (into chain, cluster, or manual request)
270  */
271 struct hammer2_iocb {
272 	TAILQ_ENTRY(hammer2_iocb) entry;
273 	void (*callback)(struct hammer2_iocb *iocb);
274 	struct hammer2_io	*dio;
275 	struct hammer2_cluster	*cluster;
276 	struct hammer2_chain	*chain;
277 	void			*ptr;
278 	off_t			lbase;
279 	int			lsize;
280 	uint32_t		flags;
281 	int			error;
282 };
283 
284 typedef struct hammer2_iocb hammer2_iocb_t;
285 
286 #define HAMMER2_IOCB_INTERLOCK	0x00000001
287 #define HAMMER2_IOCB_ONQ	0x00000002
288 #define HAMMER2_IOCB_DONE	0x00000004
289 #define HAMMER2_IOCB_INPROG	0x00000008
290 #define HAMMER2_IOCB_UNUSED10	0x00000010
291 #define HAMMER2_IOCB_QUICK	0x00010000
292 #define HAMMER2_IOCB_ZERO	0x00020000
293 #define HAMMER2_IOCB_READ	0x00040000
294 #define HAMMER2_IOCB_WAKEUP	0x00080000
295 
296 /*
297  * DIO - Management structure wrapping system buffer cache.
298  *
299  *	 Used for multiple purposes, including the concurrent management
300  *	 of small chain requests aggregated into larger DIOs.
301  */
302 struct hammer2_io {
303 	RB_ENTRY(hammer2_io) rbnode;	/* indexed by device offset */
304 	struct h2_iocb_list iocbq;
305 	struct spinlock spin;
306 	struct hammer2_dev *hmp;
307 	struct buf	*bp;
308 	off_t		pbase;
309 	int		psize;
310 	int		refs;
311 	int		act;			/* activity */
312 };
313 
314 typedef struct hammer2_io hammer2_io_t;
315 
316 #define HAMMER2_DIO_INPROG	0x80000000	/* bio in progress */
317 #define HAMMER2_DIO_GOOD	0x40000000	/* dio->bp is stable */
318 #define HAMMER2_DIO_WAITING	0x20000000	/* (old) */
319 #define HAMMER2_DIO_DIRTY	0x10000000	/* flush on last drop */
320 
321 #define HAMMER2_DIO_MASK	0x0FFFFFFF
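/*
 * Note: the DIO flag bits above are carried in the high bits of dio->refs,
 * with the low bits covered by HAMMER2_DIO_MASK forming the actual
 * reference count, i.e. (dio->refs & HAMMER2_DIO_MASK); see hammer2_io.c
 * for the authoritative usage.
 */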
322 
323 /*
324  * Primary chain structure keeps track of the topology in-memory.
325  */
326 struct hammer2_chain {
327 	hammer2_mtx_t		lock;
328 	hammer2_chain_core_t	core;
329 	RB_ENTRY(hammer2_chain) rbnode;		/* live chain(s) */
330 	hammer2_blockref_t	bref;
331 	struct hammer2_chain	*parent;
332 	struct hammer2_state	*state;		/* if active cache msg */
333 	struct hammer2_dev	*hmp;
334 	struct hammer2_pfs	*pmp;		/* A PFS or super-root (spmp) */
335 
336 	hammer2_io_t	*dio;			/* physical data buffer */
337 	u_int		bytes;			/* physical data size */
338 	u_int		flags;
339 	u_int		refs;
340 	u_int		lockcnt;
341 	int		error;			/* on-lock data error state */
342 
343 	hammer2_media_data_t *data;		/* data pointer shortcut */
344 	TAILQ_ENTRY(hammer2_chain) flush_node;	/* flush list */
345 };
346 
347 typedef struct hammer2_chain hammer2_chain_t;
348 
349 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
350 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
351 
352 /*
353  * Special notes on flags:
354  *
355  * INITIAL	- This flag allows a chain to be created and for storage to
356  *		  be allocated without having to immediately instantiate the
357  *		  related buffer.  The data is assumed to be all-zeros.  It
358  *		  is primarily used for indirect blocks.
359  *
360  * MODIFIED	- The chain's media data has been modified.
361  *
362  * UPDATE	- Chain might not be modified but parent blocktable needs update
363  *
364  * FICTITIOUS	- Faked chain as a placeholder for an error condition.  This
365  *		  chain is unsuitable for I/O.
366  *
367  * BMAPPED	- Indicates that the chain is present in the parent blockmap.
368  *
369  * BMAPUPD	- Indicates that the chain is present but needs to be updated
370  *		  in the parent blockmap.
371  */
372 #define HAMMER2_CHAIN_MODIFIED		0x00000001	/* dirty chain data */
373 #define HAMMER2_CHAIN_ALLOCATED		0x00000002	/* kmalloc'd chain */
374 #define HAMMER2_CHAIN_DESTROY		0x00000004
375 #define HAMMER2_CHAIN_UNUSED0008	0x00000008
376 #define HAMMER2_CHAIN_DELETED		0x00000010	/* deleted chain */
377 #define HAMMER2_CHAIN_INITIAL		0x00000020	/* initial create */
378 #define HAMMER2_CHAIN_UPDATE		0x00000040	/* need parent update */
379 #define HAMMER2_CHAIN_DEFERRED		0x00000080	/* flush depth defer */
380 #define HAMMER2_CHAIN_IOFLUSH		0x00000100	/* bawrite on put */
381 #define HAMMER2_CHAIN_ONFLUSH		0x00000200	/* on a flush list */
382 #define HAMMER2_CHAIN_FICTITIOUS	0x00000400	/* unsuitable for I/O */
383 #define HAMMER2_CHAIN_VOLUMESYNC	0x00000800	/* needs volume sync */
384 #define HAMMER2_CHAIN_DELAYED		0x00001000	/* delayed flush */
385 #define HAMMER2_CHAIN_COUNTEDBREFS	0x00002000	/* block table stats */
386 #define HAMMER2_CHAIN_ONRBTREE		0x00004000	/* on parent RB tree */
387 #define HAMMER2_CHAIN_UNUSED00008000	0x00008000
388 #define HAMMER2_CHAIN_EMBEDDED		0x00010000	/* embedded data */
389 #define HAMMER2_CHAIN_RELEASE		0x00020000	/* don't keep around */
390 #define HAMMER2_CHAIN_BMAPPED		0x00040000	/* present in blkmap */
391 #define HAMMER2_CHAIN_BMAPUPD		0x00080000	/* +needs updating */
392 #define HAMMER2_CHAIN_IOINPROG		0x00100000	/* I/O interlock */
393 #define HAMMER2_CHAIN_IOSIGNAL		0x00200000	/* I/O interlock */
394 #define HAMMER2_CHAIN_PFSBOUNDARY	0x00400000	/* super->pfs inode */
395 
396 #define HAMMER2_CHAIN_FLUSH_MASK	(HAMMER2_CHAIN_MODIFIED |	\
397 					 HAMMER2_CHAIN_UPDATE |		\
398 					 HAMMER2_CHAIN_ONFLUSH)
399 
400 /*
401  * Hammer2 error codes, used by chain->error and cluster->error.  The error
402  * code is typically set at lock time when I/O is requested there, or later
403  * when the deferred I/O is actually performed.  If set for a cluster it
404  * generally means the cluster code could not find a valid copy to present.
405  *
406  * IO		- An I/O error occurred
407  * CHECK	- I/O succeeded but did not match the check code
408  * INCOMPLETE	- A cluster is not complete enough to use, or
409  *		  a chain cannot be loaded because its parent has an error.
410  *
411  * NOTE: API allows callers to check zero/non-zero to determine if an error
412  *	 condition exists.
413  *
414  * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
415  *	 NULL on other errors.  Check chain->error, not chain->data.
416  */
417 #define HAMMER2_ERROR_NONE		0
418 #define HAMMER2_ERROR_IO		1	/* device I/O error */
419 #define HAMMER2_ERROR_CHECK		2	/* check code mismatch */
420 #define HAMMER2_ERROR_INCOMPLETE	3	/* incomplete cluster */
421 #define HAMMER2_ERROR_DEPTH		4	/* temporary depth limit */
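/*
 * Minimal usage sketch (hedged example; 'chain' is a caller-supplied,
 * referenced chain): per the NOTE above, callers generally just test
 * chain->error for non-zero after locking with data resolution (see the
 * HAMMER2_RESOLVE_* flags below) and must not assume chain->data is valid
 * when an error is set.
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain->error == 0) {
 *		... safe to inspect chain->data ...
 *	} else {
 *		... report chain->error, do not touch chain->data ...
 *	}
 *	hammer2_chain_unlock(chain);
 */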
422 
423 /*
424  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
425  *
426  * NOTES:
427  *	NOLOCK	    - Input and output chains are referenced only and not
428  *		      locked.  Output chain might be temporarily locked
429  *		      internally.
430  *
431  *	NODATA	    - Asks that the chain->data not be resolved in order
432  *		      to avoid I/O.
433  *
434  *	NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
435  *		      the inode itself if the inode is in DIRECTDATA mode
436  *		      (i.e. file is <= 512 bytes).  Used by the synchronization
437  *		      code to prevent confusion.
438  *
439  *	SHARED	    - The input chain is expected to be locked shared,
440  *		      and the output chain is locked shared.
441  *
442  *	MATCHIND    - Allows an indirect block / freemap node to be returned
443  *		      when the passed key range matches the radix.  Remember
444  *		      that key_end is inclusive (e.g. {0x000,0xFFF},
445  *		      not {0x000,0x1000}).
446  *
447  *		      (Cannot be used for remote or cluster ops).
448  *
449  *	ALLNODES    - Allows NULL focus.
450  *
451  *	ALWAYS	    - Always resolve the data.  If ALWAYS and NODATA are both
452  *		      missing, bulk file data is not resolved but inodes and
453  *		      other meta-data will be.
454  *
455  *	NOUNLOCK    - Used by hammer2_chain_next() to leave the lock on
456  *		      the input chain intact.  The chain is still dropped.
457  *		      This allows the caller to add a reference to the chain
458  *		      and retain it in a locked state (used by the
459  *		      XOP/feed/collect code).
460  */
461 #define HAMMER2_LOOKUP_NOLOCK		0x00000001	/* ref only */
462 #define HAMMER2_LOOKUP_NODATA		0x00000002	/* data left NULL */
463 #define HAMMER2_LOOKUP_NODIRECT		0x00000004	/* no offset=0 DD */
464 #define HAMMER2_LOOKUP_SHARED		0x00000100
465 #define HAMMER2_LOOKUP_MATCHIND		0x00000200	/* return all chains */
466 #define HAMMER2_LOOKUP_ALLNODES		0x00000400	/* allow NULL focus */
467 #define HAMMER2_LOOKUP_ALWAYS		0x00000800	/* resolve data */
468 #define HAMMER2_LOOKUP_NOUNLOCK		0x00001000	/* leave lock intact */
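/*
 * Typical iteration sketch (hedged example; 'parent' is normally obtained
 * via hammer2_chain_lookup_init() and released with
 * hammer2_chain_lookup_done(), and the local variables are assumed to be
 * declared by the caller).  The key_next returned by one call becomes the
 * key_beg of the next:
 *
 *	chain = hammer2_chain_lookup(&parent, &key_next,
 *				     key_beg, key_end,
 *				     &cache_index, HAMMER2_LOOKUP_SHARED);
 *	while (chain) {
 *		... examine the shared-locked chain ...
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end,
 *					   &cache_index, HAMMER2_LOOKUP_SHARED);
 *	}
 */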
469 
470 /*
471  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
472  *
473  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
474  *	 blocks in the INITIAL-create state.
475  */
476 #define HAMMER2_MODIFY_OPTDATA		0x00000002	/* data can be NULL */
477 #define HAMMER2_MODIFY_NO_MODIFY_TID	0x00000004
478 #define HAMMER2_MODIFY_UNUSED0008	0x00000008
479 #define HAMMER2_MODIFY_NOREALLOC	0x00000010
480 
481 /*
482  * Flags passed to hammer2_chain_lock()
483  *
484  * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
485  *	 will be made to either the cluster being locked or any underlying
486  *	 cluster.  It allows the cluster to lock and access data for a subset
487  *	 of available nodes instead of all available nodes.
488  */
489 #define HAMMER2_RESOLVE_NEVER		1
490 #define HAMMER2_RESOLVE_MAYBE		2
491 #define HAMMER2_RESOLVE_ALWAYS		3
492 #define HAMMER2_RESOLVE_MASK		0x0F
493 
494 #define HAMMER2_RESOLVE_SHARED		0x10	/* request shared lock */
495 #define HAMMER2_RESOLVE_UNUSED20	0x20
496 #define HAMMER2_RESOLVE_RDONLY		0x40	/* higher level op flag */
497 
498 /*
499  * Flags passed to hammer2_chain_delete()
500  */
501 #define HAMMER2_DELETE_PERMANENT	0x0001
502 
503 /*
504  * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
505  */
506 #define HAMMER2_INSERT_PFSROOT		0x0004
507 
508 /*
509  * Flags passed to hammer2_chain_delete_duplicate()
510  */
511 #define HAMMER2_DELDUP_RECORE		0x0001
512 
513 /*
514  * Cluster different types of storage together for allocations
515  */
516 #define HAMMER2_FREECACHE_INODE		0
517 #define HAMMER2_FREECACHE_INDIR		1
518 #define HAMMER2_FREECACHE_DATA		2
519 #define HAMMER2_FREECACHE_UNUSED3	3
520 #define HAMMER2_FREECACHE_TYPES		4
521 
522 /*
523  * hammer2_freemap_alloc() block preference
524  */
525 #define HAMMER2_OFF_NOPREF		((hammer2_off_t)-1)
526 
527 /*
528  * BMAP read-ahead maximum parameters
529  */
530 #define HAMMER2_BMAP_COUNT		16	/* max bmap read-ahead */
531 #define HAMMER2_BMAP_BYTES		(HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
532 
533 /*
534  * hammer2_freemap_adjust()
535  */
536 #define HAMMER2_FREEMAP_DORECOVER	1
537 #define HAMMER2_FREEMAP_DOMAYFREE	2
538 #define HAMMER2_FREEMAP_DOREALFREE	3
539 
540 /*
541  * HAMMER2 cluster - A set of chains representing the same entity.
542  *
543  * hammer2_cluster typically represents a temporary set of representative
544  * chains.  The one exception is that a hammer2_cluster is embedded in
545  * hammer2_inode.  This embedded cluster is ONLY used to track the
546  * representative chains and cannot be directly locked.
547  *
548  * A cluster is usually temporary (and thus per-thread) for locking purposes,
549  * allowing us to embed the asynchronous storage required for cluster
550  * operations in the cluster itself and adjust the state and status without
551  * having to worry too much about SMP issues.
552  *
553  * The exception is the cluster embedded in the hammer2_inode structure.
554  * This is used to cache the cluster state on an inode-by-inode basis.
555  * Individual hammer2_chain structures not incorporated into clusters might
556  * also stick around to cache miscellaneous elements.
557  *
558  * Because the cluster is a 'working copy' and is usually subject to cluster
559  * quorum rules, it is quite possible for us to end up with an insufficient
560  * number of live chains to execute an operation.  If an insufficient number
561  * of chains remain in a working copy, the operation may have to be
562  * downgraded, retried, stalled until the requisite number of chains is
563  * available, or possibly even error out depending on the mount type.
564  *
565  * A cluster's focus is set when it is locked.  The focus can only be set
566  * to a chain still part of the synchronized set.
567  */
568 #define HAMMER2_MAXCLUSTER	8
569 #define HAMMER2_XOPFIFO		16
570 #define HAMMER2_XOPFIFO_MASK	(HAMMER2_XOPFIFO - 1)
571 #define HAMMER2_XOPGROUPS	16
572 #define HAMMER2_XOPGROUPS_MASK	(HAMMER2_XOPGROUPS - 1)
573 #define HAMMER2_XOPMASK_VOP	0x80000000U
574 
575 struct hammer2_cluster_item {
576 	hammer2_chain_t		*chain;
577 	int			cache_index;
578 	uint32_t		flags;
579 };
580 
581 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
582 
583 /*
584  * INVALID	- Invalid for focus, i.e. not part of synchronized set.
585  *		  Once set, this bit is sticky across operations.
586  *
587  * FEMOD	- Indicates that front-end modifying operations can
588  *		  mess with this entry and that a MODSYNC copy will
589  *		  also affect it.
590  */
591 #define HAMMER2_CITEM_INVALID	0x00000001
592 #define HAMMER2_CITEM_FEMOD	0x00000002
593 #define HAMMER2_CITEM_NULL	0x00000004
594 
595 struct hammer2_cluster {
596 	int			refs;		/* track for deallocation */
597 	int			ddflag;
598 	struct hammer2_pfs	*pmp;
599 	uint32_t		flags;
600 	int			nchains;
601 	int			error;		/* error code valid on lock */
602 	int			focus_index;
603 	hammer2_iocb_t		iocb;
604 	hammer2_chain_t		*focus;		/* current focus (or mod) */
605 	hammer2_cluster_item_t	array[HAMMER2_MAXCLUSTER];
606 };
607 
608 typedef struct hammer2_cluster	hammer2_cluster_t;
609 
610 /*
611  * WRHARD	- Hard mounts can write fully synchronized
612  * RDHARD	- Hard mounts can read fully synchronized
613  * UNHARD	- Unsynchronized masters present
614  * NOHARD	- No masters visible
615  * WRSOFT	- Soft mounts can write to at least the SOFT_MASTER
616  * RDSOFT	- Soft mounts can read from at least a SOFT_SLAVE
617  * UNSOFT	- Unsynchronized slaves present
618  * NOSOFT	- No slaves visible
619  * RDSLAVE	- slaves are accessible (possibly unsynchronized or remote).
620  * MSYNCED	- All masters are fully synchronized
621  * SSYNCED	- All known local slaves are fully synchronized to masters
622  *
623  * All available masters are always incorporated.  All PFSs belonging to a
624  * cluster (master, slave, copy, whatever) always try to synchronize the
625  * total number of known masters in the PFS's root inode.
626  *
627  * A cluster might have access to many slaves, copies, or caches, but we
628  * have a limited number of cluster slots.  Any such elements which are
629  * directly mounted from block device(s) will always be incorporated.   Note
630  * that SSYNCED only applies to such elements which are directly mounted,
631  * not to any remote slaves, copies, or caches that could be available.  These
632  * bits are used to monitor and drive our synchronization threads.
633  *
634  * When asking the question 'is any data accessible at all', then a simple
635  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
636  * these bits are set the object can be read with certain caveats:
637  * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized,
638  * and RDSLAVE - not authoritative, has some data but it could be old or
639  * incomplete.
640  *
641  * When both soft and hard mounts are available, data will be read and written
642  * via the soft mount only.  All of them may still be in the cluster because
643  * background synchronization threads still need to do their work.
644  */
645 #define HAMMER2_CLUSTER_INODE	0x00000001	/* embedded in inode struct */
646 #define HAMMER2_CLUSTER_UNUSED2	0x00000002
647 #define HAMMER2_CLUSTER_LOCKED	0x00000004	/* cluster lks not recursive */
648 #define HAMMER2_CLUSTER_WRHARD	0x00000100	/* hard-mount can write */
649 #define HAMMER2_CLUSTER_RDHARD	0x00000200	/* hard-mount can read */
650 #define HAMMER2_CLUSTER_UNHARD	0x00000400	/* unsynchronized masters */
651 #define HAMMER2_CLUSTER_NOHARD	0x00000800	/* no masters visible */
652 #define HAMMER2_CLUSTER_WRSOFT	0x00001000	/* soft-mount can write */
653 #define HAMMER2_CLUSTER_RDSOFT	0x00002000	/* soft-mount can read */
654 #define HAMMER2_CLUSTER_UNSOFT	0x00004000	/* unsynchronized slaves */
655 #define HAMMER2_CLUSTER_NOSOFT	0x00008000	/* no slaves visible */
656 #define HAMMER2_CLUSTER_MSYNCED	0x00010000	/* all masters synchronized */
657 #define HAMMER2_CLUSTER_SSYNCED	0x00020000	/* known slaves synchronized */
658 
659 #define HAMMER2_CLUSTER_ANYDATA	( HAMMER2_CLUSTER_RDHARD |	\
660 				  HAMMER2_CLUSTER_RDSOFT |	\
661 				  HAMMER2_CLUSTER_RDSLAVE)
662 
663 #define HAMMER2_CLUSTER_RDOK	( HAMMER2_CLUSTER_RDHARD |	\
664 				  HAMMER2_CLUSTER_RDSOFT)
665 
666 #define HAMMER2_CLUSTER_WROK	( HAMMER2_CLUSTER_WRHARD |	\
667 				  HAMMER2_CLUSTER_WRSOFT)
668 
669 #define HAMMER2_CLUSTER_ZFLAGS	( HAMMER2_CLUSTER_WRHARD |	\
670 				  HAMMER2_CLUSTER_RDHARD |	\
671 				  HAMMER2_CLUSTER_WRSOFT |	\
672 				  HAMMER2_CLUSTER_RDSOFT |	\
673 				  HAMMER2_CLUSTER_MSYNCED |	\
674 				  HAMMER2_CLUSTER_SSYNCED)
675 
676 /*
677  * Helper functions (cluster must be locked for flags to be valid).
678  */
679 static __inline
680 int
681 hammer2_cluster_rdok(hammer2_cluster_t *cluster)
682 {
683 	return (cluster->flags & HAMMER2_CLUSTER_RDOK);
684 }
685 
686 static __inline
687 int
688 hammer2_cluster_wrok(hammer2_cluster_t *cluster)
689 {
690 	return (cluster->flags & HAMMER2_CLUSTER_WROK);
691 }
692 
693 RB_HEAD(hammer2_inode_tree, hammer2_inode);
694 
695 /*
696  * A hammer2 inode.
697  *
698  * NOTE: The inode-embedded cluster is never used directly for I/O (since
699  *	 it may be shared).  Instead it will be replicated-in and synchronized
700  *	 back out if changed.
701  */
702 struct hammer2_inode {
703 	RB_ENTRY(hammer2_inode) rbnode;		/* inumber lookup (HL) */
704 	hammer2_mtx_t		lock;		/* inode lock */
705 	struct hammer2_pfs	*pmp;		/* PFS mount */
706 	struct hammer2_inode	*pip;		/* parent inode */
707 	struct vnode		*vp;
708 	struct spinlock		cluster_spin;	/* update cluster */
709 	hammer2_cluster_t	cluster;
710 	struct lockf		advlock;
711 	u_int			flags;
712 	u_int			refs;		/* +vpref, +flushref */
713 	uint8_t			comp_heuristic;
714 	hammer2_inode_meta_t	meta;		/* copy of meta-data */
715 	hammer2_off_t		osize;
716 };
717 
718 typedef struct hammer2_inode hammer2_inode_t;
719 
720 /*
721  * MODIFIED	- Inode is in a modified state, ip->meta may have changes.
722  * RESIZED	- Inode truncated (any) or inode extended beyond
723  *		  EMBEDDED_BYTES.
724  */
725 #define HAMMER2_INODE_MODIFIED		0x0001
726 #define HAMMER2_INODE_SROOT		0x0002	/* kmalloc special case */
727 #define HAMMER2_INODE_RENAME_INPROG	0x0004
728 #define HAMMER2_INODE_ONRBTREE		0x0008
729 #define HAMMER2_INODE_RESIZED		0x0010	/* requires inode_fsync */
730 #define HAMMER2_INODE_UNUSED0020	0x0020
731 #define HAMMER2_INODE_ISUNLINKED	0x0040
732 #define HAMMER2_INODE_METAGOOD		0x0080	/* inode meta-data good */
733 
734 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
735 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
736 		hammer2_tid_t);
737 
738 /*
739  * inode-unlink side-structure
740  */
741 struct hammer2_inode_unlink {
742 	TAILQ_ENTRY(hammer2_inode_unlink) entry;
743 	hammer2_inode_t	*ip;
744 };
745 TAILQ_HEAD(h2_unlk_list, hammer2_inode_unlink);
746 
747 typedef struct hammer2_inode_unlink hammer2_inode_unlink_t;
748 
749 /*
750  * Transaction management sub-structure under hammer2_pfs
751  */
752 struct hammer2_trans {
753 	uint32_t		flags;
754 	uint32_t		sync_wait;
755 };
756 
757 typedef struct hammer2_trans hammer2_trans_t;
758 
759 #define HAMMER2_TRANS_ISFLUSH		0x80000000	/* flush code */
760 #define HAMMER2_TRANS_BUFCACHE		0x40000000	/* bio strategy */
761 #define HAMMER2_TRANS_PREFLUSH		0x20000000	/* preflush state */
762 #define HAMMER2_TRANS_FPENDING		0x10000000	/* flush pending */
763 #define HAMMER2_TRANS_WAITING		0x08000000	/* someone waiting */
764 #define HAMMER2_TRANS_MASK		0x00FFFFFF	/* count mask */
765 
766 #define HAMMER2_FREEMAP_HEUR_NRADIX	4	/* pwr 2 PBUFRADIX-MINIORADIX */
767 #define HAMMER2_FREEMAP_HEUR_TYPES	8
768 #define HAMMER2_FREEMAP_HEUR		(HAMMER2_FREEMAP_HEUR_NRADIX * \
769 					 HAMMER2_FREEMAP_HEUR_TYPES)
770 
771 /*
772  * Hammer2 support thread element.
773  *
774  * Potentially many support threads can hang off of hammer2, primarily
775  * off the hammer2_pfs structure.  Typically:
776  *
777  * td x Nodes		 	A synchronization thread for each node.
778  * td x Nodes x workers		Worker threads for frontend operations.
779  * td x 1			Bioq thread for logical buffer writes.
780  *
781  * In addition, the synchronization thread(s) associated with the
782  * super-root PFS (spmp) for a node is responsible for automatic bulkfree
783  * and dedup scans.
784  */
785 struct hammer2_thread {
786 	struct hammer2_pfs *pmp;
787 	thread_t	td;
788 	uint32_t	flags;
789 	int		depth;
790 	int		clindex;	/* cluster element index */
791 	int		repidx;
792 	struct lock	lk;		/* thread control lock */
793 	hammer2_xop_list_t xopq;
794 };
795 
796 typedef struct hammer2_thread hammer2_thread_t;
797 
798 #define HAMMER2_THREAD_UNMOUNTING	0x0001	/* unmount request */
799 #define HAMMER2_THREAD_DEV		0x0002	/* related to dev, not pfs */
800 #define HAMMER2_THREAD_UNUSED04		0x0004
801 #define HAMMER2_THREAD_REMASTER		0x0008	/* remaster request */
802 #define HAMMER2_THREAD_STOP		0x0010	/* exit request */
803 #define HAMMER2_THREAD_FREEZE		0x0020	/* force idle */
804 #define HAMMER2_THREAD_FROZEN		0x0040	/* restart */
805 
806 
807 /*
808  * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
809  *
810  * This structure is used to distribute a VOP operation across multiple
811  * nodes.  It provides a rendezvous for concurrent node execution and
812  * can be detached from the frontend operation to allow the frontend to
813  * return early.
814  */
815 typedef void (*hammer2_xop_func_t)(union hammer2_xop *xop, int clidx);
816 
817 typedef struct hammer2_xop_fifo {
818 	TAILQ_ENTRY(hammer2_xop_head) entry;
819 	hammer2_chain_t		*array[HAMMER2_XOPFIFO];
820 	int			errors[HAMMER2_XOPFIFO];
821 	int			ri;
822 	int			wi;
823 	int			unused03;
824 } hammer2_xop_fifo_t;
825 
826 struct hammer2_xop_head {
827 	hammer2_xop_func_t	func;
828 	hammer2_tid_t		mtid;
829 	struct hammer2_inode	*ip;
830 	struct hammer2_inode	*ip2;
831 	struct hammer2_inode	*ip3;
832 	struct hammer2_xop_group *xgrp;
833 	uint32_t		check_counter;
834 	uint32_t		run_mask;
835 	uint32_t		chk_mask;
836 	int			state;
837 	int			error;
838 	hammer2_key_t		collect_key;
839 	char			*name;
840 	size_t			name_len;
841 	char			*name2;
842 	size_t			name2_len;
843 	hammer2_xop_fifo_t	collect[HAMMER2_MAXCLUSTER];
844 	hammer2_cluster_t	cluster;	/* help collections */
845 };
846 
847 typedef struct hammer2_xop_head hammer2_xop_head_t;
848 
849 struct hammer2_xop_ipcluster {
850 	hammer2_xop_head_t	head;
851 };
852 
853 struct hammer2_xop_strategy {
854 	hammer2_xop_head_t	head;
855 	hammer2_key_t		lbase;
856 	int			finished;
857 	struct bio		*bio;
858 };
859 
860 struct hammer2_xop_readdir {
861 	hammer2_xop_head_t	head;
862 	hammer2_key_t		lkey;
863 };
864 
865 struct hammer2_xop_nresolve {
866 	hammer2_xop_head_t	head;
867 	hammer2_key_t		lhc;	/* if name is NULL, use lhc */
868 };
869 
870 struct hammer2_xop_nlink {
871 	hammer2_xop_head_t	head;
872 };
873 
874 struct hammer2_xop_unlink {
875 	hammer2_xop_head_t	head;
876 	int			isdir;
877 	int			dopermanent;
878 };
879 
880 struct hammer2_xop_nrename {
881 	hammer2_xop_head_t	head;
882 	hammer2_tid_t		lhc;
883 	int			ip_key;
884 };
885 
886 struct hammer2_xop_scanlhc {
887 	hammer2_xop_head_t	head;
888 	hammer2_key_t		lhc;
889 };
890 
891 struct hammer2_xop_scanall {
892 	hammer2_xop_head_t	head;
893 	hammer2_key_t		key_beg;	/* inclusive */
894 	hammer2_key_t		key_end;	/* inclusive */
895 };
896 
897 struct hammer2_xop_lookup {
898 	hammer2_xop_head_t	head;
899 	hammer2_key_t		lhc;
900 };
901 
902 struct hammer2_xop_create {
903 	hammer2_xop_head_t	head;
904 	hammer2_inode_meta_t	meta;		/* initial metadata */
905 	hammer2_key_t		lhc;
906 	int			flags;
907 };
908 
909 struct hammer2_xop_destroy {
910 	hammer2_xop_head_t	head;
911 };
912 
913 struct hammer2_xop_fsync {
914 	hammer2_xop_head_t	head;
915 	hammer2_inode_meta_t	meta;
916 	hammer2_off_t		osize;
917 	u_int			ipflags;
918 	int			clear_directdata;
919 };
920 
921 struct hammer2_xop_unlinkall {
922 	hammer2_xop_head_t	head;
923 	hammer2_key_t		key_beg;
924 	hammer2_key_t		key_end;
925 };
926 
927 struct hammer2_xop_connect {
928 	hammer2_xop_head_t	head;
929 	hammer2_key_t		lhc;
930 };
931 
932 struct hammer2_xop_flush {
933 	hammer2_xop_head_t	head;
934 };
935 
936 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
937 typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
938 typedef struct hammer2_xop_nlink hammer2_xop_nlink_t;
939 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
940 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
941 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
942 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
943 typedef struct hammer2_xop_create hammer2_xop_create_t;
944 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
945 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
946 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
947 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
948 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
949 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
950 typedef struct hammer2_xop_connect hammer2_xop_connect_t;
951 typedef struct hammer2_xop_flush hammer2_xop_flush_t;
952 
953 union hammer2_xop {
954 	hammer2_xop_head_t	head;
955 	hammer2_xop_ipcluster_t	xop_ipcluster;
956 	hammer2_xop_readdir_t	xop_readdir;
957 	hammer2_xop_nresolve_t	xop_nresolve;
958 	hammer2_xop_nlink_t	xop_nlink;
959 	hammer2_xop_unlink_t	xop_unlink;
960 	hammer2_xop_nrename_t	xop_nrename;
961 	hammer2_xop_strategy_t	xop_strategy;
962 	hammer2_xop_create_t	xop_create;
963 	hammer2_xop_destroy_t	xop_destroy;
964 	hammer2_xop_fsync_t	xop_fsync;
965 	hammer2_xop_unlinkall_t	xop_unlinkall;
966 	hammer2_xop_scanlhc_t	xop_scanlhc;
967 	hammer2_xop_scanall_t	xop_scanall;
968 	hammer2_xop_lookup_t	xop_lookup;
969 	hammer2_xop_flush_t	xop_flush;
970 	hammer2_xop_connect_t	xop_connect;
971 };
972 
973 typedef union hammer2_xop hammer2_xop_t;
974 
975 /*
976  * hammer2_xop_group - Manage XOP support threads.
977  */
978 struct hammer2_xop_group {
979 	hammer2_thread_t	thrs[HAMMER2_MAXCLUSTER];
980 	hammer2_mtx_t		mtx;
981 	hammer2_mtx_t		mtx2;
982 };
983 
984 typedef struct hammer2_xop_group hammer2_xop_group_t;
985 
986 /*
987  * flags to hammer2_xop_collect()
988  */
989 #define HAMMER2_XOP_COLLECT_NOWAIT	0x00000001
990 #define HAMMER2_XOP_COLLECT_WAITALL	0x00000002
991 
992 /*
993  * flags to hammer2_xop_alloc()
994  *
995  * MODIFYING	- This is a modifying transaction, allocate a mtid.
996  */
997 #define HAMMER2_XOP_MODIFYING		0x00000001
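/*
 * Frontend-side usage sketch (hedged example; the XOP API functions and the
 * hammer2_xop_nresolve() backend used here are declared later in this file,
 * and 'dip', 'name', 'name_len' plus all error/result handling are assumed
 * to come from the caller):
 *
 *	hammer2_xop_nresolve_t *xop;
 *	int error;
 *
 *	xop = hammer2_xop_alloc(dip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	... on success, inspect &xop->head.cluster ...
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */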
998 
999 /*
1000  * Global (per partition) management structure, represents a hard block
1001  * device.  Typically referenced by hammer2_chain structures when applicable.
1002  * Typically not used for network-managed elements.
1003  *
1004  * Note that a single hammer2_dev can be indirectly tied to multiple system
1005  * mount points.  There is no direct relationship.  System mounts are
1006  * per-cluster-id, not per-block-device, and a single hard mount might contain
1007  * many PFSs and those PFSs might combine together in various ways to form
1008  * the set of available clusters.
1009  */
1010 struct hammer2_dev {
1011 	struct vnode	*devvp;		/* device vnode */
1012 	int		ronly;		/* read-only mount */
1013 	int		mount_count;	/* number of actively mounted PFSs */
1014 	TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
1015 
1016 	struct malloc_type *mchain;
1017 	int		nipstacks;
1018 	int		maxipstacks;
1019 	kdmsg_iocom_t	iocom;		/* volume-level dmsg interface */
1020 	struct spinlock	io_spin;	/* iotree access */
1021 	struct hammer2_io_tree iotree;
1022 	int		iofree_count;
1023 	hammer2_chain_t vchain;		/* anchor chain (topology) */
1024 	hammer2_chain_t fchain;		/* anchor chain (freemap) */
1025 	struct spinlock	list_spin;
1026 	struct h2_flush_list	flushq;	/* flush seeds */
1027 	struct hammer2_pfs *spmp;	/* super-root pmp for transactions */
1028 	struct lock	vollk;		/* lockmgr lock */
1029 	hammer2_off_t	heur_freemap[HAMMER2_FREEMAP_HEUR];
1030 	int		volhdrno;	/* last volhdrno written */
1031 	char		devrepname[64];	/* for kprintf */
1032 	hammer2_volume_data_t voldata;
1033 	hammer2_volume_data_t volsync;	/* synchronized voldata */
1034 };
1035 
1036 typedef struct hammer2_dev hammer2_dev_t;
1037 
1038 /*
1039  * Helper functions (chain must be locked for the error state to be valid).
1040  */
1041 static __inline
1042 int
1043 hammer2_chain_rdok(hammer2_chain_t *chain)
1044 {
1045 	return (chain->error == 0);
1046 }
1047 
1048 static __inline
1049 int
1050 hammer2_chain_wrok(hammer2_chain_t *chain)
1051 {
1052 	return (chain->error == 0 && chain->hmp->ronly == 0);
1053 }
1054 
1055 /*
1056  * Per-cluster management structure.  This structure will be tied to a
1057  * system mount point if the system is mounting the PFS, but is also used
1058  * to manage clusters encountered during the super-root scan or received
1059  * via LNK_SPANs that might not be mounted.
1060  *
1061  * This structure is also used to represent the super-root that hangs off
1062  * of a hard mount point.  The super-root is not really a cluster element.
1063  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
1064  * this than to special case super-root manipulation in the hammer2_chain*
1065  * code as being only hammer2_dev-related.
1066  *
1067  * pfs_mode and pfs_nmasters are rollup fields which critically describe
1068  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
1069  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
1070  * how many masters have been configured for a cluster and is always
1071  * applicable.  pfs_types[] is an array with 1:1 correspondence to the
1072  * iroot cluster and describes the PFS types of the nodes making up the
1073  * cluster.
1074  *
1075  * WARNING! Portions of this structure have deferred initialization.  In
1076  *	    particular, if not mounted there will be no ihidden or wthread.
1077  *	    Unmounted network PFSs will also be missing iroot and numerous
1078  *	    other fields will not be initialized prior to mount.
1079  *
1080  *	    Synchronization threads are chain-specific and only applicable
1081  *	    to local hard PFS entries.  A hammer2_pfs structure may contain
1082  *	    more than one when multiple hard PFSs are present on the local
1083  *	    machine which require synchronization monitoring.  Most PFSs
1084  *	    (such as snapshots) are 1xMASTER PFSs which do not need a
1085  *	    synchronization thread.
1086  *
1087  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
1088  *	    hammer2_dev->mount_count when the pfs is associated with a mount
1089  *	    point.
1090  */
1091 struct hammer2_pfs {
1092 	struct mount		*mp;
1093 	TAILQ_ENTRY(hammer2_pfs) mntentry;	/* hammer2_pfslist */
1094 	uuid_t			pfs_clid;
1095 	hammer2_dev_t		*spmp_hmp;	/* only if super-root pmp */
1096 	hammer2_inode_t		*iroot;		/* PFS root inode */
1097 	hammer2_inode_t		*ihidden;	/* PFS hidden directory */
1098 	uint8_t			pfs_types[HAMMER2_MAXCLUSTER];
1099 	char			*pfs_names[HAMMER2_MAXCLUSTER];
1100 	hammer2_dev_t		*pfs_hmps[HAMMER2_MAXCLUSTER];
1101 	hammer2_trans_t		trans;
1102 	struct lock		lock;		/* PFS lock for certain ops */
1103 	struct netexport	export;		/* nfs export */
1104 	int			ronly;		/* read-only mount */
1105 	struct malloc_type	*minode;
1106 	struct malloc_type	*mmsg;
1107 	struct spinlock		inum_spin;	/* inumber lookup */
1108 	struct hammer2_inode_tree inum_tree;	/* (not applicable to spmp) */
1109 	hammer2_tid_t		modify_tid;	/* modify transaction id */
1110 	hammer2_tid_t		inode_tid;	/* inode allocator */
1111 	uint8_t			pfs_nmasters;	/* total masters */
1112 	uint8_t			pfs_mode;	/* operating mode PFSMODE */
1113 	uint8_t			unused01;
1114 	uint8_t			unused02;
1115 	int			xop_iterator;
1116 	long			inmem_inodes;
1117 	uint32_t		inmem_dirty_chains;
1118 	int			count_lwinprog;	/* logical write in prog */
1119 	struct spinlock		list_spin;
1120 	struct h2_unlk_list	unlinkq;	/* last-close unlink */
1121 	hammer2_thread_t	sync_thrs[HAMMER2_MAXCLUSTER];
1122 	uint32_t		cluster_flags;	/* cached cluster flags */
1123 	int			has_xop_threads;
1124 	hammer2_xop_group_t	xop_groups[HAMMER2_XOPGROUPS];
1125 };
1126 
1127 typedef struct hammer2_pfs hammer2_pfs_t;
1128 
1129 #define HAMMER2_DIRTYCHAIN_WAITING	0x80000000
1130 #define HAMMER2_DIRTYCHAIN_MASK		0x7FFFFFFF
1131 
1132 #define HAMMER2_LWINPROG_WAITING	0x80000000
1133 #define HAMMER2_LWINPROG_WAITING0	0x40000000
1134 #define HAMMER2_LWINPROG_MASK		0x3FFFFFFF
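/*
 * Note: as with the DIO flags, the WAITING bits above are believed to be
 * carried in the high bits of the corresponding counters
 * (pmp->inmem_dirty_chains and pmp->count_lwinprog), with the *_MASK values
 * extracting the actual counts; see the hammer2_pfs_memory_*() and
 * hammer2_lwinprog_*() helpers declared below.
 */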
1135 
1136 /*
1137  * hammer2_cluster_check
1138  */
1139 #define HAMMER2_CHECK_NULL	0x00000001
1140 
1141 /*
1142  * Bulkscan
1143  */
1144 #define HAMMER2_BULK_ABORT	0x00000001
1145 
1146 /*
1147  * Misc
1148  */
1149 #if defined(_KERNEL)
1150 
1151 MALLOC_DECLARE(M_HAMMER2);
1152 
1153 #define VTOI(vp)	((hammer2_inode_t *)(vp)->v_data)
1154 #define ITOV(ip)	((ip)->vp)
1155 
1156 /*
1157  * Currently locked chains retain the locked buffer cache buffer for
1158  * indirect blocks, and indirect blocks can be one of two sizes.  The
1159  * device buffer has to match the case to avoid deadlocking recursive
1160  * chains that might otherwise try to access different offsets within
1161  * the same device buffer.
1162  */
1163 static __inline
1164 int
1165 hammer2_devblkradix(int radix)
1166 {
1167 #if 0
1168 	if (radix <= HAMMER2_LBUFRADIX) {
1169 		return (HAMMER2_LBUFRADIX);
1170 	} else {
1171 		return (HAMMER2_PBUFRADIX);
1172 	}
1173 #endif
1174 	return (HAMMER2_PBUFRADIX);
1175 }
1176 
1177 /*
1178  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
1179  */
1180 static __inline
1181 size_t
1182 hammer2_devblksize(size_t bytes)
1183 {
1184 #if 0
1185 	if (bytes <= HAMMER2_LBUFSIZE) {
1186 		return(HAMMER2_LBUFSIZE);
1187 	} else {
1188 		KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
1189 			 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
1190 		return (HAMMER2_PBUFSIZE);
1191 	}
1192 #endif
1193 	return (HAMMER2_PBUFSIZE);
1194 }
1195 
1196 
1197 static __inline
1198 hammer2_pfs_t *
1199 MPTOPMP(struct mount *mp)
1200 {
1201 	return ((hammer2_pfs_t *)mp->mnt_data);
1202 }
1203 
1204 #define LOCKSTART	int __nlocks = curthread->td_locks
1205 #define LOCKENTER	(++curthread->td_locks)
1206 #define LOCKEXIT	(--curthread->td_locks)
1207 #define LOCKSTOP	KKASSERT(curthread->td_locks == __nlocks)
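/*
 * Sketch (hypothetical usage): LOCKSTART/LOCKSTOP bracket an operation to
 * assert that it does not leak tracked locks (td_locks), while
 * LOCKENTER/LOCKEXIT adjust the tracked count manually when needed.
 *
 *	LOCKSTART;
 *	... body which acquires and releases its locks ...
 *	LOCKSTOP;
 */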
1208 
1209 extern struct vop_ops hammer2_vnode_vops;
1210 extern struct vop_ops hammer2_spec_vops;
1211 extern struct vop_ops hammer2_fifo_vops;
1212 
1213 extern int hammer2_debug;
1214 extern int hammer2_cluster_enable;
1215 extern int hammer2_hardlink_enable;
1216 extern int hammer2_flush_pipe;
1217 extern int hammer2_synchronous_flush;
1218 extern int hammer2_dio_count;
1219 extern long hammer2_limit_dirty_chains;
1220 extern long hammer2_iod_file_read;
1221 extern long hammer2_iod_meta_read;
1222 extern long hammer2_iod_indr_read;
1223 extern long hammer2_iod_fmap_read;
1224 extern long hammer2_iod_volu_read;
1225 extern long hammer2_iod_file_write;
1226 extern long hammer2_iod_meta_write;
1227 extern long hammer2_iod_indr_write;
1228 extern long hammer2_iod_fmap_write;
1229 extern long hammer2_iod_volu_write;
1230 extern long hammer2_ioa_file_read;
1231 extern long hammer2_ioa_meta_read;
1232 extern long hammer2_ioa_indr_read;
1233 extern long hammer2_ioa_fmap_read;
1234 extern long hammer2_ioa_volu_read;
1235 extern long hammer2_ioa_file_write;
1236 extern long hammer2_ioa_meta_write;
1237 extern long hammer2_ioa_indr_write;
1238 extern long hammer2_ioa_fmap_write;
1239 extern long hammer2_ioa_volu_write;
1240 
1241 extern struct objcache *cache_buffer_read;
1242 extern struct objcache *cache_buffer_write;
1243 extern struct objcache *cache_xops;
1244 
1245 /*
1246  * hammer2_subr.c
1247  */
1248 #define hammer2_icrc32(buf, size)	iscsi_crc32((buf), (size))
1249 #define hammer2_icrc32c(buf, size, crc)	iscsi_crc32_ext((buf), (size), (crc))
1250 
1251 int hammer2_signal_check(time_t *timep);
1252 const char *hammer2_error_str(int error);
1253 
1254 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1255 void hammer2_inode_unlock(hammer2_inode_t *ip);
1256 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1257 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
1258 			int clindex, hammer2_chain_t **parentp, int how);
1259 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1260 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1261 			hammer2_mtx_state_t ostate);
1262 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1263 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
1264 
1265 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1266 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1267 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1268 
1269 int hammer2_get_dtype(const hammer2_inode_data_t *ipdata);
1270 int hammer2_get_vtype(uint8_t type);
1271 u_int8_t hammer2_get_obj_type(enum vtype vtype);
1272 void hammer2_time_to_timespec(u_int64_t xtime, struct timespec *ts);
1273 u_int64_t hammer2_timespec_to_time(const struct timespec *ts);
1274 u_int32_t hammer2_to_unix_xid(const uuid_t *uuid);
1275 void hammer2_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
1276 void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
1277 
1278 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1279 int hammer2_getradix(size_t bytes);
1280 
1281 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1282 			hammer2_key_t *lbasep, hammer2_key_t *leofp);
1283 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1284 void hammer2_update_time(uint64_t *timep);
1285 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
1286 
1287 /*
1288  * hammer2_inode.c
1289  */
1290 struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
1291 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1292 			hammer2_tid_t inum);
1293 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
1294 			hammer2_cluster_t *cluster, int idx);
1295 void hammer2_inode_free(hammer2_inode_t *ip);
1296 void hammer2_inode_ref(hammer2_inode_t *ip);
1297 void hammer2_inode_drop(hammer2_inode_t *ip);
1298 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1299 			hammer2_cluster_t *cluster);
1300 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1301 			int idx);
1302 void hammer2_inode_modify(hammer2_inode_t *ip);
1303 void hammer2_inode_run_unlinkq(hammer2_pfs_t *pmp);
1304 
1305 hammer2_inode_t *hammer2_inode_create(hammer2_inode_t *dip,
1306 			struct vattr *vap, struct ucred *cred,
1307 			const uint8_t *name, size_t name_len, hammer2_key_t lhc,
1308 			hammer2_key_t inum, uint8_t type, uint8_t target_type,
1309 			int flags, int *errorp);
1310 int hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
1311 			const char *name, size_t name_len,
1312 			hammer2_key_t lhc);
1313 hammer2_inode_t *hammer2_inode_common_parent(hammer2_inode_t *fdip,
1314 			hammer2_inode_t *tdip);
1315 void hammer2_inode_fsync(hammer2_inode_t *ip);
1316 int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
1317 void hammer2_inode_install_hidden(hammer2_pfs_t *pmp);
1318 
1319 /*
1320  * hammer2_chain.c
1321  */
1322 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1323 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1324 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1325 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1326 				hammer2_pfs_t *pmp,
1327 				hammer2_blockref_t *bref);
1328 void hammer2_chain_core_init(hammer2_chain_t *chain);
1329 void hammer2_chain_ref(hammer2_chain_t *chain);
1330 void hammer2_chain_drop(hammer2_chain_t *chain);
1331 void hammer2_chain_lock(hammer2_chain_t *chain, int how);
1332 void hammer2_chain_load_data(hammer2_chain_t *chain);
1333 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1334 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1335 int hammer2_chain_snapshot(hammer2_chain_t *chain, hammer2_ioc_pfs_t *pmp,
1336 				hammer2_tid_t mtid);
1337 
1338 int hammer2_chain_hardlink_find(hammer2_inode_t *dip,
1339 				hammer2_chain_t **parentp,
1340 				hammer2_chain_t **chainp,
1341 				int flags);
1342 void hammer2_chain_modify(hammer2_chain_t *chain,
1343 				hammer2_tid_t mtid, int flags);
1344 void hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
1345 				hammer2_tid_t mtid, int flags);
1346 void hammer2_chain_resize(hammer2_inode_t *ip, hammer2_chain_t *parent,
1347 				hammer2_chain_t *chain,
1348 				hammer2_tid_t mtid, int nradix, int flags);
1349 void hammer2_chain_unlock(hammer2_chain_t *chain);
1350 void hammer2_chain_wait(hammer2_chain_t *chain);
1351 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1352 				hammer2_blockref_t *bref);
1353 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1354 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1355 hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t **parentp, int how);
1356 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1357 				hammer2_key_t *key_nextp,
1358 				hammer2_key_t key_beg, hammer2_key_t key_end,
1359 				int *cache_indexp, int flags);
1360 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1361 				hammer2_chain_t *chain,
1362 				hammer2_key_t *key_nextp,
1363 				hammer2_key_t key_beg, hammer2_key_t key_end,
1364 				int *cache_indexp, int flags);
1365 hammer2_chain_t *hammer2_chain_scan(hammer2_chain_t *parent,
1366 				hammer2_chain_t *chain,
1367 				int *cache_indexp, int flags);
1368 
1369 int hammer2_chain_create(hammer2_chain_t **parentp,
1370 				hammer2_chain_t **chainp, hammer2_pfs_t *pmp,
1371 				hammer2_key_t key, int keybits,
1372 				int type, size_t bytes,
1373 				hammer2_tid_t mtid, int flags);
1374 void hammer2_chain_rename(hammer2_blockref_t *bref,
1375 				hammer2_chain_t **parentp,
1376 				hammer2_chain_t *chain,
1377 				hammer2_tid_t mtid, int flags);
1378 void hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
1379 				hammer2_tid_t mtid, int flags);
1380 void hammer2_flush(hammer2_chain_t *chain, hammer2_tid_t mtid, int istop);
1381 void hammer2_delayed_flush(hammer2_chain_t *chain);
1382 void hammer2_chain_setflush(hammer2_chain_t *chain);
1383 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1384 				hammer2_blockref_t *base, int count);
1385 
1386 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1387 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1388 
1389 
1390 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1391 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1392 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1393 
1394 void hammer2_base_delete(hammer2_chain_t *chain,
1395 				hammer2_blockref_t *base, int count,
1396 				int *cache_indexp, hammer2_chain_t *child);
1397 void hammer2_base_insert(hammer2_chain_t *chain,
1398 				hammer2_blockref_t *base, int count,
1399 				int *cache_indexp, hammer2_chain_t *child);
1400 
1401 /*
1402  * hammer2_trans.c
1403  */
1404 void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
1405 hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
1406 void hammer2_trans_clear_preflush(hammer2_pfs_t *pmp);
1407 void hammer2_trans_done(hammer2_pfs_t *pmp);
1408 hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
1409 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
1410 
1411 /*
1412  * hammer2_ioctl.c
1413  */
1414 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1415 				int fflag, struct ucred *cred);
1416 
1417 /*
1418  * hammer2_io.c
1419  */
1420 void hammer2_io_putblk(hammer2_io_t **diop);
1421 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1422 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1423 void hammer2_io_getblk(hammer2_dev_t *hmp, off_t lbase, int lsize,
1424 				hammer2_iocb_t *iocb);
1425 void hammer2_io_complete(hammer2_iocb_t *iocb);
1426 void hammer2_io_callback(struct bio *bio);
1427 void hammer2_iocb_wait(hammer2_iocb_t *iocb);
1428 int hammer2_io_new(hammer2_dev_t *hmp, off_t lbase, int lsize,
1429 				hammer2_io_t **diop);
1430 int hammer2_io_newnz(hammer2_dev_t *hmp, off_t lbase, int lsize,
1431 				hammer2_io_t **diop);
1432 int hammer2_io_newq(hammer2_dev_t *hmp, off_t lbase, int lsize,
1433 				hammer2_io_t **diop);
1434 int hammer2_io_bread(hammer2_dev_t *hmp, off_t lbase, int lsize,
1435 				hammer2_io_t **diop);
1436 void hammer2_io_bawrite(hammer2_io_t **diop);
1437 void hammer2_io_bdwrite(hammer2_io_t **diop);
1438 int hammer2_io_bwrite(hammer2_io_t **diop);
1439 int hammer2_io_isdirty(hammer2_io_t *dio);
1440 void hammer2_io_setdirty(hammer2_io_t *dio);
1441 void hammer2_io_setinval(hammer2_io_t *dio, u_int bytes);
1442 void hammer2_io_brelse(hammer2_io_t **diop);
1443 void hammer2_io_bqrelse(hammer2_io_t **diop);
1444 
1445 /*
1446  * XOP API in hammer2_thread.c
1447  */
1448 void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
1449 void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
1450 void hammer2_xop_setname(hammer2_xop_head_t *xop,
1451 				const char *name, size_t name_len);
1452 void hammer2_xop_setname2(hammer2_xop_head_t *xop,
1453 				const char *name, size_t name_len);
1454 void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
1455 void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
1456 void hammer2_xop_reinit(hammer2_xop_head_t *xop);
1457 void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
1458 void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
1459 void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func);
1460 void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
1461 				int notidx);
1462 int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
1463 void hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask);
1464 int hammer2_xop_active(hammer2_xop_head_t *xop);
1465 int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
1466 				int clindex, int error);
1467 
1468 /*
1469  * XOP backends in hammer2_xops.c
1470  */
1471 void hammer2_xop_ipcluster(hammer2_xop_t *xop, int clidx);
1472 void hammer2_xop_readdir(hammer2_xop_t *xop, int clidx);
1473 void hammer2_xop_nresolve(hammer2_xop_t *xop, int clidx);
1474 void hammer2_xop_unlink(hammer2_xop_t *xop, int clidx);
1475 void hammer2_xop_nrename(hammer2_xop_t *xop, int clidx);
1476 void hammer2_xop_nlink(hammer2_xop_t *xop, int clidx);
1477 void hammer2_xop_scanlhc(hammer2_xop_t *xop, int clidx);
1478 void hammer2_xop_scanall(hammer2_xop_t *xop, int clidx);
1479 void hammer2_xop_lookup(hammer2_xop_t *xop, int clidx);
1480 void hammer2_inode_xop_create(hammer2_xop_t *xop, int clidx);
1481 void hammer2_inode_xop_destroy(hammer2_xop_t *xop, int clidx);
1482 void hammer2_inode_xop_fsync(hammer2_xop_t *xop, int clidx);
1483 void hammer2_inode_xop_unlinkall(hammer2_xop_t *xop, int clidx);
1484 void hammer2_inode_xop_connect(hammer2_xop_t *xop, int clidx);
1485 void hammer2_inode_xop_flush(hammer2_xop_t *xop, int clidx);
1486 
1487 /*
1488  * hammer2_msgops.c
1489  */
1490 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1491 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1492 
1493 /*
1494  * hammer2_vfsops.c
1495  */
1496 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1497 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
1498 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1499 hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
1500 				const hammer2_inode_data_t *ripdata,
1501 				hammer2_tid_t modify_tid);
1502 
1503 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1504 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1505 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);
1506 
1507 /*
1508  * hammer2_freemap.c
1509  */
1510 int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
1511 void hammer2_freemap_adjust(hammer2_dev_t *hmp,
1512 				hammer2_blockref_t *bref, int how);
1513 
1514 /*
1515  * hammer2_cluster.c
1516  */
1517 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1518 const hammer2_media_data_t *hammer2_cluster_rdata(hammer2_cluster_t *cluster);
1519 hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
1520 hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
1521 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1522 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
1523 				hammer2_blockref_t *bref);
1524 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1525 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1526 void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1527 int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
1528 			int flags);
1529 void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
1530 void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
1531 hammer2_cluster_t *hammer2_cluster_copy(hammer2_cluster_t *ocluster);
1532 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1533 
1534 int hammer2_bulk_scan(hammer2_chain_t *parent,
1535 			int (*func)(hammer2_chain_t *chain, void *info),
1536 			void *info);
1537 int hammer2_bulkfree_pass(hammer2_dev_t *hmp,
1538 			struct hammer2_ioc_bulkfree *bfi);
1539 
1540 /*
1541  * hammer2_iocom.c
1542  */
1543 void hammer2_iocom_init(hammer2_dev_t *hmp);
1544 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1545 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1546 
1547 /*
1548  * hammer2_thread.c
1549  */
1550 void hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
1551 			const char *id, int clindex, int repidx,
1552 			void (*func)(void *arg));
1553 void hammer2_thr_delete(hammer2_thread_t *thr);
1554 void hammer2_thr_remaster(hammer2_thread_t *thr);
1555 void hammer2_thr_freeze_async(hammer2_thread_t *thr);
1556 void hammer2_thr_freeze(hammer2_thread_t *thr);
1557 void hammer2_thr_unfreeze(hammer2_thread_t *thr);
1558 void hammer2_primary_sync_thread(void *arg);
1559 void hammer2_primary_xops_thread(void *arg);
1560 
1561 /*
1562  * hammer2_strategy.c
1563  */
1564 int hammer2_vop_strategy(struct vop_strategy_args *ap);
1565 int hammer2_vop_bmap(struct vop_bmap_args *ap);
1566 void hammer2_write_thread(void *arg);
1567 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1568 
1569 #endif /* _KERNEL */
1570 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */
1571