/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#ifndef	_SYS_DNODE_H
#define	_SYS_DNODE_H

#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/zfs_refcount.h>
#include <sys/dmu_zfetch.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#include <sys/wmsum.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * dnode_hold() flags.
 */
#define	DNODE_MUST_BE_ALLOCATED	1
#define	DNODE_MUST_BE_FREE	2
#define	DNODE_DRY_RUN		4

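/*
 * Illustrative usage sketch (not part of this header): callers take a
 * hold with one of the flags above, via dnode_hold() for the common
 * DNODE_MUST_BE_ALLOCATED case, and drop it with dnode_rele():
 *
 *	dnode_t *dn;
 *	int err = dnode_hold(os, object, FTAG, &dn);
 *	if (err == 0) {
 *		... use dn ...
 *		dnode_rele(dn, FTAG);
 *	}
 *
 * DNODE_DRY_RUN asks whether the hold would succeed without actually
 * taking it (see dnode_try_claim() below).
 */
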
/*
 * dnode_next_offset() flags.
 */
#define	DNODE_FIND_HOLE		1
#define	DNODE_FIND_BACKWARDS	2
#define	DNODE_FIND_HAVELOCK	4

/*
 * Fixed constants.
 */
#define	DNODE_SHIFT		9	/* 512 bytes */
#define	DN_MIN_INDBLKSHIFT	12	/* 4k */
/*
 * If we ever increase this value beyond 20, we need to revisit all logic that
 * does x << level * epbs to handle overflow.  With a 1M indirect block size,
 * 4 levels of indirect blocks would not be able to guarantee addressing an
 * entire object, so 5 levels would be used, but 5 * (20 - 7) = 65 bits of
 * shift would overflow the 64-bit offset space.
 */
#define	DN_MAX_INDBLKSHIFT	17	/* 128k */
#define	DNODE_BLOCK_SHIFT	14	/* 16k */
#define	DNODE_CORE_SIZE		64	/* 64 bytes for dnode sans blkptrs */
#define	DN_MAX_OBJECT_SHIFT	48	/* 256 trillion (zfs_fid_t limit) */
#define	DN_MAX_OFFSET_SHIFT	64	/* 2^64 bytes in a dnode */

/*
 * dnode id flags
 *
 * Note: a file will never have its IDs moved from bonus->spill
 */
#define	DN_ID_CHKED_BONUS	0x1
#define	DN_ID_CHKED_SPILL	0x2
#define	DN_ID_OLD_EXIST		0x4
#define	DN_ID_NEW_EXIST		0x8

/*
 * Derived constants.
 */
#define	DNODE_MIN_SIZE		(1 << DNODE_SHIFT)
#define	DNODE_MAX_SIZE		(1 << DNODE_BLOCK_SHIFT)
#define	DNODE_BLOCK_SIZE	(1 << DNODE_BLOCK_SHIFT)
#define	DNODE_MIN_SLOTS		(DNODE_MIN_SIZE >> DNODE_SHIFT)
#define	DNODE_MAX_SLOTS		(DNODE_MAX_SIZE >> DNODE_SHIFT)
#define	DN_BONUS_SIZE(dnsize)	((dnsize) - DNODE_CORE_SIZE - \
	(1 << SPA_BLKPTRSHIFT))
#define	DN_SLOTS_TO_BONUSLEN(slots)	DN_BONUS_SIZE((slots) << DNODE_SHIFT)
#define	DN_OLD_MAX_BONUSLEN	(DN_BONUS_SIZE(DNODE_MIN_SIZE))
#define	DN_MAX_NBLKPTR	((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define	DN_MAX_OBJECT	(1ULL << DN_MAX_OBJECT_SHIFT)
#define	DN_ZERO_BONUSLEN	(DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)
#define	DN_KILL_SPILLBLK (1)

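/*
 * Worked example (illustrative, derived from the constants above): a
 * 1024-byte (2-slot) dnode yields
 *
 *	DN_SLOTS_TO_BONUSLEN(2) = DN_BONUS_SIZE(2 << 9)
 *	                        = 1024 - 64 - 128 = 832 bytes
 *
 * of bonus space, while the legacy 512-byte dnode yields
 * DN_OLD_MAX_BONUSLEN = 512 - 64 - 128 = 320 bytes.
 */
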
#define	DN_SLOT_UNINIT		((void *)NULL)	/* Uninitialized */
#define	DN_SLOT_FREE		((void *)1UL)	/* Free slot */
#define	DN_SLOT_ALLOCATED	((void *)2UL)	/* Allocated slot */
#define	DN_SLOT_INTERIOR	((void *)3UL)	/* Interior allocated slot */
#define	DN_SLOT_IS_PTR(dn)	((void *)dn > DN_SLOT_INTERIOR)
#define	DN_SLOT_IS_VALID(dn)	((void *)dn != NULL)

#define	DNODES_PER_BLOCK_SHIFT	(DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define	DNODES_PER_BLOCK	(1ULL << DNODES_PER_BLOCK_SHIFT)

/*
 * This is inaccurate if the indblkshift of the particular object is not the
 * max.  But it's only used by userland to calculate the zvol reservation.
 */
#define	DNODES_PER_LEVEL_SHIFT	(DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
#define	DNODES_PER_LEVEL	(1ULL << DNODES_PER_LEVEL_SHIFT)

#define	DN_MAX_LEVELS	(DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \
	DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1)

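/*
 * Worked example (illustrative): with DN_MAX_OFFSET_SHIFT = 64,
 * SPA_MINBLOCKSHIFT = 9, DN_MIN_INDBLKSHIFT = 12, and
 * SPA_BLKPTRSHIFT = 7,
 *
 *	DN_MAX_LEVELS = DIV_ROUND_UP(64 - 9, 12 - 7) + 1
 *	              = DIV_ROUND_UP(55, 5) + 1 = 12
 *
 * so even at the smallest block sizes, 12 levels of indirection
 * suffice to address the full 2^64-byte object space.
 */
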
#define	DN_BONUS(dnp)	((void*)((dnp)->dn_bonus + \
	(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
#define	DN_MAX_BONUS_LEN(dnp) \
	((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? \
	(uint8_t *)DN_SPILL_BLKPTR(dnp) - (uint8_t *)DN_BONUS(dnp) : \
	(uint8_t *)(dnp + (dnp->dn_extra_slots + 1)) - (uint8_t *)DN_BONUS(dnp))

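/*
 * Worked example (illustrative): for a legacy 512-byte dnode with
 * dn_nblkptr = 1 and dn_extra_slots = 0, DN_BONUS() points 192 bytes
 * into the dnode (64-byte core plus one 128-byte blkptr), so
 *
 *	DN_MAX_BONUS_LEN = 512 - 192 = 320 bytes         (no spill)
 *	DN_MAX_BONUS_LEN = (512 - 128) - 192 = 192 bytes (with dn_spill)
 *
 * matching the dn_bonus[0..319] and dn_bonus[0..191] layouts in the
 * dnode_phys_t diagram below.
 */
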
#define	DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
	(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)

#define	EPB(blkshift, typeshift)	(1 << (blkshift - typeshift))

struct dmu_buf_impl;
struct objset;
struct zio;

enum dnode_dirtycontext {
	DN_UNDIRTIED,
	DN_DIRTY_OPEN,
	DN_DIRTY_SYNC
};

/* Is dn_used in bytes?  If not, it's in multiples of SPA_MINBLOCKSIZE */
#define	DNODE_FLAG_USED_BYTES			(1 << 0)
#define	DNODE_FLAG_USERUSED_ACCOUNTED		(1 << 1)

/* Does dnode have a SA spill blkptr in bonus? */
#define	DNODE_FLAG_SPILL_BLKPTR			(1 << 2)

/* User/Group/Project dnode accounting */
#define	DNODE_FLAG_USEROBJUSED_ACCOUNTED	(1 << 3)

/*
 * This mask defines the set of flags which are "portable", meaning
 * that they can be preserved when doing a raw encrypted zfs send.
 * Flags included in this mask will be protected by AAD when the block
 * of dnodes is encrypted.
 */
#define	DNODE_CRYPT_PORTABLE_FLAGS_MASK		(DNODE_FLAG_SPILL_BLKPTR)

/*
 * VARIABLE-LENGTH (LARGE) DNODES
 *
 * The motivation for variable-length dnodes is to eliminate the overhead
 * associated with using spill blocks.  Spill blocks are used to store
 * system attribute data (i.e. file metadata) that does not fit in the
 * dnode's bonus buffer. By allowing a larger bonus buffer area the use of
 * a spill block can be avoided.  Spill blocks potentially incur an
 * additional read I/O for every dnode in a dnode block. As a worst case
 * example, reading 32 dnodes from a 16k dnode block and all of the spill
 * blocks could issue 33 separate reads. Now suppose those dnodes have size
 * 1024 and therefore don't need spill blocks. Then the worst case number
 * of blocks read is reduced from 33 to two--one per dnode block.
 *
 * ZFS-on-Linux systems that make heavy use of extended attributes benefit
 * from this feature. In particular, ZFS-on-Linux supports the xattr=sa
 * dataset property which allows file extended attribute data to be stored
 * in the dnode bonus buffer as an alternative to the traditional
 * directory-based format. Workloads such as SELinux and the Lustre
 * distributed filesystem often store enough xattr data to force spill
 * blocks when xattr=sa is in effect. Large dnodes may therefore provide a
 * performance benefit to such systems. Other use cases that benefit from
 * this feature include files with large ACLs and symbolic links with long
 * target names.
 *
 * The size of a dnode may be a multiple of 512 bytes up to the size of a
 * dnode block (currently 16384 bytes). The dn_extra_slots field of the
 * on-disk dnode_phys_t structure describes the size of the physical dnode
 * on disk. The field represents how many "extra" dnode_phys_t slots a
 * dnode consumes in its dnode block. This convention results in a value of
 * 0 for 512 byte dnodes which preserves on-disk format compatibility with
 * older software which doesn't support large dnodes.
 *
 * Similarly, the in-memory dnode_t structure has a dn_num_slots field
 * to represent the total number of dnode_phys_t slots consumed on disk.
 * Thus dn->dn_num_slots is 1 greater than the corresponding
 * dnp->dn_extra_slots. This difference in convention was adopted
 * because, unlike on-disk structures, backward compatibility is not a
 * concern for in-memory objects, so we used a more natural way to
 * represent size for a dnode_t.
 *
 * The default size for newly created dnodes is determined by the value of
 * the "dnodesize" dataset property. By default the property is set to
 * "legacy" which is compatible with older software. Setting the property
 * to "auto" will allow the filesystem to choose the most suitable dnode
 * size. Currently this just sets the default dnode size to 1k, but future
 * code improvements could dynamically choose a size based on observed
 * workload patterns. Dnodes of varying sizes can coexist within the same
 * dataset and even within the same dnode block.
 */

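/*
 * Illustrative sketch of the two size conventions described above: a
 * 1k dnode has dn_num_slots == 2 in memory but dn_extra_slots == 1 on
 * disk, and the two are always related by
 *
 *	ASSERT3U(dn->dn_num_slots, ==, dn->dn_phys->dn_extra_slots + 1);
 *	bonuslen_max = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
 *
 * (a hypothetical consistency check, not an excerpt from the code).
 */
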
typedef struct dnode_phys {
	uint8_t dn_type;		/* dmu_object_type_t */
	uint8_t dn_indblkshift;		/* ln2(indirect block size) */
	uint8_t dn_nlevels;		/* 1=dn_blkptr->data blocks */
	uint8_t dn_nblkptr;		/* length of dn_blkptr */
	uint8_t dn_bonustype;		/* type of data in bonus buffer */
	uint8_t	dn_checksum;		/* ZIO_CHECKSUM type */
	uint8_t	dn_compress;		/* ZIO_COMPRESS type */
	uint8_t dn_flags;		/* DNODE_FLAG_* */
	uint16_t dn_datablkszsec;	/* data block size in 512b sectors */
	uint16_t dn_bonuslen;		/* length of dn_bonus */
	uint8_t dn_extra_slots;		/* # of subsequent slots consumed */
	uint8_t dn_pad2[3];

	/* accounting is protected by dn_dirty_mtx */
	uint64_t dn_maxblkid;		/* largest allocated block ID */
	uint64_t dn_used;		/* bytes (or sectors) of disk space */

	/*
	 * Both dn_pad2 and dn_pad3 are protected by the block's MAC. This
	 * allows us to protect any fields that might be added here in the
	 * future. In either case, developers will want to check
	 * zio_crypt_init_uios_dnode() and zio_crypt_do_dnode_hmac_updates()
	 * to ensure the new field is being protected and updated properly.
	 */
	uint64_t dn_pad3[4];

	/*
	 * The tail region is 448 bytes for a 512 byte dnode, and
	 * correspondingly larger for larger dnode sizes. The spill
	 * block pointer, when present, is always at the end of the tail
	 * region. There are three ways this space may be used, using
	 * a 512 byte dnode for this diagram:
	 *
	 * 0       64      128     192     256     320     384     448 (offset)
	 * +---------------+---------------+---------------+-------+
	 * | dn_blkptr[0]  | dn_blkptr[1]  | dn_blkptr[2]  | /     |
	 * +---------------+---------------+---------------+-------+
	 * | dn_blkptr[0]  | dn_bonus[0..319]                      |
	 * +---------------+-----------------------+---------------+
	 * | dn_blkptr[0]  | dn_bonus[0..191]      | dn_spill      |
	 * +---------------+-----------------------+---------------+
	 */
	union {
		blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
		struct {
			blkptr_t __dn_ignore1;
			uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
		};
		struct {
			blkptr_t __dn_ignore2;
			uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
			    sizeof (blkptr_t)];
			blkptr_t dn_spill;
		};
	};
} dnode_phys_t;

#define	DN_SPILL_BLKPTR(dnp)	((blkptr_t *)((char *)(dnp) + \
	(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT)))

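/*
 * Worked example (illustrative): for a 512-byte dnode
 * (dn_extra_slots == 0),
 *
 *	DN_SPILL_BLKPTR(dnp) = (char *)dnp + (1 << 9) - (1 << 7)
 *	                     = (char *)dnp + 384
 *
 * which is the dn_spill slot at offset 384 in the diagram above; for
 * a 1024-byte dnode (dn_extra_slots == 1) the spill pointer sits at
 * offset 896.  It always occupies the last 128 bytes of the dnode.
 */
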
struct dnode {
	/*
	 * Protects the structure of the dnode, including the number of levels
	 * of indirection (dn_nlevels), dn_maxblkid, and dn_next_*
	 */
	krwlock_t dn_struct_rwlock;

	/* Our link on dn_objset->os_dnodes list; protected by os_lock.  */
	list_node_t dn_link;

	/* immutable: */
	struct objset *dn_objset;
	uint64_t dn_object;
	struct dmu_buf_impl *dn_dbuf;
	struct dnode_handle *dn_handle;
	dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */

	/*
	 * Copies of fields in dn_phys.  They're valid in the open
	 * context (e.g. even before the dnode is first synced).
	 * Where necessary, these are protected by dn_struct_rwlock.
	 */
	dmu_object_type_t dn_type;	/* object type */
	uint16_t dn_bonuslen;		/* bonus length */
	uint8_t dn_bonustype;		/* bonus type */
	uint8_t dn_nblkptr;		/* number of blkptrs (immutable) */
	uint8_t dn_checksum;		/* ZIO_CHECKSUM type */
	uint8_t dn_compress;		/* ZIO_COMPRESS type */
	uint8_t dn_nlevels;
	uint8_t dn_indblkshift;
	uint8_t dn_datablkshift;	/* zero if blksz not power of 2! */
	uint8_t dn_moved;		/* Has this dnode been moved? */
	uint16_t dn_datablkszsec;	/* in 512b sectors */
	uint32_t dn_datablksz;		/* in bytes */
	uint64_t dn_maxblkid;
	uint8_t dn_next_type[TXG_SIZE];
	uint8_t dn_num_slots;		/* metadnode slots consumed on disk */
	uint8_t dn_next_nblkptr[TXG_SIZE];
	uint8_t dn_next_nlevels[TXG_SIZE];
	uint8_t dn_next_indblkshift[TXG_SIZE];
	uint8_t dn_next_bonustype[TXG_SIZE];
	uint8_t dn_rm_spillblk[TXG_SIZE];	/* for removing spill blk */
	uint16_t dn_next_bonuslen[TXG_SIZE];
	uint32_t dn_next_blksz[TXG_SIZE];	/* next block size in bytes */
	uint64_t dn_next_maxblkid[TXG_SIZE];	/* next maxblkid in bytes */

	/* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */
	uint32_t dn_dbufs_count;	/* count of dn_dbufs */

	/* protected by os_lock: */
	multilist_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */

	/* protected by dn_mtx: */
	kmutex_t dn_mtx;
	list_t dn_dirty_records[TXG_SIZE];
	struct range_tree *dn_free_ranges[TXG_SIZE];
	uint64_t dn_allocated_txg;
	uint64_t dn_free_txg;
	uint64_t dn_assigned_txg;
	uint64_t dn_dirty_txg;			/* txg dnode was last dirtied */
	kcondvar_t dn_notxholds;
	kcondvar_t dn_nodnholds;
	enum dnode_dirtycontext dn_dirtyctx;
	const void *dn_dirtyctx_firstset;	/* dbg: contents meaningless */

	/* protected by own devices */
	zfs_refcount_t dn_tx_holds;
	zfs_refcount_t dn_holds;

	kmutex_t dn_dbufs_mtx;
	/*
	 * Descendent dbufs, ordered by dbuf_compare. Note that dn_dbufs
	 * can contain multiple dbufs of the same (level, blkid) when a
	 * dbuf is marked DB_EVICTING without being removed from
	 * dn_dbufs. To maintain the avl invariant that there cannot be
	 * duplicate entries, we order the dbufs by an arbitrary value -
	 * their address in memory. This means that dn_dbufs cannot be used to
	 * directly look up a dbuf. Instead, callers must use avl_walk, have
	 * a reference to the dbuf, or look up a non-existent node with
	 * db_state = DB_SEARCH (see dbuf_free_range for an example).
	 */
	avl_tree_t dn_dbufs;

	/* protected by dn_struct_rwlock */
	struct dmu_buf_impl *dn_bonus;	/* bonus buffer dbuf */

	boolean_t dn_have_spill;	/* have spill or are spilling */

	/* parent IO for current sync write */
	zio_t *dn_zio;

	/* used in syncing context */
	uint64_t dn_oldused;	/* old phys used bytes */
	uint64_t dn_oldflags;	/* old phys dn_flags */
	uint64_t dn_olduid, dn_oldgid, dn_oldprojid;
	uint64_t dn_newuid, dn_newgid, dn_newprojid;
	int dn_id_flags;

	/* holds prefetch structure */
	struct zfetch	dn_zfetch;
};

/*
 * Since the AVL tree already has an embedded element counter, use
 * dn_dbufs_count only for dbufs not counted there (bonus buffers),
 * and add the two counts together.
 */
#define	DN_DBUFS_COUNT(dn)	((dn)->dn_dbufs_count + \
    avl_numnodes(&(dn)->dn_dbufs))

/*
 * We use this (otherwise unused) bit to indicate if the value of
 * dn_next_maxblkid[txgoff] is valid to use in dnode_sync().
 */
#define	DMU_NEXT_MAXBLKID_SET		(1ULL << 63)

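/*
 * Illustrative sketch (not a verbatim excerpt): a writer records the
 * new maxblkid for the open txg and marks it valid with this bit,
 *
 *	dn->dn_next_maxblkid[txgoff] = blkid | DMU_NEXT_MAXBLKID_SET;
 *
 * and dnode_sync() only consumes the value after checking for and
 * stripping the flag with ~DMU_NEXT_MAXBLKID_SET.
 */
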
/*
 * Adds a level of indirection between the dbuf and the dnode to avoid
 * iterating descendent dbufs in dnode_move(). Handles are not allocated
 * individually, but as an array of child dnodes in dnode_hold_impl().
 */
typedef struct dnode_handle {
	/* Protects dnh_dnode from modification by dnode_move(). */
	zrlock_t dnh_zrlock;
	dnode_t *dnh_dnode;
} dnode_handle_t;

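/*
 * Illustrative sketch (assuming the zrl_add()/zrl_remove() interface
 * from sys/zrlock.h): readers pin the handle so that dnode_move()
 * cannot swap dnh_dnode out from under them:
 *
 *	zrl_add(&dnh->dnh_zrlock);
 *	dn = dnh->dnh_dnode;
 *	... use dn ...
 *	zrl_remove(&dnh->dnh_zrlock);
 */
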
typedef struct dnode_children {
	dmu_buf_user_t dnc_dbu;		/* User evict data */
	size_t dnc_count;		/* number of children */
	dnode_handle_t dnc_children[];	/* sized dynamically */
} dnode_children_t;

typedef struct free_range {
	avl_node_t fr_node;
	uint64_t fr_blkid;
	uint64_t fr_nblks;
} free_range_t;

void dnode_special_open(struct objset *dd, dnode_phys_t *dnp,
    uint64_t object, dnode_handle_t *dnh);
void dnode_special_close(dnode_handle_t *dnh);

void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);

int dnode_hold(struct objset *dd, uint64_t object,
    const void *ref, dnode_t **dnp);
int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots,
    const void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, const void *ref);
void dnode_rele(dnode_t *dn, const void *ref);
void dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting);
int dnode_try_claim(objset_t *os, uint64_t object, int slots);
boolean_t dnode_is_dirty(dnode_t *dn);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dn_slots,
    boolean_t keep_spill, dmu_tx_t *tx);
void dnode_free(dnode_t *dn, dmu_tx_t *tx);
void dnode_byteswap(dnode_phys_t *dnp);
void dnode_buf_byteswap(void *buf, size_t size);
void dnode_verify(dnode_t *dn);
int dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx);
int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
void dnode_diduse_space(dnode_t *dn, int64_t space);
void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
    boolean_t have_read, boolean_t force);
uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
void dnode_init(void);
void dnode_fini(void);
int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
    int minlvl, uint64_t blkfill, uint64_t txg);
void dnode_evict_dbufs(dnode_t *dn);
void dnode_evict_bonus(dnode_t *dn);
void dnode_free_interior_slots(dnode_t *dn);

#define	DNODE_IS_DIRTY(_dn)						\
	((_dn)->dn_dirty_txg >= spa_syncing_txg((_dn)->dn_objset->os_spa))

#define	DNODE_LEVEL_IS_CACHEABLE(_dn, _level)				\
	((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL ||		\
	(((_level) > 0 || DMU_OT_IS_METADATA((_dn)->dn_type)) &&	\
	(_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))

/*
 * Used for dnodestats kstat.
 */
typedef struct dnode_stats {
	/*
	 * Number of failed attempts to hold a meta dnode dbuf.
	 */
	kstat_named_t dnode_hold_dbuf_hold;
	/*
	 * Number of failed attempts to read a meta dnode dbuf.
	 */
	kstat_named_t dnode_hold_dbuf_read;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was able
	 * to hold the requested object number which was allocated.  This is
	 * the common case when looking up any allocated object number.
	 */
	kstat_named_t dnode_hold_alloc_hits;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
	 * able to hold the requested object number because it was not
	 * allocated.
	 */
	kstat_named_t dnode_hold_alloc_misses;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
	 * able to hold the requested object number because the object number
	 * refers to an interior large dnode slot.
	 */
	kstat_named_t dnode_hold_alloc_interior;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) needed
	 * to retry acquiring slot zrl locks due to contention.
	 */
	kstat_named_t dnode_hold_alloc_lock_retry;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) did not
	 * need to create the dnode because another thread did so after
	 * dropping the read lock but before acquiring the write lock.
	 */
	kstat_named_t dnode_hold_alloc_lock_misses;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) found
	 * a free dnode instantiated by dnode_create() but not yet allocated
	 * by dnode_allocate().
	 */
	kstat_named_t dnode_hold_alloc_type_none;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was able
	 * to hold the requested range of free dnode slots.
	 */
	kstat_named_t dnode_hold_free_hits;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
	 * able to hold the requested range of free dnode slots because
	 * at least one slot was allocated.
	 */
	kstat_named_t dnode_hold_free_misses;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
	 * able to hold the requested range of free dnode slots because
	 * after acquiring the zrl lock at least one slot was allocated.
	 */
	kstat_named_t dnode_hold_free_lock_misses;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) needed
	 * to retry acquiring slot zrl locks due to contention.
	 */
	kstat_named_t dnode_hold_free_lock_retry;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
	 * a range of dnode slots which were held by another thread.
	 */
	kstat_named_t dnode_hold_free_refcount;
	/*
	 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
	 * a range of dnode slots which would overflow the dnode_phys_t.
	 */
	kstat_named_t dnode_hold_free_overflow;
	/*
	 * Number of times dnode_free_interior_slots() needed to retry
	 * acquiring a slot zrl lock due to contention.
	 */
	kstat_named_t dnode_free_interior_lock_retry;
	/*
	 * Number of new dnodes allocated by dnode_allocate().
	 */
	kstat_named_t dnode_allocate;
	/*
	 * Number of dnodes re-allocated by dnode_reallocate().
	 */
	kstat_named_t dnode_reallocate;
	/*
	 * Number of meta dnode dbufs evicted.
	 */
	kstat_named_t dnode_buf_evict;
	/*
	 * Number of times dmu_object_alloc*() reached the end of the existing
	 * object ID chunk and advanced to a new one.
	 */
	kstat_named_t dnode_alloc_next_chunk;
	/*
	 * Number of times multiple threads attempted to allocate a dnode
	 * from the same block of free dnodes.
	 */
	kstat_named_t dnode_alloc_race;
	/*
	 * Number of times dmu_object_alloc*() was forced to advance to the
	 * next meta dnode dbuf due to an error from dmu_object_next().
	 */
	kstat_named_t dnode_alloc_next_block;
	/*
	 * Statistics for tracking dnodes which have been moved.
	 */
	kstat_named_t dnode_move_invalid;
	kstat_named_t dnode_move_recheck1;
	kstat_named_t dnode_move_recheck2;
	kstat_named_t dnode_move_special;
	kstat_named_t dnode_move_handle;
	kstat_named_t dnode_move_rwlock;
	kstat_named_t dnode_move_active;
} dnode_stats_t;

typedef struct dnode_sums {
	wmsum_t dnode_hold_dbuf_hold;
	wmsum_t dnode_hold_dbuf_read;
	wmsum_t dnode_hold_alloc_hits;
	wmsum_t dnode_hold_alloc_misses;
	wmsum_t dnode_hold_alloc_interior;
	wmsum_t dnode_hold_alloc_lock_retry;
	wmsum_t dnode_hold_alloc_lock_misses;
	wmsum_t dnode_hold_alloc_type_none;
	wmsum_t dnode_hold_free_hits;
	wmsum_t dnode_hold_free_misses;
	wmsum_t dnode_hold_free_lock_misses;
	wmsum_t dnode_hold_free_lock_retry;
	wmsum_t dnode_hold_free_refcount;
	wmsum_t dnode_hold_free_overflow;
	wmsum_t dnode_free_interior_lock_retry;
	wmsum_t dnode_allocate;
	wmsum_t dnode_reallocate;
	wmsum_t dnode_buf_evict;
	wmsum_t dnode_alloc_next_chunk;
	wmsum_t dnode_alloc_race;
	wmsum_t dnode_alloc_next_block;
	wmsum_t dnode_move_invalid;
	wmsum_t dnode_move_recheck1;
	wmsum_t dnode_move_recheck2;
	wmsum_t dnode_move_special;
	wmsum_t dnode_move_handle;
	wmsum_t dnode_move_rwlock;
	wmsum_t dnode_move_active;
} dnode_sums_t;

extern dnode_stats_t dnode_stats;
extern dnode_sums_t dnode_sums;

#define	DNODE_STAT_INCR(stat, val) \
    wmsum_add(&dnode_sums.stat, (val))
#define	DNODE_STAT_BUMP(stat) \
    DNODE_STAT_INCR(stat, 1)

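/*
 * Illustrative usage (hypothetical call sites): counters are bumped
 * by field name, e.g.
 *
 *	DNODE_STAT_BUMP(dnode_hold_alloc_hits);
 *	DNODE_STAT_INCR(dnode_hold_free_refcount, 1);
 */
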
#ifdef ZFS_DEBUG

#define	dprintf_dnode(dn, fmt, ...) do { \
	if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
	char __db_buf[32]; \
	uint64_t __db_obj = (dn)->dn_object; \
	if (__db_obj == DMU_META_DNODE_OBJECT) \
		(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf));	\
	else \
		(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
		    (u_longlong_t)__db_obj);\
	dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \
	    __db_buf, __VA_ARGS__); \
	} \
} while (0)

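/*
 * Illustrative usage (hypothetical call site): the object number is
 * rendered as "mdn" or a decimal id and prefixed to the message, e.g.
 *
 *	dprintf_dnode(dn, "blkid=%llu\n", (u_longlong_t)blkid);
 */
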
#define	DNODE_VERIFY(dn)		dnode_verify(dn)
#define	FREE_VERIFY(db, start, end, tx)	free_verify(db, start, end, tx)

#else

#define	dprintf_dnode(db, fmt, ...)
#define	DNODE_VERIFY(dn)		((void) sizeof ((uintptr_t)(dn)))
#define	FREE_VERIFY(db, start, end, tx)

#endif

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DNODE_H */