xref: /freebsd/sys/contrib/openzfs/include/sys/dbuf.h (revision 271171e0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
24  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26  */
27 
28 #ifndef	_SYS_DBUF_H
29 #define	_SYS_DBUF_H
30 
31 #include <sys/dmu.h>
32 #include <sys/spa.h>
33 #include <sys/txg.h>
34 #include <sys/zio.h>
35 #include <sys/arc.h>
36 #include <sys/zfs_context.h>
37 #include <sys/zfs_refcount.h>
38 #include <sys/zrlock.h>
39 #include <sys/multilist.h>
40 
41 #ifdef	__cplusplus
42 extern "C" {
43 #endif
44 
45 #define	IN_DMU_SYNC 2
46 
47 /*
48  * define flags for dbuf_read
49  */
50 
51 #define	DB_RF_MUST_SUCCEED	(1 << 0)
52 #define	DB_RF_CANFAIL		(1 << 1)
53 #define	DB_RF_HAVESTRUCT	(1 << 2)
54 #define	DB_RF_NOPREFETCH	(1 << 3)
55 #define	DB_RF_NEVERWAIT		(1 << 4)
56 #define	DB_RF_CACHED		(1 << 5)
57 #define	DB_RF_NO_DECRYPT	(1 << 6)
58 
59 /*
60  * The simplified state transition diagram for dbufs looks like:
61  *
62  *		+----> READ ----+
63  *		|		|
64  *		|		V
65  *  (alloc)-->UNCACHED	     CACHED-->EVICTING-->(free)
66  *		|		^	 ^
67  *		|		|	 |
68  *		+----> FILL ----+	 |
69  *		|			 |
70  *		|			 |
71  *		+--------> NOFILL -------+
72  *
73  * DB_SEARCH is an invalid state for a dbuf. It is used by dbuf_free_range
74  * to find all dbufs in a range of a dnode and must be less than any other
75  * dbuf_states_t (see comment on dn_dbufs in dnode.h).
76  */
typedef enum dbuf_states {
	/* Sentinel used only as an AVL search key; never a real dbuf state. */
	DB_SEARCH = -1,
	/* Freshly allocated; no valid data present yet. */
	DB_UNCACHED,
	/* Buffer contents are being filled in by a writer. */
	DB_FILL,
	/* Buffer will be overwritten without its contents being read. */
	DB_NOFILL,
	/* A read of the buffer's data is in progress. */
	DB_READ,
	/* Data is valid and resident in memory. */
	DB_CACHED,
	/* Buffer is being torn down prior to being freed. */
	DB_EVICTING
} dbuf_states_t;
86 
typedef enum dbuf_cached_state {
	/* Not linked into any dbuf cache. */
	DB_NO_CACHE = -1,
	/* Resident in the general dbuf cache. */
	DB_DBUF_CACHE,
	/* Resident in the metadata-only dbuf cache. */
	DB_DBUF_METADATA_CACHE,
	/* Number of dbuf caches; not itself a valid cache id. */
	DB_CACHE_MAX
} dbuf_cached_state_t;
93 
94 struct dnode;
95 struct dmu_tx;
96 
97 /*
98  * level = 0 means the user data
99  * level = 1 means the single indirect block
100  * etc.
101  */
102 
103 struct dmu_buf_impl;
104 
/*
 * Tracks whether a dirty leaf's block pointer has been overridden
 * (see dr_override_state and dbuf_unoverride()), e.g. while a
 * dmu_sync()-style write is outstanding (DR_IN_DMU_SYNC).
 */
typedef enum override_states {
	DR_NOT_OVERRIDDEN,
	DR_IN_DMU_SYNC,
	DR_OVERRIDDEN
} override_states_t;
110 
/*
 * Identifies which lock dmu_buf_lock_parent() took, so that
 * dmu_buf_unlock_parent() can release the matching one:
 * none, the parent dbuf's db_rwlock, or an objset-level lock.
 */
typedef enum db_lock_type {
	DLT_NONE,
	DLT_PARENT,
	DLT_OBJSET
} db_lock_type_t;
116 
/*
 * A dirty record describes the modifications made to a dbuf (or, for
 * lightweight records, to a not-yet-instantiated leaf block) in one
 * transaction group, so that the syncing path can write them out.
 */
typedef struct dbuf_dirty_record {
	/* link on our parent's dirty list */
	list_node_t dr_dirty_node;

	/* transaction group this data will sync in */
	uint64_t dr_txg;

	/* zio of outstanding write IO */
	zio_t *dr_zio;

	/* pointer back to our dbuf */
	struct dmu_buf_impl *dr_dbuf;

	/* list link for dbuf dirty records */
	list_node_t dr_dbuf_node;

	/*
	 * The dnode we are part of.  Note that the dnode can not be moved or
	 * evicted due to the hold that's added by dnode_setdirty() or
	 * dmu_objset_sync_dnodes(), and released by dnode_rele_task() or
	 * userquota_updates_task().  This hold is necessary for
	 * dirty_lightweight_leaf-type dirty records, which don't have a hold
	 * on a dbuf.
	 */
	dnode_t *dr_dnode;

	/* pointer to parent dirty record */
	struct dbuf_dirty_record *dr_parent;

	/* How much space was charged to dsl_pool_dirty_space() for this? */
	unsigned int dr_accounted;

	/* A copy of the bp that points to us */
	blkptr_t dr_bp_copy;

	/* Payload; which arm is valid depends on the record's block level. */
	union dirty_types {
		struct dirty_indirect {

			/* protect access to list */
			kmutex_t dr_mtx;

			/* Our list of dirty children */
			list_t dr_children;
		} di;
		struct dirty_leaf {

			/*
			 * dr_data is set when we dirty the buffer
			 * so that we can retain the pointer even if it
			 * gets COW'd in a subsequent transaction group.
			 */
			arc_buf_t *dr_data;
			/* BP to use when this write has been overridden */
			blkptr_t dr_overridden_by;
			override_states_t dr_override_state;
			/* requested number of copies (replication) for the BP */
			uint8_t dr_copies;
			/* B_TRUE if the write may be elided via nopwrite */
			boolean_t dr_nopwrite;
			boolean_t dr_has_raw_params;

			/*
			 * If dr_has_raw_params is set, the following crypt
			 * params will be set on the BP that's written.
			 */
			boolean_t dr_byteorder;
			uint8_t	dr_salt[ZIO_DATA_SALT_LEN];
			uint8_t	dr_iv[ZIO_DATA_IV_LEN];
			uint8_t	dr_mac[ZIO_DATA_MAC_LEN];
		} dl;
		struct dirty_lightweight_leaf {
			/*
			 * This dirty record refers to a leaf (level=0)
			 * block, whose dbuf has not been instantiated for
			 * performance reasons.
			 */
			uint64_t dr_blkid;
			abd_t *dr_abd;
			zio_prop_t dr_props;
			enum zio_flag dr_flags;
		} dll;
	} dt;
} dbuf_dirty_record_t;
197 
typedef struct dmu_buf_impl {
	/*
	 * The following members are immutable, with the exception of
	 * db.db_data, which is protected by db_mtx.
	 */

	/* the publicly visible structure */
	dmu_buf_t db;

	/* the objset we belong to */
	struct objset *db_objset;

	/*
	 * handle to safely access the dnode we belong to (NULL when evicted)
	 */
	struct dnode_handle *db_dnode_handle;

	/*
	 * our parent buffer; if the dnode points to us directly,
	 * db_parent == db_dnode_handle->dnh_dnode->dn_dbuf
	 * only accessed by sync thread ???
	 * (NULL when evicted)
	 * May change from NULL to non-NULL under the protection of db_mtx
	 * (see dbuf_check_blkptr())
	 */
	struct dmu_buf_impl *db_parent;

	/*
	 * link for hash table of all dmu_buf_impl_t's
	 */
	struct dmu_buf_impl *db_hash_next;

	/*
	 * Our link on the owner dnode's dn_dbufs list.
	 * Protected by its dn_dbufs_mtx.  Should be on the same cache line
	 * as db_level and db_blkid for the best avl_add() performance.
	 */
	avl_node_t db_link;

	/* our block number */
	uint64_t db_blkid;

	/*
	 * Pointer to the blkptr_t which points to us. May be NULL if we
	 * don't have one yet. (NULL when evicted)
	 */
	blkptr_t *db_blkptr;

	/*
	 * Our indirection level.  Data buffers have db_level==0.
	 * Indirect buffers which point to data buffers have
	 * db_level==1. etc.  Buffers which contain dnodes have
	 * db_level==0, since the dnodes are stored in a file.
	 */
	uint8_t db_level;

	/*
	 * Protects db_buf's contents if they contain an indirect block or data
	 * block of the meta-dnode. We use this lock to protect the structure of
	 * the block tree. This means that when modifying this dbuf's data, we
	 * grab its rwlock. When modifying its parent's data (including the
	 * blkptr to this dbuf), we grab the parent's rwlock. The lock ordering
	 * for this lock is:
	 * 1) dn_struct_rwlock
	 * 2) db_rwlock
	 * We don't currently grab multiple dbufs' db_rwlocks at once.
	 */
	krwlock_t db_rwlock;

	/* buffer holding our data */
	arc_buf_t *db_buf;

	/* db_mtx protects the members below */
	kmutex_t db_mtx;

	/*
	 * Current state of the buffer
	 */
	dbuf_states_t db_state;

	/*
	 * Refcount accessed by dmu_buf_{hold,rele}.
	 * If nonzero, the buffer can't be destroyed.
	 * Protected by db_mtx.
	 */
	zfs_refcount_t db_holds;

	/* CV used together with db_mtx to wait on buffer transitions */
	kcondvar_t db_changed;
	/*
	 * Dirty record whose data is currently being written out, if any.
	 * NOTE(review): inferred from the name — confirm against dbuf.c.
	 */
	dbuf_dirty_record_t *db_data_pending;

	/* List of dirty records for the buffer sorted newest to oldest. */
	list_t db_dirty_records;

	/* Link in dbuf_cache or dbuf_metadata_cache */
	multilist_node_t db_cache_link;

	/* Tells us which dbuf cache this dbuf is in, if any */
	dbuf_cached_state_t db_caching_status;

	/* Data which is unique to data (leaf) blocks: */

	/* User callback information. */
	dmu_buf_user_t *db_user;

	/*
	 * Evict user data as soon as the dirty and reference
	 * counts are equal.
	 */
	uint8_t db_user_immediate_evict;

	/*
	 * This block was freed while a read or write was
	 * active.
	 */
	uint8_t db_freed_in_flight;

	/*
	 * dnode_evict_dbufs() or dnode_evict_bonus() tried to
	 * evict this dbuf, but couldn't due to outstanding
	 * references.  Evict once the refcount drops to 0.
	 */
	uint8_t db_pending_evict;

	/*
	 * Number of outstanding dirty txgs for this buffer.
	 * NOTE(review): presumed from the name; verify against dbuf.c.
	 */
	uint8_t db_dirtycnt;
} dmu_buf_impl_t;
323 
/*
 * Number of rwlocks striping the dbuf hash table.  Must remain a power
 * of two so that DBUF_HASH_RWLOCK() can mask with (DBUF_RWLOCKS - 1).
 */
#define	DBUF_RWLOCKS 8192
/* Map a hash bucket index onto one of the striped rwlocks. */
#define	DBUF_HASH_RWLOCK(h, idx) (&(h)->hash_rwlocks[(idx) & (DBUF_RWLOCKS-1)])
typedef struct dbuf_hash_table {
	/* mask used when indexing into hash_table */
	uint64_t hash_table_mask;
	/* array of hash chain heads, linked via db_hash_next */
	dmu_buf_impl_t **hash_table;
	/* striped bucket locks; cacheline-aligned */
	krwlock_t hash_rwlocks[DBUF_RWLOCKS] ____cacheline_aligned;
} dbuf_hash_table_t;
331 
332 typedef void (*dbuf_prefetch_fn)(void *, uint64_t, uint64_t, boolean_t);
333 
334 uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
335     const uint64_t offset);
336 
337 void dbuf_create_bonus(struct dnode *dn);
338 int dbuf_spill_set_blksz(dmu_buf_t *db, uint64_t blksz, dmu_tx_t *tx);
339 
340 void dbuf_rm_spill(struct dnode *dn, dmu_tx_t *tx);
341 
342 dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, const void *tag);
343 dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
344     const void *tag);
345 int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
346     boolean_t fail_sparse, boolean_t fail_uncached,
347     const void *tag, dmu_buf_impl_t **dbp);
348 
349 int dbuf_prefetch_impl(struct dnode *dn, int64_t level, uint64_t blkid,
350     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
351     void *arg);
352 int dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
353     zio_priority_t prio, arc_flags_t aflags);
354 
355 void dbuf_add_ref(dmu_buf_impl_t *db, const void *tag);
356 boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
357     uint64_t blkid, const void *tag);
358 uint64_t dbuf_refcount(dmu_buf_impl_t *db);
359 
360 void dbuf_rele(dmu_buf_impl_t *db, const void *tag);
361 void dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag,
362     boolean_t evicting);
363 
364 dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
365     uint64_t blkid);
366 
367 int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
368 void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
369 void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
370 void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
371 void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
372 dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
373 dbuf_dirty_record_t *dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid,
374     dmu_tx_t *tx);
375 arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
376 void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
377     bp_embedded_type_t etype, enum zio_compress comp,
378     int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
379 
380 int dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
381     const struct zio_prop *zp, enum zio_flag flags, dmu_tx_t *tx);
382 
383 void dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx);
384 void dbuf_destroy(dmu_buf_impl_t *db);
385 
386 void dbuf_unoverride(dbuf_dirty_record_t *dr);
387 void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
388 void dbuf_release_bp(dmu_buf_impl_t *db);
389 db_lock_type_t dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw,
390     const void *tag);
391 void dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type,
392     const void *tag);
393 
394 void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
395     struct dmu_tx *);
396 
397 void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
398 
399 void dbuf_stats_init(dbuf_hash_table_t *hash);
400 void dbuf_stats_destroy(void);
401 
402 int dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
403     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift);
404 
/*
 * Accessors for the dnode backing a dbuf, via its dnode handle.
 * DB_DNODE_ENTER()/DB_DNODE_EXIT() add/remove a reference on the
 * handle's zrlock around uses of DB_DNODE(); DB_DNODE_HELD() tests
 * whether any such reference is currently held.
 */
#define	DB_DNODE(_db)		((_db)->db_dnode_handle->dnh_dnode)
#define	DB_DNODE_LOCK(_db)	((_db)->db_dnode_handle->dnh_zrlock)
#define	DB_DNODE_ENTER(_db)	(zrl_add(&DB_DNODE_LOCK(_db)))
#define	DB_DNODE_EXIT(_db)	(zrl_remove(&DB_DNODE_LOCK(_db)))
#define	DB_DNODE_HELD(_db)	(!zrl_is_zero(&DB_DNODE_LOCK(_db)))
410 
411 void dbuf_init(void);
412 void dbuf_fini(void);
413 
414 boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
415 
416 static inline dbuf_dirty_record_t *
417 dbuf_find_dirty_lte(dmu_buf_impl_t *db, uint64_t txg)
418 {
419 	dbuf_dirty_record_t *dr;
420 
421 	for (dr = list_head(&db->db_dirty_records);
422 	    dr != NULL && dr->dr_txg > txg;
423 	    dr = list_next(&db->db_dirty_records, dr))
424 		continue;
425 	return (dr);
426 }
427 
428 static inline dbuf_dirty_record_t *
429 dbuf_find_dirty_eq(dmu_buf_impl_t *db, uint64_t txg)
430 {
431 	dbuf_dirty_record_t *dr;
432 
433 	dr = dbuf_find_dirty_lte(db, txg);
434 	if (dr && dr->dr_txg == txg)
435 		return (dr);
436 	return (NULL);
437 }
438 
/* ARC buffer-contents type for a dbuf: metadata vs. regular data. */
#define	DBUF_GET_BUFC_TYPE(_db)	\
	(dbuf_is_metadata(_db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)

/*
 * Whether this dbuf is eligible for caching, per the objset's
 * primarycache setting: ZFS_CACHE_ALL caches everything, while
 * ZFS_CACHE_METADATA caches only metadata dbufs.
 */
#define	DBUF_IS_CACHEABLE(_db)						\
	((_db)->db_objset->os_primary_cache == ZFS_CACHE_ALL ||		\
	(dbuf_is_metadata(_db) &&					\
	((_db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA)))
446 
447 boolean_t dbuf_is_l2cacheable(dmu_buf_impl_t *db);
448 
449 #ifdef ZFS_DEBUG
450 
451 /*
452  * There should be a ## between the string literal and fmt, to make it
453  * clear that we're joining two strings together, but gcc does not
454  * support that preprocessor token.
455  */
/*
 * Debug printf for a dbuf: prefixes the message with the dbuf's object
 * number ("mdn" for the meta-dnode object), level, and blkid.  Emits
 * output only when ZFS_DEBUG_DPRINTF is set in zfs_flags.
 */
#define	dprintf_dbuf(dbuf, fmt, ...) do { \
	if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
	char __db_buf[32]; \
	uint64_t __db_obj = (dbuf)->db.db_object; \
	if (__db_obj == DMU_META_DNODE_OBJECT) \
		(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf));	\
	else \
		(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
		    (u_longlong_t)__db_obj); \
	dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \
	    "obj=%s lvl=%u blkid=%lld " fmt, \
	    __db_buf, (dbuf)->db_level, \
	    (u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \
	} \
} while (0)

/*
 * As dprintf_dbuf(), but also appends a rendering of the given block
 * pointer.  The BP string buffer is heap-allocated (BP_SPRINTF_LEN
 * bytes), presumably to keep it off the kernel stack.
 */
#define	dprintf_dbuf_bp(db, bp, fmt, ...) do {			\
	if (zfs_flags & ZFS_DEBUG_DPRINTF) {			\
	char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP);	\
	snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp);		\
	dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf);	\
	kmem_free(__blkbuf, BP_SPRINTF_LEN);			\
	}							\
} while (0)
480 
481 #define	DBUF_VERIFY(db)	dbuf_verify(db)
482 
483 #else
484 
485 #define	dprintf_dbuf(db, fmt, ...)
486 #define	dprintf_dbuf_bp(db, bp, fmt, ...)
487 #define	DBUF_VERIFY(db)
488 
489 #endif
490 
491 
492 #ifdef	__cplusplus
493 }
494 #endif
495 
496 #endif /* _SYS_DBUF_H */
497