xref: /freebsd/sys/contrib/openzfs/include/sys/dbuf.h (revision 2a58b312)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#ifndef	_SYS_DBUF_H
#define	_SYS_DBUF_H

#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>

#ifdef	__cplusplus
extern "C" {
#endif

#define	IN_DMU_SYNC 2

/*
 * Flags for dbuf_read().
 */

#define	DB_RF_MUST_SUCCEED	(1 << 0)
#define	DB_RF_CANFAIL		(1 << 1)
#define	DB_RF_HAVESTRUCT	(1 << 2)
#define	DB_RF_NOPREFETCH	(1 << 3)
#define	DB_RF_NEVERWAIT		(1 << 4)
#define	DB_RF_CACHED		(1 << 5)
#define	DB_RF_NO_DECRYPT	(1 << 6)
#define	DB_RF_PARTIAL_FIRST	(1 << 7)
#define	DB_RF_PARTIAL_MORE	(1 << 8)
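
/*
 * These flags may be OR'd together when calling dbuf_read().  A hedged
 * sketch of a call (illustrative, not a call site from this file): a caller
 * that can tolerate an I/O error and does not want prefetching would pass
 *
 *	error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
 *
 * DB_RF_HAVESTRUCT is passed only when the caller already holds the dnode's
 * dn_struct_rwlock.
 */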

/*
 * The simplified state transition diagram for dbufs looks like:
 *
 *		+----> READ ----+
 *		|		|
 *		|		V
 *  (alloc)-->UNCACHED	     CACHED-->EVICTING-->(free)
 *		|		^	 ^
 *		|		|	 |
 *		+----> FILL ----+	 |
 *		|			 |
 *		|			 |
 *		+--------> NOFILL -------+
 *
 * DB_SEARCH is an invalid state for a dbuf. It is used by dbuf_free_range
 * to find all dbufs in a range of a dnode and must be less than any other
 * dbuf_states_t (see comment on dn_dbufs in dnode.h).
 */
typedef enum dbuf_states {
	DB_SEARCH = -1,
	DB_UNCACHED,
	DB_FILL,
	DB_NOFILL,
	DB_READ,
	DB_CACHED,
	DB_EVICTING
} dbuf_states_t;
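
/*
 * db_state is inspected and changed under db_mtx (see dmu_buf_impl_t below).
 * As a hedged sketch of how these states are consumed, modeled on the wait
 * loop in dbuf.c rather than copied from it, a thread that finds an I/O or
 * fill already in progress waits on db_changed until the dbuf settles:
 *
 *	mutex_enter(&db->db_mtx);
 *	while (db->db_state == DB_READ || db->db_state == DB_FILL)
 *		cv_wait(&db->db_changed, &db->db_mtx);
 *	if (db->db_state == DB_CACHED)
 *		... the data in db->db.db_data is valid ...
 *	mutex_exit(&db->db_mtx);
 */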

typedef enum dbuf_cached_state {
	DB_NO_CACHE = -1,
	DB_DBUF_CACHE,
	DB_DBUF_METADATA_CACHE,
	DB_CACHE_MAX
} dbuf_cached_state_t;

struct dnode;
struct dmu_tx;

/*
 * level = 0 means the user data
 * level = 1 means the single indirect block
 * etc.
 */

struct dmu_buf_impl;

typedef enum override_states {
	DR_NOT_OVERRIDDEN,
	DR_IN_DMU_SYNC,
	DR_OVERRIDDEN
} override_states_t;

typedef enum db_lock_type {
	DLT_NONE,
	DLT_PARENT,
	DLT_OBJSET
} db_lock_type_t;

typedef struct dbuf_dirty_record {
	/* link on our parent's dirty list */
	list_node_t dr_dirty_node;

	/* transaction group this data will sync in */
	uint64_t dr_txg;

	/* zio of outstanding write IO */
	zio_t *dr_zio;

	/* pointer back to our dbuf */
	struct dmu_buf_impl *dr_dbuf;

	/* list link for dbuf dirty records */
	list_node_t dr_dbuf_node;

	/*
	 * The dnode we are part of.  Note that the dnode can not be moved or
	 * evicted due to the hold that's added by dnode_setdirty() or
	 * dmu_objset_sync_dnodes(), and released by dnode_rele_task() or
	 * userquota_updates_task().  This hold is necessary for
	 * dirty_lightweight_leaf-type dirty records, which don't have a hold
	 * on a dbuf.
	 */
	dnode_t *dr_dnode;

	/* pointer to parent dirty record */
	struct dbuf_dirty_record *dr_parent;

	/* How much space was charged to dsl_pool_dirty_space() for this? */
	unsigned int dr_accounted;

	/* A copy of the bp that points to us */
	blkptr_t dr_bp_copy;

	union dirty_types {
		struct dirty_indirect {

			/* protect access to list */
			kmutex_t dr_mtx;

			/* Our list of dirty children */
			list_t dr_children;
		} di;
		struct dirty_leaf {

			/*
			 * dr_data is set when we dirty the buffer
			 * so that we can retain the pointer even if it
			 * gets COW'd in a subsequent transaction group.
			 */
			arc_buf_t *dr_data;
			blkptr_t dr_overridden_by;
			override_states_t dr_override_state;
			uint8_t dr_copies;
			boolean_t dr_nopwrite;
			boolean_t dr_brtwrite;
			boolean_t dr_has_raw_params;

			/*
			 * If dr_has_raw_params is set, the following crypt
			 * params will be set on the BP that's written.
			 */
			boolean_t dr_byteorder;
			uint8_t	dr_salt[ZIO_DATA_SALT_LEN];
			uint8_t	dr_iv[ZIO_DATA_IV_LEN];
			uint8_t	dr_mac[ZIO_DATA_MAC_LEN];
		} dl;
		struct dirty_lightweight_leaf {
			/*
			 * This dirty record refers to a leaf (level=0)
			 * block, whose dbuf has not been instantiated for
			 * performance reasons.
			 */
			uint64_t dr_blkid;
			abd_t *dr_abd;
			zio_prop_t dr_props;
			zio_flag_t dr_flags;
		} dll;
	} dt;
} dbuf_dirty_record_t;
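
/*
 * Which member of the dt union above is valid depends on the dirty record:
 * indirect (db_level > 0) dbufs use dt.di, ordinary leaf (level 0) dbufs use
 * dt.dl, and lightweight records created by dbuf_dirty_lightweight() use
 * dt.dll and carry no dbuf at all (dr_dbuf is NULL).  A hedged sketch of how
 * a consumer might dispatch on this (not a function from this file):
 *
 *	if (dr->dr_dbuf == NULL)
 *		abd = dr->dt.dll.dr_abd;		(lightweight leaf)
 *	else if (dr->dr_dbuf->db_level > 0)
 *		children = &dr->dt.di.dr_children;	(indirect)
 *	else
 *		buf = dr->dt.dl.dr_data;		(ordinary leaf)
 */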

typedef struct dmu_buf_impl {
	/*
	 * The following members are immutable, with the exception of
	 * db.db_data, which is protected by db_mtx.
	 */

	/* the publicly visible structure */
	dmu_buf_t db;

	/* the objset we belong to */
	struct objset *db_objset;

	/*
	 * handle to safely access the dnode we belong to (NULL when evicted)
	 */
	struct dnode_handle *db_dnode_handle;

	/*
	 * our parent buffer; if the dnode points to us directly,
	 * db_parent == db_dnode_handle->dnh_dnode->dn_dbuf
	 * only accessed by sync thread ???
	 * (NULL when evicted)
	 * May change from NULL to non-NULL under the protection of db_mtx
	 * (see dbuf_check_blkptr())
	 */
	struct dmu_buf_impl *db_parent;

	/*
	 * link for hash table of all dmu_buf_impl_t's
	 */
	struct dmu_buf_impl *db_hash_next;

	/*
	 * Our link on the owner dnode's dn_dbufs list.
	 * Protected by its dn_dbufs_mtx.  Should be on the same cache line
	 * as db_level and db_blkid for the best avl_add() performance.
	 */
	avl_node_t db_link;

	/* our block number */
	uint64_t db_blkid;

	/*
	 * Pointer to the blkptr_t which points to us. May be NULL if we
	 * don't have one yet. (NULL when evicted)
	 */
	blkptr_t *db_blkptr;

	/*
	 * Our indirection level.  Data buffers have db_level==0.
	 * Indirect buffers which point to data buffers have
	 * db_level==1, and so on.  Buffers which contain dnodes have
	 * db_level==0, since the dnodes are stored in a file.
	 */
	uint8_t db_level;

	/*
	 * Protects db_buf's contents if they contain an indirect block or data
	 * block of the meta-dnode. We use this lock to protect the structure of
	 * the block tree. This means that when modifying this dbuf's data, we
	 * grab its rwlock. When modifying its parent's data (including the
	 * blkptr to this dbuf), we grab the parent's rwlock. The lock ordering
	 * for this lock is:
	 * 1) dn_struct_rwlock
	 * 2) db_rwlock
	 * We don't currently grab multiple dbufs' db_rwlocks at once.
	 */
	krwlock_t db_rwlock;

	/* buffer holding our data */
	arc_buf_t *db_buf;

	/* db_mtx protects the members below */
	kmutex_t db_mtx;

	/*
	 * Current state of the buffer
	 */
	dbuf_states_t db_state;

	/*
	 * Refcount accessed by dmu_buf_{hold,rele}.
	 * If nonzero, the buffer can't be destroyed.
	 * Protected by db_mtx.
	 */
	zfs_refcount_t db_holds;

	kcondvar_t db_changed;
	dbuf_dirty_record_t *db_data_pending;

	/* List of dirty records for the buffer sorted newest to oldest. */
	list_t db_dirty_records;

	/* Link in dbuf_cache or dbuf_metadata_cache */
	multilist_node_t db_cache_link;

	/* Tells us which dbuf cache this dbuf is in, if any */
	dbuf_cached_state_t db_caching_status;

	uint64_t db_hash;

	/* Data which is unique to data (leaf) blocks: */

	/* User callback information. */
	dmu_buf_user_t *db_user;

	/*
	 * Evict user data as soon as the dirty and reference
	 * counts are equal.
	 */
	uint8_t db_user_immediate_evict;

	/*
	 * This block was freed while a read or write was
	 * active.
	 */
	uint8_t db_freed_in_flight;

	/*
	 * dnode_evict_dbufs() or dnode_evict_bonus() tried to
	 * evict this dbuf, but couldn't due to outstanding
	 * references.  Evict once the refcount drops to 0.
	 */
	uint8_t db_pending_evict;

	uint8_t db_dirtycnt;

	/* The buffer was partially read.  More reads may follow. */
	uint8_t db_partial_read;
} dmu_buf_impl_t;
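
/*
 * The db_rwlock comment above fixes the lock order as dn_struct_rwlock
 * before db_rwlock.  A minimal, hedged sketch of that ordering (an assumed
 * call site, not code from this file), e.g. when rewriting a block pointer
 * held in a parent indirect dbuf's data:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);	(or RW_WRITER, per path)
 *	rw_enter(&parent->db_rwlock, RW_WRITER);
 *	... modify the blkptr_t stored in parent->db.db_data ...
 *	rw_exit(&parent->db_rwlock);
 *	rw_exit(&dn->dn_struct_rwlock);
 */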

#define	DBUF_HASH_MUTEX(h, idx) \
	(&(h)->hash_mutexes[(idx) & ((h)->hash_mutex_mask)])

typedef struct dbuf_hash_table {
	uint64_t hash_table_mask;
	uint64_t hash_mutex_mask;
	dmu_buf_impl_t **hash_table;
	kmutex_t *hash_mutexes;
} dbuf_hash_table_t;
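
/*
 * The table is chained: hash_table_mask selects a bucket and hash_mutex_mask
 * selects the mutex guarding it.  A hedged sketch of a lookup, modeled on
 * the pattern in dbuf.c (names here are illustrative):
 *
 *	uint64_t idx = hv & h->hash_table_mask;
 *	mutex_enter(DBUF_HASH_MUTEX(h, idx));
 *	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
 *		... compare objset, object, level, and blkid ...
 *	}
 *	mutex_exit(DBUF_HASH_MUTEX(h, idx));
 */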

typedef void (*dbuf_prefetch_fn)(void *, uint64_t, uint64_t, boolean_t);

uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
    const uint64_t offset);

void dbuf_create_bonus(struct dnode *dn);
int dbuf_spill_set_blksz(dmu_buf_t *db, uint64_t blksz, dmu_tx_t *tx);

void dbuf_rm_spill(struct dnode *dn, dmu_tx_t *tx);

dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, const void *tag);
dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
    const void *tag);
int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
    boolean_t fail_sparse, boolean_t fail_uncached,
    const void *tag, dmu_buf_impl_t **dbp);

int dbuf_prefetch_impl(struct dnode *dn, int64_t level, uint64_t blkid,
    zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
    void *arg);
int dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
    zio_priority_t prio, arc_flags_t aflags);

void dbuf_add_ref(dmu_buf_impl_t *db, const void *tag);
boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
    uint64_t blkid, const void *tag);
uint64_t dbuf_refcount(dmu_buf_impl_t *db);

void dbuf_rele(dmu_buf_impl_t *db, const void *tag);
void dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag,
    boolean_t evicting);

dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
    uint64_t blkid, uint64_t *hash_out);

int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
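
/*
 * Typical lifecycle of a dbuf reference, as a hedged sketch (illustrative
 * only; error handling and the locking around dn_struct_rwlock are elided):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		int err = dbuf_read(db, NULL, DB_RF_CANFAIL);
 *		if (err == 0)
 *			... consume db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */
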
void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid,
    dmu_tx_t *tx);
boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);

int dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
    const struct zio_prop *zp, zio_flag_t flags, dmu_tx_t *tx);

void dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx);
void dbuf_destroy(dmu_buf_impl_t *db);

void dbuf_unoverride(dbuf_dirty_record_t *dr);
void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
void dbuf_release_bp(dmu_buf_impl_t *db);
db_lock_type_t dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw,
    const void *tag);
void dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type,
    const void *tag);

void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
    struct dmu_tx *);

void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);

void dbuf_stats_init(dbuf_hash_table_t *hash);
void dbuf_stats_destroy(void);

int dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
    blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift);

#define	DB_DNODE(_db)		((_db)->db_dnode_handle->dnh_dnode)
#define	DB_DNODE_LOCK(_db)	((_db)->db_dnode_handle->dnh_zrlock)
#define	DB_DNODE_ENTER(_db)	(zrl_add(&DB_DNODE_LOCK(_db)))
#define	DB_DNODE_EXIT(_db)	(zrl_remove(&DB_DNODE_LOCK(_db)))
#define	DB_DNODE_HELD(_db)	(!zrl_is_zero(&DB_DNODE_LOCK(_db)))
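
/*
 * DB_DNODE() is only safe to dereference while the handle's zrlock is held.
 * The usual pattern (a hedged sketch of the convention, not code from this
 * file) brackets the access with DB_DNODE_ENTER/EXIT:
 *
 *	DB_DNODE_ENTER(db);
 *	dnode_t *dn = DB_DNODE(db);
 *	... use dn ...
 *	DB_DNODE_EXIT(db);
 */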

void dbuf_init(void);
void dbuf_fini(void);

boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);

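/*
 * Return the newest dirty record on db->db_dirty_records whose dr_txg is
 * <= txg, or NULL if there is none.  The list is sorted newest to oldest
 * and is protected by db_mtx (see dmu_buf_impl_t above).
 */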
static inline dbuf_dirty_record_t *
dbuf_find_dirty_lte(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr;

	for (dr = list_head(&db->db_dirty_records);
	    dr != NULL && dr->dr_txg > txg;
	    dr = list_next(&db->db_dirty_records, dr))
		continue;
	return (dr);
}

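/*
 * Return the dirty record for exactly txg, or NULL if the dbuf is not
 * dirty in that transaction group.
 */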
static inline dbuf_dirty_record_t *
dbuf_find_dirty_eq(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr;

	dr = dbuf_find_dirty_lte(db, txg);
	if (dr && dr->dr_txg == txg)
		return (dr);
	return (NULL);
}

#define	DBUF_GET_BUFC_TYPE(_db)	\
	(dbuf_is_metadata(_db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)

#define	DBUF_IS_CACHEABLE(_db)						\
	((_db)->db_objset->os_primary_cache == ZFS_CACHE_ALL ||		\
	(dbuf_is_metadata(_db) &&					\
	((_db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA)))

boolean_t dbuf_is_l2cacheable(dmu_buf_impl_t *db);

#ifdef ZFS_DEBUG

/*
 * There should be a ## between the string literal and fmt, to make it
 * clear that we're joining two strings together, but gcc does not
 * support that preprocessor token.
 */
#define	dprintf_dbuf(dbuf, fmt, ...) do { \
	if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
	char __db_buf[32]; \
	uint64_t __db_obj = (dbuf)->db.db_object; \
	if (__db_obj == DMU_META_DNODE_OBJECT) \
		(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf));	\
	else \
		(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
		    (u_longlong_t)__db_obj); \
	dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \
	    "obj=%s lvl=%u blkid=%lld " fmt, \
	    __db_buf, (dbuf)->db_level, \
	    (u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \
	} \
} while (0)
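
/*
 * Because the format strings are pasted rather than joined with ##, callers
 * always pass a format and matching arguments.  A hedged example of the
 * common call shape (illustrative, not a call site from this file):
 *
 *	dprintf_dbuf(db, "ptr=%p\n", db);
 */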

#define	dprintf_dbuf_bp(db, bp, fmt, ...) do {			\
	if (zfs_flags & ZFS_DEBUG_DPRINTF) {			\
	char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP);	\
	snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp);		\
	dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf);	\
	kmem_free(__blkbuf, BP_SPRINTF_LEN);			\
	}							\
} while (0)

#define	DBUF_VERIFY(db)	dbuf_verify(db)

#else

#define	dprintf_dbuf(db, fmt, ...)
#define	dprintf_dbuf_bp(db, bp, fmt, ...)
#define	DBUF_VERIFY(db)

#endif


#ifdef	__cplusplus
}
#endif

#endif /* _SYS_DBUF_H */