xref: /freebsd/sys/contrib/openzfs/module/zfs/dbuf.c (revision aca928a5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright (c) 2019, Klara Inc.
28  * Copyright (c) 2019, Allan Jude
29  * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
30  */
31 
32 #include <sys/zfs_context.h>
33 #include <sys/arc.h>
34 #include <sys/dmu.h>
35 #include <sys/dmu_send.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dbuf.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/spa.h>
43 #include <sys/zio.h>
44 #include <sys/dmu_zfetch.h>
45 #include <sys/sa.h>
46 #include <sys/sa_impl.h>
47 #include <sys/zfeature.h>
48 #include <sys/blkptr.h>
49 #include <sys/range_tree.h>
50 #include <sys/trace_zfs.h>
51 #include <sys/callb.h>
52 #include <sys/abd.h>
53 #include <sys/brt.h>
54 #include <sys/vdev.h>
55 #include <cityhash.h>
56 #include <sys/spa_impl.h>
57 #include <sys/wmsum.h>
58 #include <sys/vdev_impl.h>
59 
60 static kstat_t *dbuf_ksp;
61 
62 typedef struct dbuf_stats {
63 	/*
64 	 * Various statistics about the size of the dbuf cache.
65 	 */
66 	kstat_named_t cache_count;
67 	kstat_named_t cache_size_bytes;
68 	kstat_named_t cache_size_bytes_max;
69 	/*
70 	 * Statistics regarding the bounds on the dbuf cache size.
71 	 */
72 	kstat_named_t cache_target_bytes;
73 	kstat_named_t cache_lowater_bytes;
74 	kstat_named_t cache_hiwater_bytes;
75 	/*
76 	 * Total number of dbuf cache evictions that have occurred.
77 	 */
78 	kstat_named_t cache_total_evicts;
79 	/*
80 	 * The distribution of dbuf levels in the dbuf cache and
81 	 * the total size of all dbufs at each level.
82 	 */
83 	kstat_named_t cache_levels[DN_MAX_LEVELS];
84 	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
85 	/*
86 	 * Statistics about the dbuf hash table.
87 	 */
88 	kstat_named_t hash_hits;
89 	kstat_named_t hash_misses;
90 	kstat_named_t hash_collisions;
91 	kstat_named_t hash_elements;
92 	kstat_named_t hash_elements_max;
93 	/*
94 	 * Number of hash buckets containing more than one dbuf in the dbuf
95 	 * hash table, and the length of the longest hash chain observed.
96 	 */
97 	kstat_named_t hash_chains;
98 	kstat_named_t hash_chain_max;
99 	/*
100 	 * Number of times dbuf_create() discovers that a dbuf was
101 	 * already created and in the dbuf hash table.
102 	 */
103 	kstat_named_t hash_insert_race;
104 	/*
105 	 * Number of entries in the hash table dbuf and mutex arrays.
106 	 */
107 	kstat_named_t hash_table_count;
108 	kstat_named_t hash_mutex_count;
109 	/*
110 	 * Statistics about the size of the metadata dbuf cache.
111 	 */
112 	kstat_named_t metadata_cache_count;
113 	kstat_named_t metadata_cache_size_bytes;
114 	kstat_named_t metadata_cache_size_bytes_max;
115 	/*
116 	 * For diagnostic purposes, this is incremented whenever we can't add
117 	 * something to the metadata cache because it's full, and instead put
118 	 * the data in the regular dbuf cache.
119 	 */
120 	kstat_named_t metadata_cache_overflow;
121 } dbuf_stats_t;
122 
123 dbuf_stats_t dbuf_stats = {
124 	{ "cache_count",			KSTAT_DATA_UINT64 },
125 	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
126 	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
127 	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
128 	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
129 	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
130 	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
131 	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
132 	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
133 	{ "hash_hits",				KSTAT_DATA_UINT64 },
134 	{ "hash_misses",			KSTAT_DATA_UINT64 },
135 	{ "hash_collisions",			KSTAT_DATA_UINT64 },
136 	{ "hash_elements",			KSTAT_DATA_UINT64 },
137 	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
138 	{ "hash_chains",			KSTAT_DATA_UINT64 },
139 	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
140 	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
141 	{ "hash_table_count",			KSTAT_DATA_UINT64 },
142 	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
143 	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
144 	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
145 	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
146 	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
147 };
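/*
 * These named stats are exported through the "dbufstats" kstat created in
 * dbuf_init() below.  As an illustration (platform paths may vary), they can
 * typically be read on Linux via /proc/spl/kstat/zfs/dbufstats and on
 * FreeBSD via the kstat.zfs.misc.dbufstats sysctl tree.
 */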
148 
149 struct {
150 	wmsum_t cache_count;
151 	wmsum_t cache_total_evicts;
152 	wmsum_t cache_levels[DN_MAX_LEVELS];
153 	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
154 	wmsum_t hash_hits;
155 	wmsum_t hash_misses;
156 	wmsum_t hash_collisions;
157 	wmsum_t hash_chains;
158 	wmsum_t hash_insert_race;
159 	wmsum_t metadata_cache_count;
160 	wmsum_t metadata_cache_overflow;
161 } dbuf_sums;
162 
163 #define	DBUF_STAT_INCR(stat, val)	\
164 	wmsum_add(&dbuf_sums.stat, val)
165 #define	DBUF_STAT_DECR(stat, val)	\
166 	DBUF_STAT_INCR(stat, -(val))
167 #define	DBUF_STAT_BUMP(stat)		\
168 	DBUF_STAT_INCR(stat, 1)
169 #define	DBUF_STAT_BUMPDOWN(stat)	\
170 	DBUF_STAT_INCR(stat, -1)
171 #define	DBUF_STAT_MAX(stat, v) {					\
172 	uint64_t _m;							\
173 	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
174 	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
175 		continue;						\
176 }
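/*
 * DBUF_STAT_MAX() above is a lock-free maximum "ratchet": it rereads the
 * current maximum and retries the compare-and-swap until either the stored
 * value is already >= v or our CAS wins; a losing racer simply loops with
 * the fresh value.  An example use from dbuf_hash_insert() below, recording
 * the longest observed hash chain:
 *
 *	DBUF_STAT_MAX(hash_chain_max, i);
 */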
177 
178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
180 
181 /*
182  * Global data structures and functions for the dbuf cache.
183  */
184 static kmem_cache_t *dbuf_kmem_cache;
185 static taskq_t *dbu_evict_taskq;
186 
187 static kthread_t *dbuf_cache_evict_thread;
188 static kmutex_t dbuf_evict_lock;
189 static kcondvar_t dbuf_evict_cv;
190 static boolean_t dbuf_evict_thread_exit;
191 
192 /*
193  * There are two dbuf caches; each dbuf can only be in one of them at a time.
194  *
195  * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
196  *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
197  *    that represent the metadata that describes filesystems/snapshots/
198  *    bookmarks/properties/etc. We only evict from this cache when we export a
199  *    pool, to short-circuit as much I/O as possible for all administrative
200  *    commands that need the metadata. There is no eviction policy for this
201  *    cache, because we try to include only types that occupy a very
202  *    small amount of space per object but have a large impact on the
203  *    performance of these commands. Instead, after it reaches a maximum size
204  *    (which should only happen on very small memory systems with a very large
205  *    number of filesystem objects), we stop taking new dbufs into the
206  *    metadata cache and put them in the normal dbuf cache.
207  *
208  * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
209  *    are not currently held but have been recently released. These dbufs
210  *    are not eligible for arc eviction until they are aged out of the cache.
211  *    Dbufs that are aged out of the cache will be immediately destroyed and
212  *    become eligible for arc eviction.
213  *
214  * Dbufs are added to these caches once the last hold is released. If a dbuf is
215  * later accessed and still exists in the dbuf cache, then it will be removed
216  * from the cache and later re-added to the head of the cache.
217  *
218  * If a given dbuf meets the requirements for the metadata cache, it will go
219  * there, otherwise it will be considered for the generic LRU dbuf cache. The
220  * caches and the refcounts tracking their sizes are stored in an array indexed
221  * by those caches' matching enum values (from dbuf_cached_state_t).
222  */
223 typedef struct dbuf_cache {
224 	multilist_t cache;
225 	zfs_refcount_t size ____cacheline_aligned;
226 } dbuf_cache_t;
227 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
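/*
 * For example, the current size of the metadata cache is read as
 * zfs_refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size), indexing
 * the array by the dbuf_cached_state_t value (see
 * dbuf_include_in_metadata_cache() and dbuf_kstat_update() below).
 */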
228 
229 /* Size limits for the caches */
230 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
231 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
232 
233 /* Set the default sizes of the caches as a log2 fraction of the ARC size */
234 static uint_t dbuf_cache_shift = 5;
235 static uint_t dbuf_metadata_cache_shift = 6;
236 
237 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
238 static uint_t dbuf_mutex_cache_shift = 0;
239 
240 static unsigned long dbuf_cache_target_bytes(void);
241 static unsigned long dbuf_metadata_cache_target_bytes(void);
242 
243 /*
244  * The LRU dbuf cache uses a three-stage eviction policy:
245  *	- A low water marker designates when the dbuf eviction thread
246  *	should stop evicting from the dbuf cache.
247  *	- When we reach the maximum size (aka mid water mark), we
248  *	signal the eviction thread to run.
249  *	- The high water mark indicates when the eviction thread
250  *	is unable to keep up with the incoming load and eviction must
251  *	happen in the context of the calling thread.
252  *
253  * The dbuf cache:
254  *                                                 (max size)
255  *                                      low water   mid water   hi water
256  * +----------------------------------------+----------+----------+
257  * |                                        |          |          |
258  * |                                        |          |          |
259  * |                                        |          |          |
260  * |                                        |          |          |
261  * +----------------------------------------+----------+----------+
262  *                                        stop        signal     evict
263  *                                      evicting     eviction   directly
264  *                                                    thread
265  *
266  * The high and low water marks indicate the operating range for the eviction
267  * thread. The low water mark is, by default, 90% of the total size of the
268  * cache and the high water mark is at 110% (both of these percentages can be
269  * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
270  * respectively). The eviction thread will try to ensure that the cache remains
271  * within this range by waking up every second and checking if the cache is
272  * above the low water mark. The thread can also be woken up by callers adding
273  * elements into the cache if the cache is larger than the mid water (i.e. max
274  * cache size). Once the eviction thread is woken up and eviction is required,
275  * it will continue evicting buffers until it's able to reduce the cache size
276  * to the low water mark. If the cache size continues to grow and hits the high
277  * water mark, then callers adding elements to the cache will begin to evict
278  * directly from the cache until the cache is no longer above the high water
279  * mark.
280  */
281 
282 /*
283  * The percentage above and below the maximum cache size.
284  */
285 static uint_t dbuf_cache_hiwater_pct = 10;
286 static uint_t dbuf_cache_lowater_pct = 10;
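/*
 * Worked example with the defaults above: if dbuf_cache_target_bytes()
 * evaluates to 100 MiB, the low water mark is 100 - (100 * 10) / 100 =
 * 90 MiB and the high water mark is 100 + (100 * 10) / 100 = 110 MiB.
 * Growing past 100 MiB signals the eviction thread; growing past 110 MiB
 * makes callers evict directly (see dbuf_cache_hiwater_bytes(),
 * dbuf_cache_lowater_bytes() and dbuf_evict_notify() below).
 */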
287 
288 static int
289 dbuf_cons(void *vdb, void *unused, int kmflag)
290 {
291 	(void) unused, (void) kmflag;
292 	dmu_buf_impl_t *db = vdb;
293 	memset(db, 0, sizeof (dmu_buf_impl_t));
294 
295 	mutex_init(&db->db_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
296 	rw_init(&db->db_rwlock, NULL, RW_NOLOCKDEP, NULL);
297 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
298 	multilist_link_init(&db->db_cache_link);
299 	zfs_refcount_create(&db->db_holds);
300 
301 	return (0);
302 }
303 
304 static void
305 dbuf_dest(void *vdb, void *unused)
306 {
307 	(void) unused;
308 	dmu_buf_impl_t *db = vdb;
309 	mutex_destroy(&db->db_mtx);
310 	rw_destroy(&db->db_rwlock);
311 	cv_destroy(&db->db_changed);
312 	ASSERT(!multilist_link_active(&db->db_cache_link));
313 	zfs_refcount_destroy(&db->db_holds);
314 }
315 
316 /*
317  * dbuf hash table routines
318  */
319 static dbuf_hash_table_t dbuf_hash_table;
320 
321 /*
322  * We use Cityhash for this. It's fast, and has good hash properties without
323  * requiring any large static buffers.
324  */
325 static uint64_t
326 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
327 {
328 	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
329 }
330 
331 #define	DTRACE_SET_STATE(db, why) \
332 	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
333 	    const char *, why)
334 
335 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
336 	((dbuf)->db.db_object == (obj) &&		\
337 	(dbuf)->db_objset == (os) &&			\
338 	(dbuf)->db_level == (level) &&			\
339 	(dbuf)->db_blkid == (blkid))
340 
341 dmu_buf_impl_t *
342 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
343     uint64_t *hash_out)
344 {
345 	dbuf_hash_table_t *h = &dbuf_hash_table;
346 	uint64_t hv;
347 	uint64_t idx;
348 	dmu_buf_impl_t *db;
349 
350 	hv = dbuf_hash(os, obj, level, blkid);
351 	idx = hv & h->hash_table_mask;
352 
353 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
354 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
355 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
356 			mutex_enter(&db->db_mtx);
357 			if (db->db_state != DB_EVICTING) {
358 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
359 				return (db);
360 			}
361 			mutex_exit(&db->db_mtx);
362 		}
363 	}
364 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
365 	if (hash_out != NULL)
366 		*hash_out = hv;
367 	return (NULL);
368 }
369 
370 static dmu_buf_impl_t *
371 dbuf_find_bonus(objset_t *os, uint64_t object)
372 {
373 	dnode_t *dn;
374 	dmu_buf_impl_t *db = NULL;
375 
376 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
377 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
378 		if (dn->dn_bonus != NULL) {
379 			db = dn->dn_bonus;
380 			mutex_enter(&db->db_mtx);
381 		}
382 		rw_exit(&dn->dn_struct_rwlock);
383 		dnode_rele(dn, FTAG);
384 	}
385 	return (db);
386 }
387 
388 /*
389  * Insert an entry into the hash table.  If there is already an element
390  * equal to elem in the hash table, then the already existing element
391  * will be returned and the new element will not be inserted.
392  * Otherwise returns NULL.
393  */
394 static dmu_buf_impl_t *
395 dbuf_hash_insert(dmu_buf_impl_t *db)
396 {
397 	dbuf_hash_table_t *h = &dbuf_hash_table;
398 	objset_t *os = db->db_objset;
399 	uint64_t obj = db->db.db_object;
400 	int level = db->db_level;
401 	uint64_t blkid, idx;
402 	dmu_buf_impl_t *dbf;
403 	uint32_t i;
404 
405 	blkid = db->db_blkid;
406 	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
407 	idx = db->db_hash & h->hash_table_mask;
408 
409 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
410 	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
411 	    dbf = dbf->db_hash_next, i++) {
412 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
413 			mutex_enter(&dbf->db_mtx);
414 			if (dbf->db_state != DB_EVICTING) {
415 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
416 				return (dbf);
417 			}
418 			mutex_exit(&dbf->db_mtx);
419 		}
420 	}
421 
422 	if (i > 0) {
423 		DBUF_STAT_BUMP(hash_collisions);
424 		if (i == 1)
425 			DBUF_STAT_BUMP(hash_chains);
426 
427 		DBUF_STAT_MAX(hash_chain_max, i);
428 	}
429 
430 	mutex_enter(&db->db_mtx);
431 	db->db_hash_next = h->hash_table[idx];
432 	h->hash_table[idx] = db;
433 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
434 	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
435 	DBUF_STAT_MAX(hash_elements_max, he);
436 
437 	return (NULL);
438 }
439 
440 /*
441  * This returns whether this dbuf should be stored in the metadata cache, which
442  * is based on whether it's from one of the dnode types that store data related
443  * to traversing dataset hierarchies.
444  */
445 static boolean_t
446 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
447 {
448 	DB_DNODE_ENTER(db);
449 	dmu_object_type_t type = DB_DNODE(db)->dn_type;
450 	DB_DNODE_EXIT(db);
451 
452 	/* Check if this dbuf is one of the types we care about */
453 	if (DMU_OT_IS_METADATA_CACHED(type)) {
454 		/* If we hit this, then we set something up wrong in dmu_ot */
455 		ASSERT(DMU_OT_IS_METADATA(type));
456 
457 		/*
458 		 * Sanity check for small-memory systems: don't allocate too
459 		 * much memory for this purpose.
460 		 */
461 		if (zfs_refcount_count(
462 		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
463 		    dbuf_metadata_cache_target_bytes()) {
464 			DBUF_STAT_BUMP(metadata_cache_overflow);
465 			return (B_FALSE);
466 		}
467 
468 		return (B_TRUE);
469 	}
470 
471 	return (B_FALSE);
472 }
473 
474 /*
475  * Remove an entry from the hash table.  It must be in the EVICTING state.
476  */
477 static void
478 dbuf_hash_remove(dmu_buf_impl_t *db)
479 {
480 	dbuf_hash_table_t *h = &dbuf_hash_table;
481 	uint64_t idx;
482 	dmu_buf_impl_t *dbf, **dbp;
483 
484 	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
485 	    db->db_blkid), ==, db->db_hash);
486 	idx = db->db_hash & h->hash_table_mask;
487 
488 	/*
489 	 * We mustn't hold db_mtx to maintain lock ordering:
490 	 * DBUF_HASH_MUTEX > db_mtx.
491 	 */
492 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
493 	ASSERT(db->db_state == DB_EVICTING);
494 	ASSERT(!MUTEX_HELD(&db->db_mtx));
495 
496 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
497 	dbp = &h->hash_table[idx];
498 	while ((dbf = *dbp) != db) {
499 		dbp = &dbf->db_hash_next;
500 		ASSERT(dbf != NULL);
501 	}
502 	*dbp = db->db_hash_next;
503 	db->db_hash_next = NULL;
504 	if (h->hash_table[idx] &&
505 	    h->hash_table[idx]->db_hash_next == NULL)
506 		DBUF_STAT_BUMPDOWN(hash_chains);
507 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
508 	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
509 }
510 
511 typedef enum {
512 	DBVU_EVICTING,
513 	DBVU_NOT_EVICTING
514 } dbvu_verify_type_t;
515 
516 static void
517 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
518 {
519 #ifdef ZFS_DEBUG
520 	int64_t holds;
521 
522 	if (db->db_user == NULL)
523 		return;
524 
525 	/* Only data blocks support the attachment of user data. */
526 	ASSERT(db->db_level == 0);
527 
528 	/* Clients must resolve a dbuf before attaching user data. */
529 	ASSERT(db->db.db_data != NULL);
530 	ASSERT3U(db->db_state, ==, DB_CACHED);
531 
532 	holds = zfs_refcount_count(&db->db_holds);
533 	if (verify_type == DBVU_EVICTING) {
534 		/*
535 		 * Immediate eviction occurs when holds == dirtycnt.
536 		 * For normal eviction buffers, holds is zero on
537 		 * eviction, except when dbuf_fix_old_data() calls
538 		 * dbuf_clear_data().  However, the hold count can grow
539 		 * during eviction even though db_mtx is held (see
540 		 * dmu_bonus_hold() for an example), so we can only
541 		 * test the generic invariant that holds >= dirtycnt.
542 		 */
543 		ASSERT3U(holds, >=, db->db_dirtycnt);
544 	} else {
545 		if (db->db_user_immediate_evict == TRUE)
546 			ASSERT3U(holds, >=, db->db_dirtycnt);
547 		else
548 			ASSERT3U(holds, >, 0);
549 	}
550 #endif
551 }
552 
553 static void
554 dbuf_evict_user(dmu_buf_impl_t *db)
555 {
556 	dmu_buf_user_t *dbu = db->db_user;
557 
558 	ASSERT(MUTEX_HELD(&db->db_mtx));
559 
560 	if (dbu == NULL)
561 		return;
562 
563 	dbuf_verify_user(db, DBVU_EVICTING);
564 	db->db_user = NULL;
565 
566 #ifdef ZFS_DEBUG
567 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
568 		*dbu->dbu_clear_on_evict_dbufp = NULL;
569 #endif
570 
571 	if (db->db_caching_status != DB_NO_CACHE) {
572 		/*
573 		 * This is a cached dbuf, so the size of the user data is
574 		 * included in its cached amount. We adjust it here because the
575 		 * user data has already been detached from the dbuf, and the
576 		 * sync functions are not supposed to touch it (the dbuf might
577 	 * not exist anymore by the time the sync functions run).
578 		 */
579 		uint64_t size = dbu->dbu_size;
580 		(void) zfs_refcount_remove_many(
581 		    &dbuf_caches[db->db_caching_status].size, size, dbu);
582 		if (db->db_caching_status == DB_DBUF_CACHE)
583 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
584 	}
585 
586 	/*
587 	 * There are two eviction callbacks - one that we call synchronously
588 	 * and one that we invoke via a taskq.  The async one is useful for
589 	 * avoiding lock order reversals and limiting stack depth.
590 	 *
591 	 * Note that if we have a sync callback but no async callback,
592 	 * it's likely that the sync callback will free the structure
593 	 * containing the dbu.  In that case we need to take care to not
594 	 * dereference dbu after calling the sync evict func.
595 	 */
596 	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
597 
598 	if (dbu->dbu_evict_func_sync != NULL)
599 		dbu->dbu_evict_func_sync(dbu);
600 
601 	if (has_async) {
602 		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
603 		    dbu, 0, &dbu->dbu_tqent);
604 	}
605 }
606 
607 boolean_t
608 dbuf_is_metadata(dmu_buf_impl_t *db)
609 {
610 	/*
611 	 * Consider indirect blocks and spill blocks to be metadata.
612 	 */
613 	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
614 		return (B_TRUE);
615 	} else {
616 		boolean_t is_metadata;
617 
618 		DB_DNODE_ENTER(db);
619 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
620 		DB_DNODE_EXIT(db);
621 
622 		return (is_metadata);
623 	}
624 }
625 
626 /*
627  * We want to exclude buffers that are in a special allocation class from
628  * L2ARC.
629  */
630 boolean_t
631 dbuf_is_l2cacheable(dmu_buf_impl_t *db)
632 {
633 	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
634 	    (db->db_objset->os_secondary_cache ==
635 	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
636 		if (l2arc_exclude_special == 0)
637 			return (B_TRUE);
638 
639 		blkptr_t *bp = db->db_blkptr;
640 		if (bp == NULL || BP_IS_HOLE(bp))
641 			return (B_FALSE);
642 		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
643 		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
644 		vdev_t *vd = NULL;
645 
646 		if (vdev < rvd->vdev_children)
647 			vd = rvd->vdev_child[vdev];
648 
649 		if (vd == NULL)
650 			return (B_TRUE);
651 
652 		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
653 		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
654 			return (B_TRUE);
655 	}
656 	return (B_FALSE);
657 }
658 
659 static inline boolean_t
660 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
661 {
662 	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
663 	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
664 	    (level > 0 ||
665 	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
666 		if (l2arc_exclude_special == 0)
667 			return (B_TRUE);
668 
669 		if (bp == NULL || BP_IS_HOLE(bp))
670 			return (B_FALSE);
671 		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
672 		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
673 		vdev_t *vd = NULL;
674 
675 		if (vdev < rvd->vdev_children)
676 			vd = rvd->vdev_child[vdev];
677 
678 		if (vd == NULL)
679 			return (B_TRUE);
680 
681 		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
682 		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
683 			return (B_TRUE);
684 	}
685 	return (B_FALSE);
686 }
687 
688 
689 /*
690  * This function *must* return indices evenly distributed between all
691  * sublists of the multilist. This is needed due to how the dbuf eviction
692  * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
693  * distributed between all sublists and uses this assumption when
694  * deciding which sublist to evict from and how much to evict from it.
695  */
696 static unsigned int
697 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
698 {
699 	dmu_buf_impl_t *db = obj;
700 
701 	/*
702 	 * The assumption here is that the hash value for a given
703 	 * dmu_buf_impl_t will remain constant throughout its lifetime
704 	 * (i.e. its objset, object, level and blkid fields don't change).
705 	 * Thus, we don't need to store the dbuf's sublist index
706 	 * on insertion, as this index can be recalculated on removal.
707 	 *
708 	 * Also, the low order bits of the hash value are thought to be
709 	 * distributed evenly. Otherwise, in the case that the multilist
710 	 * has a power-of-two number of sublists, each sublist's usage
711 	 * would not be evenly distributed. In this context full 64-bit
712 	 * division would be a waste of time, so limit it to 32 bits.
713 	 */
714 	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
715 	    db->db_level, db->db_blkid) %
716 	    multilist_get_num_sublists(ml));
717 }
718 
719 /*
720  * The target size of the dbuf cache can grow with the ARC target,
721  * unless limited by the tunable dbuf_cache_max_bytes.
722  */
723 static inline unsigned long
724 dbuf_cache_target_bytes(void)
725 {
726 	return (MIN(dbuf_cache_max_bytes,
727 	    arc_target_bytes() >> dbuf_cache_shift));
728 }
729 
730 /*
731  * The target size of the dbuf metadata cache can grow with the ARC target,
732  * unless limited by the tunable dbuf_metadata_cache_max_bytes.
733  */
734 static inline unsigned long
735 dbuf_metadata_cache_target_bytes(void)
736 {
737 	return (MIN(dbuf_metadata_cache_max_bytes,
738 	    arc_target_bytes() >> dbuf_metadata_cache_shift));
739 }
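/*
 * Worked example with the default shifts above: for an ARC target of 4 GiB,
 * dbuf_cache_target_bytes() yields 4 GiB >> 5 = 128 MiB and
 * dbuf_metadata_cache_target_bytes() yields 4 GiB >> 6 = 64 MiB, unless the
 * corresponding *_max_bytes tunables impose a smaller cap.
 */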
740 
741 static inline uint64_t
742 dbuf_cache_hiwater_bytes(void)
743 {
744 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
745 	return (dbuf_cache_target +
746 	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
747 }
748 
749 static inline uint64_t
750 dbuf_cache_lowater_bytes(void)
751 {
752 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
753 	return (dbuf_cache_target -
754 	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
755 }
756 
757 static inline boolean_t
758 dbuf_cache_above_lowater(void)
759 {
760 	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
761 	    dbuf_cache_lowater_bytes());
762 }
763 
764 /*
765  * Evict the oldest eligible dbuf from the dbuf cache.
766  */
767 static void
768 dbuf_evict_one(void)
769 {
770 	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
771 	multilist_sublist_t *mls = multilist_sublist_lock_idx(
772 	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);
773 
774 	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
775 
776 	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
777 	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
778 		db = multilist_sublist_prev(mls, db);
779 	}
780 
781 	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
782 	    multilist_sublist_t *, mls);
783 
784 	if (db != NULL) {
785 		multilist_sublist_remove(mls, db);
786 		multilist_sublist_unlock(mls);
787 		uint64_t size = db->db.db_size;
788 		uint64_t usize = dmu_buf_user_size(&db->db);
789 		(void) zfs_refcount_remove_many(
790 		    &dbuf_caches[DB_DBUF_CACHE].size, size, db);
791 		(void) zfs_refcount_remove_many(
792 		    &dbuf_caches[DB_DBUF_CACHE].size, usize, db->db_user);
793 		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
794 		DBUF_STAT_BUMPDOWN(cache_count);
795 		DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size + usize);
796 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
797 		db->db_caching_status = DB_NO_CACHE;
798 		dbuf_destroy(db);
799 		DBUF_STAT_BUMP(cache_total_evicts);
800 	} else {
801 		multilist_sublist_unlock(mls);
802 	}
803 }
804 
805 /*
806  * The dbuf evict thread is responsible for aging out dbufs from the
807  * cache. Once the cache has reached its maximum size, dbufs are removed
808  * and destroyed. The eviction thread will continue running until the size
809  * of the dbuf cache is at or below the low water mark. Once the dbuf is aged
810  * out of the cache it is destroyed and becomes eligible for arc eviction.
811  */
812 static __attribute__((noreturn)) void
813 dbuf_evict_thread(void *unused)
814 {
815 	(void) unused;
816 	callb_cpr_t cpr;
817 
818 	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
819 
820 	mutex_enter(&dbuf_evict_lock);
821 	while (!dbuf_evict_thread_exit) {
822 		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
823 			CALLB_CPR_SAFE_BEGIN(&cpr);
824 			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
825 			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
826 			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
827 		}
828 		mutex_exit(&dbuf_evict_lock);
829 
830 		/*
831 		 * Keep evicting as long as we're above the low water mark
832 		 * for the cache. We do this without holding the locks to
833 		 * minimize lock contention.
834 		 */
835 		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
836 			dbuf_evict_one();
837 		}
838 
839 		mutex_enter(&dbuf_evict_lock);
840 	}
841 
842 	dbuf_evict_thread_exit = B_FALSE;
843 	cv_broadcast(&dbuf_evict_cv);
844 	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
845 	thread_exit();
846 }
847 
848 /*
849  * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
850  * If the dbuf cache is at its high water mark, then evict a dbuf from the
851  * dbuf cache using the caller's context.
852  */
853 static void
854 dbuf_evict_notify(uint64_t size)
855 {
856 	/*
857 	 * We check if we should evict without holding the dbuf_evict_lock,
858 	 * because it's OK to occasionally make the wrong decision here,
859 	 * and grabbing the lock results in massive lock contention.
860 	 */
861 	if (size > dbuf_cache_target_bytes()) {
862 		if (size > dbuf_cache_hiwater_bytes())
863 			dbuf_evict_one();
864 		cv_signal(&dbuf_evict_cv);
865 	}
866 }
867 
868 static int
869 dbuf_kstat_update(kstat_t *ksp, int rw)
870 {
871 	dbuf_stats_t *ds = ksp->ks_data;
872 	dbuf_hash_table_t *h = &dbuf_hash_table;
873 
874 	if (rw == KSTAT_WRITE)
875 		return (SET_ERROR(EACCES));
876 
877 	ds->cache_count.value.ui64 =
878 	    wmsum_value(&dbuf_sums.cache_count);
879 	ds->cache_size_bytes.value.ui64 =
880 	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
881 	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
882 	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
883 	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
884 	ds->cache_total_evicts.value.ui64 =
885 	    wmsum_value(&dbuf_sums.cache_total_evicts);
886 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
887 		ds->cache_levels[i].value.ui64 =
888 		    wmsum_value(&dbuf_sums.cache_levels[i]);
889 		ds->cache_levels_bytes[i].value.ui64 =
890 		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
891 	}
892 	ds->hash_hits.value.ui64 =
893 	    wmsum_value(&dbuf_sums.hash_hits);
894 	ds->hash_misses.value.ui64 =
895 	    wmsum_value(&dbuf_sums.hash_misses);
896 	ds->hash_collisions.value.ui64 =
897 	    wmsum_value(&dbuf_sums.hash_collisions);
898 	ds->hash_chains.value.ui64 =
899 	    wmsum_value(&dbuf_sums.hash_chains);
900 	ds->hash_insert_race.value.ui64 =
901 	    wmsum_value(&dbuf_sums.hash_insert_race);
902 	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
903 	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
904 	ds->metadata_cache_count.value.ui64 =
905 	    wmsum_value(&dbuf_sums.metadata_cache_count);
906 	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
907 	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
908 	ds->metadata_cache_overflow.value.ui64 =
909 	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
910 	return (0);
911 }
912 
913 void
914 dbuf_init(void)
915 {
916 	uint64_t hmsize, hsize = 1ULL << 16;
917 	dbuf_hash_table_t *h = &dbuf_hash_table;
918 
919 	/*
920 	 * The hash table is big enough to fill one eighth of physical memory
921 	 * with an average block size of zfs_arc_average_blocksize (default 8K).
922 	 * By default, the table will take up
923 	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
924 	 */
925 	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
926 		hsize <<= 1;
927 
928 	h->hash_table = NULL;
929 	while (h->hash_table == NULL) {
930 		h->hash_table_mask = hsize - 1;
931 
932 		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
933 		if (h->hash_table == NULL)
934 			hsize >>= 1;
935 
936 		ASSERT3U(hsize, >=, 1ULL << 10);
937 	}
938 
939 	/*
940 	 * The hash table buckets are protected by an array of mutexes where
941 	 * each mutex is responsible for protecting 128 buckets.  A minimum
942 	 * array size of 8192 is targeted to avoid contention.
943 	 */
944 	if (dbuf_mutex_cache_shift == 0)
945 		hmsize = MAX(hsize >> 7, 1ULL << 13);
946 	else
947 		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
948 
949 	h->hash_mutexes = NULL;
950 	while (h->hash_mutexes == NULL) {
951 		h->hash_mutex_mask = hmsize - 1;
952 
953 		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
954 		    KM_SLEEP);
955 		if (h->hash_mutexes == NULL)
956 			hmsize >>= 1;
957 	}
958 
959 	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
960 	    sizeof (dmu_buf_impl_t),
961 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
962 
963 	for (int i = 0; i < hmsize; i++)
964 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_NOLOCKDEP, NULL);
965 
966 	dbuf_stats_init(h);
967 
968 	/*
969 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
970 	 * configuration is not required.
971 	 */
972 	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
973 
974 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
975 		multilist_create(&dbuf_caches[dcs].cache,
976 		    sizeof (dmu_buf_impl_t),
977 		    offsetof(dmu_buf_impl_t, db_cache_link),
978 		    dbuf_cache_multilist_index_func);
979 		zfs_refcount_create(&dbuf_caches[dcs].size);
980 	}
981 
982 	dbuf_evict_thread_exit = B_FALSE;
983 	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
984 	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
985 	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
986 	    NULL, 0, &p0, TS_RUN, minclsyspri);
987 
988 	wmsum_init(&dbuf_sums.cache_count, 0);
989 	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
990 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
991 		wmsum_init(&dbuf_sums.cache_levels[i], 0);
992 		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
993 	}
994 	wmsum_init(&dbuf_sums.hash_hits, 0);
995 	wmsum_init(&dbuf_sums.hash_misses, 0);
996 	wmsum_init(&dbuf_sums.hash_collisions, 0);
997 	wmsum_init(&dbuf_sums.hash_chains, 0);
998 	wmsum_init(&dbuf_sums.hash_insert_race, 0);
999 	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
1000 	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
1001 
1002 	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
1003 	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
1004 	    KSTAT_FLAG_VIRTUAL);
1005 	if (dbuf_ksp != NULL) {
1006 		for (int i = 0; i < DN_MAX_LEVELS; i++) {
1007 			snprintf(dbuf_stats.cache_levels[i].name,
1008 			    KSTAT_STRLEN, "cache_level_%d", i);
1009 			dbuf_stats.cache_levels[i].data_type =
1010 			    KSTAT_DATA_UINT64;
1011 			snprintf(dbuf_stats.cache_levels_bytes[i].name,
1012 			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
1013 			dbuf_stats.cache_levels_bytes[i].data_type =
1014 			    KSTAT_DATA_UINT64;
1015 		}
1016 		dbuf_ksp->ks_data = &dbuf_stats;
1017 		dbuf_ksp->ks_update = dbuf_kstat_update;
1018 		kstat_install(dbuf_ksp);
1019 	}
1020 }
1021 
1022 void
1023 dbuf_fini(void)
1024 {
1025 	dbuf_hash_table_t *h = &dbuf_hash_table;
1026 
1027 	dbuf_stats_destroy();
1028 
1029 	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1030 		mutex_destroy(&h->hash_mutexes[i]);
1031 
1032 	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1033 	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1034 	    sizeof (kmutex_t));
1035 
1036 	kmem_cache_destroy(dbuf_kmem_cache);
1037 	taskq_destroy(dbu_evict_taskq);
1038 
1039 	mutex_enter(&dbuf_evict_lock);
1040 	dbuf_evict_thread_exit = B_TRUE;
1041 	while (dbuf_evict_thread_exit) {
1042 		cv_signal(&dbuf_evict_cv);
1043 		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1044 	}
1045 	mutex_exit(&dbuf_evict_lock);
1046 
1047 	mutex_destroy(&dbuf_evict_lock);
1048 	cv_destroy(&dbuf_evict_cv);
1049 
1050 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1051 		zfs_refcount_destroy(&dbuf_caches[dcs].size);
1052 		multilist_destroy(&dbuf_caches[dcs].cache);
1053 	}
1054 
1055 	if (dbuf_ksp != NULL) {
1056 		kstat_delete(dbuf_ksp);
1057 		dbuf_ksp = NULL;
1058 	}
1059 
1060 	wmsum_fini(&dbuf_sums.cache_count);
1061 	wmsum_fini(&dbuf_sums.cache_total_evicts);
1062 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
1063 		wmsum_fini(&dbuf_sums.cache_levels[i]);
1064 		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1065 	}
1066 	wmsum_fini(&dbuf_sums.hash_hits);
1067 	wmsum_fini(&dbuf_sums.hash_misses);
1068 	wmsum_fini(&dbuf_sums.hash_collisions);
1069 	wmsum_fini(&dbuf_sums.hash_chains);
1070 	wmsum_fini(&dbuf_sums.hash_insert_race);
1071 	wmsum_fini(&dbuf_sums.metadata_cache_count);
1072 	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1073 }
1074 
1075 /*
1076  * Other stuff.
1077  */
1078 
1079 #ifdef ZFS_DEBUG
1080 static void
1081 dbuf_verify(dmu_buf_impl_t *db)
1082 {
1083 	dnode_t *dn;
1084 	dbuf_dirty_record_t *dr;
1085 	uint32_t txg_prev;
1086 
1087 	ASSERT(MUTEX_HELD(&db->db_mtx));
1088 
1089 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1090 		return;
1091 
1092 	ASSERT(db->db_objset != NULL);
1093 	DB_DNODE_ENTER(db);
1094 	dn = DB_DNODE(db);
1095 	if (dn == NULL) {
1096 		ASSERT(db->db_parent == NULL);
1097 		ASSERT(db->db_blkptr == NULL);
1098 	} else {
1099 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
1100 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
1101 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
1102 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1103 		    db->db_blkid == DMU_SPILL_BLKID ||
1104 		    !avl_is_empty(&dn->dn_dbufs));
1105 	}
1106 	if (db->db_blkid == DMU_BONUS_BLKID) {
1107 		ASSERT(dn != NULL);
1108 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1109 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1110 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
1111 		ASSERT(dn != NULL);
1112 		ASSERT0(db->db.db_offset);
1113 	} else {
1114 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1115 	}
1116 
1117 	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1118 		ASSERT(dr->dr_dbuf == db);
1119 		txg_prev = dr->dr_txg;
1120 		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1121 		    dr = list_next(&db->db_dirty_records, dr)) {
1122 			ASSERT(dr->dr_dbuf == db);
1123 			ASSERT(txg_prev > dr->dr_txg);
1124 			txg_prev = dr->dr_txg;
1125 		}
1126 	}
1127 
1128 	/*
1129 	 * We can't assert that db_size matches dn_datablksz because it
1130 	 * can be momentarily different when another thread is doing
1131 	 * dnode_set_blksz().
1132 	 */
1133 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1134 		dr = db->db_data_pending;
1135 		/*
1136 		 * It should only be modified in syncing context, so
1137 		 * make sure we only have one copy of the data.
1138 		 */
1139 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1140 	}
1141 
1142 	/* verify db->db_blkptr */
1143 	if (db->db_blkptr) {
1144 		if (db->db_parent == dn->dn_dbuf) {
1145 			/* db is pointed to by the dnode */
1146 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1147 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1148 				ASSERT(db->db_parent == NULL);
1149 			else
1150 				ASSERT(db->db_parent != NULL);
1151 			if (db->db_blkid != DMU_SPILL_BLKID)
1152 				ASSERT3P(db->db_blkptr, ==,
1153 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
1154 		} else {
1155 			/* db is pointed to by an indirect block */
1156 			int epb __maybe_unused = db->db_parent->db.db_size >>
1157 			    SPA_BLKPTRSHIFT;
1158 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1159 			ASSERT3U(db->db_parent->db.db_object, ==,
1160 			    db->db.db_object);
1161 			/*
1162 			 * dnode_grow_indblksz() can make this fail if we don't
1163 			 * have the parent's rwlock.  XXX indblksz no longer
1164 			 * grows.  safe to do this now?
1165 			 */
1166 			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1167 				ASSERT3P(db->db_blkptr, ==,
1168 				    ((blkptr_t *)db->db_parent->db.db_data +
1169 				    db->db_blkid % epb));
1170 			}
1171 		}
1172 	}
1173 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1174 	    (db->db_buf == NULL || db->db_buf->b_data) &&
1175 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1176 	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
1177 		/*
1178 		 * If the blkptr isn't set but they have nonzero data,
1179 		 * it had better be dirty, otherwise we'll lose that
1180 		 * data when we evict this buffer.
1181 		 *
1182 		 * There is an exception to this rule for indirect blocks; in
1183 		 * this case, if the indirect block is a hole, we fill in a few
1184 		 * fields on each of the child blocks (importantly, birth time)
1185 		 * to prevent hole birth times from being lost when you
1186 		 * partially fill in a hole.
1187 		 */
1188 		if (db->db_dirtycnt == 0) {
1189 			if (db->db_level == 0) {
1190 				uint64_t *buf = db->db.db_data;
1191 				int i;
1192 
1193 				for (i = 0; i < db->db.db_size >> 3; i++) {
1194 					ASSERT(buf[i] == 0);
1195 				}
1196 			} else {
1197 				blkptr_t *bps = db->db.db_data;
1198 				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1199 				    db->db.db_size);
1200 				/*
1201 				 * We want to verify that all the blkptrs in the
1202 				 * indirect block are holes, but we may have
1203 				 * automatically set up a few fields for them.
1204 				 * We iterate through each blkptr and verify
1205 				 * they only have those fields set.
1206 				 */
1207 				for (int i = 0;
1208 				    i < db->db.db_size / sizeof (blkptr_t);
1209 				    i++) {
1210 					blkptr_t *bp = &bps[i];
1211 					ASSERT(ZIO_CHECKSUM_IS_ZERO(
1212 					    &bp->blk_cksum));
1213 					ASSERT(
1214 					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1215 					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1216 					    DVA_IS_EMPTY(&bp->blk_dva[2]));
1217 					ASSERT0(bp->blk_fill);
1218 					ASSERT0(bp->blk_pad[0]);
1219 					ASSERT0(bp->blk_pad[1]);
1220 					ASSERT(!BP_IS_EMBEDDED(bp));
1221 					ASSERT(BP_IS_HOLE(bp));
1222 					ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
1223 				}
1224 			}
1225 		}
1226 	}
1227 	DB_DNODE_EXIT(db);
1228 }
1229 #endif
1230 
1231 static void
1232 dbuf_clear_data(dmu_buf_impl_t *db)
1233 {
1234 	ASSERT(MUTEX_HELD(&db->db_mtx));
1235 	dbuf_evict_user(db);
1236 	ASSERT3P(db->db_buf, ==, NULL);
1237 	db->db.db_data = NULL;
1238 	if (db->db_state != DB_NOFILL) {
1239 		db->db_state = DB_UNCACHED;
1240 		DTRACE_SET_STATE(db, "clear data");
1241 	}
1242 }
1243 
1244 static void
1245 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1246 {
1247 	ASSERT(MUTEX_HELD(&db->db_mtx));
1248 	ASSERT(buf != NULL);
1249 
1250 	db->db_buf = buf;
1251 	ASSERT(buf->b_data != NULL);
1252 	db->db.db_data = buf->b_data;
1253 }
1254 
1255 static arc_buf_t *
1256 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1257 {
1258 	spa_t *spa = db->db_objset->os_spa;
1259 
1260 	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1261 }
1262 
1263 /*
1264  * Loan out an arc_buf for read.  Return the loaned arc_buf.
1265  */
1266 arc_buf_t *
1267 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1268 {
1269 	arc_buf_t *abuf;
1270 
1271 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1272 	mutex_enter(&db->db_mtx);
1273 	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1274 		int blksz = db->db.db_size;
1275 		spa_t *spa = db->db_objset->os_spa;
1276 
1277 		mutex_exit(&db->db_mtx);
1278 		abuf = arc_loan_buf(spa, B_FALSE, blksz);
1279 		memcpy(abuf->b_data, db->db.db_data, blksz);
1280 	} else {
1281 		abuf = db->db_buf;
1282 		arc_loan_inuse_buf(abuf, db);
1283 		db->db_buf = NULL;
1284 		dbuf_clear_data(db);
1285 		mutex_exit(&db->db_mtx);
1286 	}
1287 	return (abuf);
1288 }
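/*
 * Hypothetical caller sketch (not from a real call site): ownership of the
 * loaned buffer passes to the caller, which must eventually return or
 * consume it through the ARC/DMU loan interfaces, e.g.:
 *
 *	arc_buf_t *abuf = dbuf_loan_arcbuf(db);
 *	... read abuf->b_data ...
 *	dmu_return_arcbuf(abuf);	(one plausible disposition)
 */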
1289 
1290 /*
1291  * Calculate which level n block references the data at the level 0 offset
1292  * provided.
1293  */
1294 uint64_t
1295 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1296 {
1297 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1298 		/*
1299 		 * The level n blkid is equal to the level 0 blkid divided by
1300 		 * the number of level 0s in a level n block.
1301 		 *
1302 		 * The level 0 blkid is offset >> datablkshift =
1303 		 * offset / 2^datablkshift.
1304 		 *
1305 		 * The number of level 0s in a level n is the number of block
1306 		 * pointers in an indirect block, raised to the power of level.
1307 		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1308 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1309 		 *
1310 		 * Thus, the level n blkid is: offset /
1311 		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1312 		 * = offset / 2^(datablkshift + level *
1313 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1314 		 * = offset >> (datablkshift + level *
1315 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1316 		 */
1317 
1318 		const unsigned exp = dn->dn_datablkshift +
1319 		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1320 
1321 		if (exp >= 8 * sizeof (offset)) {
1322 			/* This only happens on the highest indirection level */
1323 			ASSERT3U(level, ==, dn->dn_nlevels - 1);
1324 			return (0);
1325 		}
1326 
1327 		ASSERT3U(exp, <, 8 * sizeof (offset));
1328 
1329 		return (offset >> exp);
1330 	} else {
1331 		ASSERT3U(offset, <, dn->dn_datablksz);
1332 		return (0);
1333 	}
1334 }
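/*
 * Concrete instance of the math above (illustrative values): with 128K data
 * blocks (datablkshift = 17), 128K indirect blocks (indblkshift = 17) and
 * SPA_BLKPTRSHIFT = 7, each indirect block holds 2^10 = 1024 block pointers.
 * For offset = 1 GiB at level = 1, exp = 17 + 1 * (17 - 7) = 27, so
 * dbuf_whichblock() returns 2^30 >> 27 = 8.
 */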
1335 
1336 /*
1337  * This function is used to lock the parent of the provided dbuf. This should be
1338  * used when modifying or reading db_blkptr.
1339  */
1340 db_lock_type_t
1341 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1342 {
1343 	enum db_lock_type ret = DLT_NONE;
1344 	if (db->db_parent != NULL) {
1345 		rw_enter(&db->db_parent->db_rwlock, rw);
1346 		ret = DLT_PARENT;
1347 	} else if (dmu_objset_ds(db->db_objset) != NULL) {
1348 		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1349 		    tag);
1350 		ret = DLT_OBJSET;
1351 	}
1352 	/*
1353 	 * We only return a DLT_NONE lock when it's the top-most indirect block
1354 	 * of the meta-dnode of the MOS.
1355 	 */
1356 	return (ret);
1357 }
1358 
1359 /*
1360  * We need to pass the lock type in because it's possible that the block will
1361  * move from being the topmost indirect block in a dnode (and thus, have no
1362  * parent) to not the top-most via an indirection increase. This would cause a
1363  * panic if we didn't pass the lock type in.
1364  */
1365 void
1366 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1367 {
1368 	if (type == DLT_PARENT)
1369 		rw_exit(&db->db_parent->db_rwlock);
1370 	else if (type == DLT_OBJSET)
1371 		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1372 }
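/*
 * Illustrative pairing of the two functions above (a sketch, not an actual
 * call site): the returned lock type must be handed back so that a
 * DLT_OBJSET lock is not mistaken for a DLT_PARENT one:
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	... read or modify db->db_blkptr ...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */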
1373 
1374 static void
1375 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1376     arc_buf_t *buf, void *vdb)
1377 {
1378 	(void) zb, (void) bp;
1379 	dmu_buf_impl_t *db = vdb;
1380 
1381 	mutex_enter(&db->db_mtx);
1382 	ASSERT3U(db->db_state, ==, DB_READ);
1383 	/*
1384 	 * All reads are synchronous, so we must have a hold on the dbuf
1385 	 */
1386 	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1387 	ASSERT(db->db_buf == NULL);
1388 	ASSERT(db->db.db_data == NULL);
1389 	if (buf == NULL) {
1390 		/* i/o error */
1391 		ASSERT(zio == NULL || zio->io_error != 0);
1392 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1393 		ASSERT3P(db->db_buf, ==, NULL);
1394 		db->db_state = DB_UNCACHED;
1395 		DTRACE_SET_STATE(db, "i/o error");
1396 	} else if (db->db_level == 0 && db->db_freed_in_flight) {
1397 		/* freed in flight */
1398 		ASSERT(zio == NULL || zio->io_error == 0);
1399 		arc_release(buf, db);
1400 		memset(buf->b_data, 0, db->db.db_size);
1401 		arc_buf_freeze(buf);
1402 		db->db_freed_in_flight = FALSE;
1403 		dbuf_set_data(db, buf);
1404 		db->db_state = DB_CACHED;
1405 		DTRACE_SET_STATE(db, "freed in flight");
1406 	} else {
1407 		/* success */
1408 		ASSERT(zio == NULL || zio->io_error == 0);
1409 		dbuf_set_data(db, buf);
1410 		db->db_state = DB_CACHED;
1411 		DTRACE_SET_STATE(db, "successful read");
1412 	}
1413 	cv_broadcast(&db->db_changed);
1414 	dbuf_rele_and_unlock(db, NULL, B_FALSE);
1415 }
1416 
1417 /*
1418  * Shortcut for performing reads on bonus dbufs.  Returns
1419  * an error if we fail to verify the dnode associated with
1420  * a decrypted block. Otherwise success.
1421  */
1422 static int
1423 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn)
1424 {
1425 	int bonuslen, max_bonuslen;
1426 
1427 	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1428 	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1429 	ASSERT(MUTEX_HELD(&db->db_mtx));
1430 	ASSERT(DB_DNODE_HELD(db));
1431 	ASSERT3U(bonuslen, <=, db->db.db_size);
1432 	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1433 	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1434 	if (bonuslen < max_bonuslen)
1435 		memset(db->db.db_data, 0, max_bonuslen);
1436 	if (bonuslen)
1437 		memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1438 	db->db_state = DB_CACHED;
1439 	DTRACE_SET_STATE(db, "bonus buffer filled");
1440 	return (0);
1441 }
1442 
1443 static void
1444 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
1445 {
1446 	blkptr_t *bps = db->db.db_data;
1447 	uint32_t indbs = 1ULL << dn->dn_indblkshift;
1448 	int n_bps = indbs >> SPA_BLKPTRSHIFT;
1449 
1450 	for (int i = 0; i < n_bps; i++) {
1451 		blkptr_t *bp = &bps[i];
1452 
1453 		ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1454 		BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1455 		    dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1456 		BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1457 		BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1458 		BP_SET_BIRTH(bp, BP_GET_LOGICAL_BIRTH(dbbp), 0);
1459 	}
1460 }
1461 
1462 /*
1463  * Handle reads on dbufs that are holes, if necessary.  This function
1464  * requires that the dbuf's mutex is held. Returns success (0) if action
1465  * was taken, ENOENT if no action was taken.
1466  */
1467 static int
1468 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1469 {
1470 	ASSERT(MUTEX_HELD(&db->db_mtx));
1471 
1472 	int is_hole = bp == NULL || BP_IS_HOLE(bp);
1473 	/*
1474 	 * For level 0 blocks only, if the above check fails:
1475 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1476 	 * processes the delete record and clears the bp while we are waiting
1477 	 * for the dn_mtx (resulting in a "no" from block_freed).
1478 	 */
1479 	if (!is_hole && db->db_level == 0)
1480 		is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1481 
1482 	if (is_hole) {
1483 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1484 		memset(db->db.db_data, 0, db->db.db_size);
1485 
1486 		if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1487 		    BP_GET_LOGICAL_BIRTH(bp) != 0) {
1488 			dbuf_handle_indirect_hole(db, dn, bp);
1489 		}
1490 		db->db_state = DB_CACHED;
1491 		DTRACE_SET_STATE(db, "hole read satisfied");
1492 		return (0);
1493 	}
1494 	return (ENOENT);
1495 }
1496 
1497 /*
1498  * This function ensures that, when doing a decrypting read of a block,
1499  * we make sure we have decrypted the dnode associated with it. We must do
1500  * this so that we ensure we are fully authenticating the checksum-of-MACs
1501  * tree from the root of the objset down to this block. Indirect blocks are
1502  * always verified against their secure checksum-of-MACs assuming that the
1503  * dnode containing them is correct. Now that we are doing a decrypting read,
1504  * we can be sure that the key is loaded and verify that assumption. This is
1505  * especially important considering that we always read encrypted dnode
1506  * blocks as raw data (without verifying their MACs) to start, and
1507  * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1508  */
1509 static int
1510 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1511 {
1512 	objset_t *os = db->db_objset;
1513 	dmu_buf_impl_t *dndb;
1514 	arc_buf_t *dnbuf;
1515 	zbookmark_phys_t zb;
1516 	int err;
1517 
1518 	if ((flags & DB_RF_NO_DECRYPT) != 0 ||
1519 	    !os->os_encrypted || os->os_raw_receive ||
1520 	    (dndb = dn->dn_dbuf) == NULL)
1521 		return (0);
1522 
1523 	dnbuf = dndb->db_buf;
1524 	if (!arc_is_encrypted(dnbuf))
1525 		return (0);
1526 
1527 	mutex_enter(&dndb->db_mtx);
1528 
1529 	/*
1530 	 * Since the dnode buffer is modified by the sync process, there can
1531 	 * be only one copy of it.  This means we cannot modify (decrypt) it
1532 	 * while it is being written.  I don't see how this may happen now, since
1533 	 * encrypted dnode writes by receive should be completed before any
1534 	 * plain-text reads due to txg wait, but better be safe than sorry.
1535 	 */
1536 	while (1) {
1537 		if (!arc_is_encrypted(dnbuf)) {
1538 			mutex_exit(&dndb->db_mtx);
1539 			return (0);
1540 		}
1541 		dbuf_dirty_record_t *dr = dndb->db_data_pending;
1542 		if (dr == NULL || dr->dt.dl.dr_data != dnbuf)
1543 			break;
1544 		cv_wait(&dndb->db_changed, &dndb->db_mtx);
1545 	}
1546 
1547 	SET_BOOKMARK(&zb, dmu_objset_id(os),
1548 	    DMU_META_DNODE_OBJECT, 0, dndb->db_blkid);
1549 	err = arc_untransform(dnbuf, os->os_spa, &zb, B_TRUE);
1550 
1551 	/*
1552 	 * An error code of EACCES tells us that the key is still not
1553 	 * available. This is ok if we are only reading authenticated
1554 	 * (and therefore non-encrypted) blocks.
1555 	 */
1556 	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1557 	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1558 	    (db->db_blkid == DMU_BONUS_BLKID &&
1559 	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1560 		err = 0;
1561 
1562 	mutex_exit(&dndb->db_mtx);
1563 
1564 	return (err);
1565 }
1566 
1567 /*
1568  * Drops db_mtx and the parent lock specified by dblt and tag before
1569  * returning.
1570  */
1571 static int
1572 dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, uint32_t flags,
1573     db_lock_type_t dblt, const void *tag)
1574 {
1575 	zbookmark_phys_t zb;
1576 	uint32_t aflags = ARC_FLAG_NOWAIT;
1577 	int err, zio_flags;
1578 	blkptr_t bp, *bpp = NULL;
1579 
1580 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1581 	ASSERT(MUTEX_HELD(&db->db_mtx));
1582 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1583 	ASSERT(db->db_buf == NULL);
1584 	ASSERT(db->db_parent == NULL ||
1585 	    RW_LOCK_HELD(&db->db_parent->db_rwlock));
1586 
1587 	if (db->db_blkid == DMU_BONUS_BLKID) {
1588 		err = dbuf_read_bonus(db, dn);
1589 		goto early_unlock;
1590 	}
1591 
1592 	/*
1593 	 * If we have a pending block clone, we don't want to read the
1594 	 * underlying block, but the content of the block being cloned,
1595 	 * pointed to by the dirty record, so we have the most recent data.
1596 	 * If there is no dirty record, then we hit a race with the sync
1597 	 * process: the dirty record was already removed, while the dbuf
1598 	 * was not yet destroyed. Such a case is equivalent to uncached.
1599 	 */
1600 	if (db->db_state == DB_NOFILL) {
1601 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1602 		if (dr != NULL) {
1603 			if (!dr->dt.dl.dr_brtwrite) {
1604 				err = EIO;
1605 				goto early_unlock;
1606 			}
1607 			bp = dr->dt.dl.dr_overridden_by;
1608 			bpp = &bp;
1609 		}
1610 	}
1611 
1612 	if (bpp == NULL && db->db_blkptr != NULL) {
1613 		bp = *db->db_blkptr;
1614 		bpp = &bp;
1615 	}
1616 
1617 	err = dbuf_read_hole(db, dn, bpp);
1618 	if (err == 0)
1619 		goto early_unlock;
1620 
1621 	ASSERT(bpp != NULL);
1622 
1623 	/*
1624 	 * Any attempt to read a redacted block should result in an error. This
1625 	 * will never happen under normal conditions, but can be useful for
1626 	 * debugging purposes.
1627 	 */
1628 	if (BP_IS_REDACTED(bpp)) {
1629 		ASSERT(dsl_dataset_feature_is_active(
1630 		    db->db_objset->os_dsl_dataset,
1631 		    SPA_FEATURE_REDACTED_DATASETS));
1632 		err = SET_ERROR(EIO);
1633 		goto early_unlock;
1634 	}
1635 
1636 	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1637 	    db->db.db_object, db->db_level, db->db_blkid);
1638 
1639 	/*
1640 	 * All bps of an encrypted os should have the encryption bit set.
1641 	 * If this is not true it indicates tampering and we report an error.
1642 	 */
1643 	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
1644 		spa_log_error(db->db_objset->os_spa, &zb,
1645 		    BP_GET_LOGICAL_BIRTH(bpp));
1646 		err = SET_ERROR(EIO);
1647 		goto early_unlock;
1648 	}
1649 
1650 	db->db_state = DB_READ;
1651 	DTRACE_SET_STATE(db, "read issued");
1652 	mutex_exit(&db->db_mtx);
1653 
1654 	if (!DBUF_IS_CACHEABLE(db))
1655 		aflags |= ARC_FLAG_UNCACHED;
1656 	else if (dbuf_is_l2cacheable(db))
1657 		aflags |= ARC_FLAG_L2CACHE;
1658 
1659 	dbuf_add_ref(db, NULL);
1660 
1661 	zio_flags = (flags & DB_RF_CANFAIL) ?
1662 	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1663 
1664 	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1665 		zio_flags |= ZIO_FLAG_RAW;
1666 	/*
1667 	 * The zio layer will copy the provided blkptr later, but we have our
1668 	 * own copy so that we can release the parent's rwlock. We have to
1669 	 * do that so that if dbuf_read_done is called synchronously (on
1670 	 * an l1 cache hit) we don't acquire the db_mtx while holding the
1671 	 * parent's rwlock, which would be a lock ordering violation.
1672 	 */
1673 	dmu_buf_unlock_parent(db, dblt, tag);
1674 	return (arc_read(zio, db->db_objset->os_spa, bpp,
1675 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1676 	    &aflags, &zb));
1677 
1678 early_unlock:
1679 	mutex_exit(&db->db_mtx);
1680 	dmu_buf_unlock_parent(db, dblt, tag);
1681 	return (err);
1682 }
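
/*
 * Caller-contract sketch (illustrative, mirroring dbuf_read() below; not
 * a new API): db_mtx and the parent lock identified by dblt are both
 * dropped on every return path, so the caller must not release them again:
 *
 *	mutex_enter(&db->db_mtx);
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	err = dbuf_read_impl(db, dn, pio, flags, dblt, FTAG);
 *	(both db_mtx and the parent rwlock are already released here)
 */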
1683 
1684 /*
1685  * This is our just-in-time copy function.  It makes a copy of buffers that
1686  * have been modified in a previous transaction group before we access them in
1687  * the current active group.
1688  *
1689  * This function is used in three places: when we are dirtying a buffer for the
1690  * first time in a txg, when we are freeing a range in a dnode that includes
1691  * this buffer, and when we are accessing a buffer which was received compressed
1692  * and later referenced in a WRITE_BYREF record.
1693  *
1694  * Note that when we are called from dbuf_free_range() we do not put a hold on
1695  * the buffer; we just traverse the active dbuf list for the dnode.
1696  */
1697 static void
1698 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1699 {
1700 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1701 
1702 	ASSERT(MUTEX_HELD(&db->db_mtx));
1703 	ASSERT(db->db.db_data != NULL);
1704 	ASSERT(db->db_level == 0);
1705 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1706 
1707 	if (dr == NULL ||
1708 	    (dr->dt.dl.dr_data !=
1709 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1710 		return;
1711 
1712 	/*
1713 	 * If the last dirty record for this dbuf has not yet synced
1714 	 * and it is referencing the dbuf data, either:
1715 	 *	reset the reference to point to a new copy,
1716 	 * or (if there are no active holders)
1717 	 *	just null out the current db_data pointer.
1718 	 */
1719 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1720 	if (db->db_blkid == DMU_BONUS_BLKID) {
1721 		dnode_t *dn = DB_DNODE(db);
1722 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1723 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1724 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1725 		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1726 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1727 		dnode_t *dn = DB_DNODE(db);
1728 		int size = arc_buf_size(db->db_buf);
1729 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1730 		spa_t *spa = db->db_objset->os_spa;
1731 		enum zio_compress compress_type =
1732 		    arc_get_compression(db->db_buf);
1733 		uint8_t complevel = arc_get_complevel(db->db_buf);
1734 
1735 		if (arc_is_encrypted(db->db_buf)) {
1736 			boolean_t byteorder;
1737 			uint8_t salt[ZIO_DATA_SALT_LEN];
1738 			uint8_t iv[ZIO_DATA_IV_LEN];
1739 			uint8_t mac[ZIO_DATA_MAC_LEN];
1740 
1741 			arc_get_raw_params(db->db_buf, &byteorder, salt,
1742 			    iv, mac);
1743 			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1744 			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1745 			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1746 			    compress_type, complevel);
1747 		} else if (compress_type != ZIO_COMPRESS_OFF) {
1748 			ASSERT3U(type, ==, ARC_BUFC_DATA);
1749 			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1750 			    size, arc_buf_lsize(db->db_buf), compress_type,
1751 			    complevel);
1752 		} else {
1753 			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1754 		}
1755 		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1756 	} else {
1757 		db->db_buf = NULL;
1758 		dbuf_clear_data(db);
1759 	}
1760 }
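
/*
 * Worked example of the just-in-time copy above (illustrative): suppose a
 * buffer was dirtied in txg 10, which is now syncing, and is dirtied again
 * in the open txg 11.  Without a copy, the txg 11 modification would be
 * visible through the txg 10 dirty record and leak into the block written
 * for txg 10.  The copy gives the unsynced dirty record its own stable
 * snapshot of the data; the ASSERT above allows dr_txg >= txg - 2 since
 * up to three txgs (open, quiescing, syncing) can be in flight at once.
 */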
1761 
1762 int
1763 dbuf_read(dmu_buf_impl_t *db, zio_t *pio, uint32_t flags)
1764 {
1765 	dnode_t *dn;
1766 	boolean_t miss = B_TRUE, need_wait = B_FALSE, prefetch;
1767 	int err;
1768 
1769 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1770 
1771 	DB_DNODE_ENTER(db);
1772 	dn = DB_DNODE(db);
1773 
1774 	/*
1775 	 * Ensure that this block's dnode has been decrypted if the caller
1776 	 * has requested decrypted data.
1777 	 */
1778 	err = dbuf_read_verify_dnode_crypt(db, dn, flags);
1779 	if (err != 0)
1780 		goto done;
1781 
1782 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1783 	    (flags & DB_RF_NOPREFETCH) == 0;
1784 
1785 	mutex_enter(&db->db_mtx);
1786 	if (flags & DB_RF_PARTIAL_FIRST)
1787 		db->db_partial_read = B_TRUE;
1788 	else if (!(flags & DB_RF_PARTIAL_MORE))
1789 		db->db_partial_read = B_FALSE;
1790 	miss = (db->db_state != DB_CACHED);
1791 
1792 	if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1793 		/*
1794 		 * Another reader came in while the dbuf was in flight between
1795 		 * UNCACHED and CACHED.  Either a writer will finish filling
1796 		 * the buffer, sending the dbuf to CACHED, or the first reader's
1797 		 * request will reach the read_done callback and send the dbuf
1798 		 * to CACHED.  Otherwise, a failure occurred and the dbuf will
1799 		 * be sent to UNCACHED.
1800 		 */
1801 		if (flags & DB_RF_NEVERWAIT) {
1802 			mutex_exit(&db->db_mtx);
1803 			DB_DNODE_EXIT(db);
1804 			goto done;
1805 		}
1806 		do {
1807 			ASSERT(db->db_state == DB_READ ||
1808 			    (flags & DB_RF_HAVESTRUCT) == 0);
1809 			DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, db,
1810 			    zio_t *, pio);
1811 			cv_wait(&db->db_changed, &db->db_mtx);
1812 		} while (db->db_state == DB_READ || db->db_state == DB_FILL);
1813 		if (db->db_state == DB_UNCACHED) {
1814 			err = SET_ERROR(EIO);
1815 			mutex_exit(&db->db_mtx);
1816 			DB_DNODE_EXIT(db);
1817 			goto done;
1818 		}
1819 	}
1820 
1821 	if (db->db_state == DB_CACHED) {
1822 		/*
1823 		 * If the arc buf is compressed or encrypted and the caller
1824 		 * requested uncompressed data, we need to untransform it
1825 		 * before returning. We also call arc_untransform() on any
1826 		 * unauthenticated blocks, which will verify their MAC if
1827 		 * the key is now available.
1828 		 */
1829 		if ((flags & DB_RF_NO_DECRYPT) == 0 && db->db_buf != NULL &&
1830 		    (arc_is_encrypted(db->db_buf) ||
1831 		    arc_is_unauthenticated(db->db_buf) ||
1832 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1833 			spa_t *spa = dn->dn_objset->os_spa;
1834 			zbookmark_phys_t zb;
1835 
1836 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1837 			    db->db.db_object, db->db_level, db->db_blkid);
1838 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1839 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1840 			dbuf_set_data(db, db->db_buf);
1841 		}
1842 		mutex_exit(&db->db_mtx);
1843 	} else {
1844 		ASSERT(db->db_state == DB_UNCACHED ||
1845 		    db->db_state == DB_NOFILL);
1846 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1847 		if (pio == NULL && (db->db_state == DB_NOFILL ||
1848 		    (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1849 			spa_t *spa = dn->dn_objset->os_spa;
1850 			pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1851 			need_wait = B_TRUE;
1852 		}
1853 		err = dbuf_read_impl(db, dn, pio, flags, dblt, FTAG);
1854 		/* dbuf_read_impl drops db_mtx and parent's rwlock. */
1855 		miss = (db->db_state != DB_CACHED);
1856 	}
1857 
1858 	if (err == 0 && prefetch) {
1859 		dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, miss,
1860 		    flags & DB_RF_HAVESTRUCT);
1861 	}
1862 	DB_DNODE_EXIT(db);
1863 
1864 	/*
1865 	 * If we created a zio we must execute it to avoid leaking it, even if
1866 	 * it isn't attached to any work due to an error in dbuf_read_impl().
1867 	 */
1868 	if (need_wait) {
1869 		if (err == 0)
1870 			err = zio_wait(pio);
1871 		else
1872 			(void) zio_wait(pio);
1873 		pio = NULL;
1874 	}
1875 
1876 done:
1877 	if (miss)
1878 		DBUF_STAT_BUMP(hash_misses);
1879 	else
1880 		DBUF_STAT_BUMP(hash_hits);
1881 	if (pio && err != 0) {
1882 		zio_t *zio = zio_null(pio, pio->io_spa, NULL, NULL, NULL,
1883 		    ZIO_FLAG_CANFAIL);
1884 		zio->io_error = err;
1885 		zio_nowait(zio);
1886 	}
1887 
1888 	return (err);
1889 }
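
/*
 * Usage sketch (illustrative only): a typical internal consumer holds a
 * level-0 dbuf, reads it allowing failure, and drops the hold when done.
 * dbuf_hold() requires the caller to hold dn_struct_rwlock:
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		int err = dbuf_read(db, NULL,
 *		    DB_RF_CANFAIL | DB_RF_NOPREFETCH);
 *		if (err == 0) {
 *			(db->db.db_data is valid; db_state is DB_CACHED)
 *		}
 *		dbuf_rele(db, FTAG);
 *	}
 */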
1890 
1891 static void
1892 dbuf_noread(dmu_buf_impl_t *db)
1893 {
1894 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1895 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1896 	mutex_enter(&db->db_mtx);
1897 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1898 		cv_wait(&db->db_changed, &db->db_mtx);
1899 	if (db->db_state == DB_UNCACHED) {
1900 		ASSERT(db->db_buf == NULL);
1901 		ASSERT(db->db.db_data == NULL);
1902 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1903 		db->db_state = DB_FILL;
1904 		DTRACE_SET_STATE(db, "assigning filled buffer");
1905 	} else if (db->db_state == DB_NOFILL) {
1906 		dbuf_clear_data(db);
1907 	} else {
1908 		ASSERT3U(db->db_state, ==, DB_CACHED);
1909 	}
1910 	mutex_exit(&db->db_mtx);
1911 }
1912 
1913 void
1914 dbuf_unoverride(dbuf_dirty_record_t *dr)
1915 {
1916 	dmu_buf_impl_t *db = dr->dr_dbuf;
1917 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1918 	uint64_t txg = dr->dr_txg;
1919 
1920 	ASSERT(MUTEX_HELD(&db->db_mtx));
1921 	/*
1922 	 * This assert is valid because dmu_sync() expects to be called by
1923 	 * a zilog's get_data while holding a range lock.  This call only
1924 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1925 	 */
1926 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1927 	ASSERT(db->db_level == 0);
1928 
1929 	if (db->db_blkid == DMU_BONUS_BLKID ||
1930 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1931 		return;
1932 
1933 	ASSERT(db->db_data_pending != dr);
1934 
1935 	/* free this block */
1936 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1937 		zio_free(db->db_objset->os_spa, txg, bp);
1938 
1939 	if (dr->dt.dl.dr_brtwrite) {
1940 		ASSERT0P(dr->dt.dl.dr_data);
1941 		dr->dt.dl.dr_data = db->db_buf;
1942 	}
1943 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1944 	dr->dt.dl.dr_nopwrite = B_FALSE;
1945 	dr->dt.dl.dr_brtwrite = B_FALSE;
1946 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1947 
1948 	/*
1949 	 * Release the already-written buffer, so we leave it in
1950 	 * a consistent dirty state.  Note that all callers are
1951 	 * modifying the buffer, so they will immediately do
1952 	 * another (redundant) arc_release().  Therefore, leave
1953 	 * the buf thawed to save the effort of freezing &
1954 	 * immediately re-thawing it.
1955 	 */
1956 	if (dr->dt.dl.dr_data)
1957 		arc_release(dr->dt.dl.dr_data, db);
1958 }
1959 
1960 /*
1961  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1962  * data blocks in the free range, so that any future readers will find
1963  * empty blocks.
1964  */
1965 void
1966 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1967     dmu_tx_t *tx)
1968 {
1969 	dmu_buf_impl_t *db_search;
1970 	dmu_buf_impl_t *db, *db_next;
1971 	uint64_t txg = tx->tx_txg;
1972 	avl_index_t where;
1973 	dbuf_dirty_record_t *dr;
1974 
1975 	if (end_blkid > dn->dn_maxblkid &&
1976 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1977 		end_blkid = dn->dn_maxblkid;
1978 	dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1979 	    (u_longlong_t)end_blkid);
1980 
1981 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1982 	db_search->db_level = 0;
1983 	db_search->db_blkid = start_blkid;
1984 	db_search->db_state = DB_SEARCH;
1985 
1986 	mutex_enter(&dn->dn_dbufs_mtx);
1987 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1988 	ASSERT3P(db, ==, NULL);
1989 
1990 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1991 
1992 	for (; db != NULL; db = db_next) {
1993 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
1994 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1995 
1996 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
1997 			break;
1998 		}
1999 		ASSERT3U(db->db_blkid, >=, start_blkid);
2000 
2001 		/* found a level 0 buffer in the range */
2002 		mutex_enter(&db->db_mtx);
2003 		if (dbuf_undirty(db, tx)) {
2004 			/* mutex has been dropped and dbuf destroyed */
2005 			continue;
2006 		}
2007 
2008 		if (db->db_state == DB_UNCACHED ||
2009 		    db->db_state == DB_NOFILL ||
2010 		    db->db_state == DB_EVICTING) {
2011 			ASSERT(db->db.db_data == NULL);
2012 			mutex_exit(&db->db_mtx);
2013 			continue;
2014 		}
2015 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2016 			/* will be handled in dbuf_read_done or dbuf_rele */
2017 			db->db_freed_in_flight = TRUE;
2018 			mutex_exit(&db->db_mtx);
2019 			continue;
2020 		}
2021 		if (zfs_refcount_count(&db->db_holds) == 0) {
2022 			ASSERT(db->db_buf);
2023 			dbuf_destroy(db);
2024 			continue;
2025 		}
2026 		/* The dbuf is referenced */
2027 
2028 		dr = list_head(&db->db_dirty_records);
2029 		if (dr != NULL) {
2030 			if (dr->dr_txg == txg) {
2031 				/*
2032 				 * This buffer is "in-use", re-adjust the file
2033 				 * size to reflect that this buffer may
2034 				 * contain new data when we sync.
2035 				 */
2036 				if (db->db_blkid != DMU_SPILL_BLKID &&
2037 				    db->db_blkid > dn->dn_maxblkid)
2038 					dn->dn_maxblkid = db->db_blkid;
2039 				dbuf_unoverride(dr);
2040 			} else {
2041 				/*
2042 				 * This dbuf is not dirty in the open context.
2043 				 * Either uncache it (if its not referenced in
2044 				 * Either uncache it (if it's not referenced in
2045 				 * empty.
2046 				 */
2047 				dbuf_fix_old_data(db, txg);
2048 			}
2049 		}
2050 		/* clear the contents if it's cached */
2051 		if (db->db_state == DB_CACHED) {
2052 			ASSERT(db->db.db_data != NULL);
2053 			arc_release(db->db_buf, db);
2054 			rw_enter(&db->db_rwlock, RW_WRITER);
2055 			memset(db->db.db_data, 0, db->db.db_size);
2056 			rw_exit(&db->db_rwlock);
2057 			arc_buf_freeze(db->db_buf);
2058 		}
2059 
2060 		mutex_exit(&db->db_mtx);
2061 	}
2062 
2063 	mutex_exit(&dn->dn_dbufs_mtx);
2064 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
2065 }
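
/*
 * Caller sketch (illustrative; dnode_free_range() is the expected entry
 * point): convert a byte range into level-0 block ids, assuming here a
 * power-of-two data block size, and clear the dbufs covering it under an
 * assigned transaction:
 *
 *	uint64_t first = off >> dn->dn_datablkshift;
 *	uint64_t last = (off + len - 1) >> dn->dn_datablkshift;
 *	dbuf_free_range(dn, first, last, tx);
 */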
2066 
2067 void
2068 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2069 {
2070 	arc_buf_t *buf, *old_buf;
2071 	dbuf_dirty_record_t *dr;
2072 	int osize = db->db.db_size;
2073 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2074 	dnode_t *dn;
2075 
2076 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2077 
2078 	DB_DNODE_ENTER(db);
2079 	dn = DB_DNODE(db);
2080 
2081 	/*
2082 	 * XXX we should be doing a dbuf_read, checking the return
2083 	 * value and returning that up to our callers
2084 	 */
2085 	dmu_buf_will_dirty(&db->db, tx);
2086 
2087 	/* create the data buffer for the new block */
2088 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2089 
2090 	/* copy old block data to the new block */
2091 	old_buf = db->db_buf;
2092 	memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2093 	/* zero the remainder */
2094 	if (size > osize)
2095 		memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2096 
2097 	mutex_enter(&db->db_mtx);
2098 	dbuf_set_data(db, buf);
2099 	arc_buf_destroy(old_buf, db);
2100 	db->db.db_size = size;
2101 
2102 	dr = list_head(&db->db_dirty_records);
2103 	/* dirty record added by dmu_buf_will_dirty() */
2104 	VERIFY(dr != NULL);
2105 	if (db->db_level == 0)
2106 		dr->dt.dl.dr_data = buf;
2107 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2108 	ASSERT3U(dr->dr_accounted, ==, osize);
2109 	dr->dr_accounted = size;
2110 	mutex_exit(&db->db_mtx);
2111 
2112 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2113 	DB_DNODE_EXIT(db);
2114 }
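
/*
 * Worked example (illustrative): growing a 512-byte block to 4K copies the
 * original 512 bytes and zero-fills the remaining 3584; shrinking keeps
 * only the leading bytes.  Either way the dirty record's dr_accounted is
 * re-charged to the new size:
 *
 *	dbuf_new_size(db, 4096, tx);	(osize == 512: tail is zeroed)
 */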
2115 
2116 void
2117 dbuf_release_bp(dmu_buf_impl_t *db)
2118 {
2119 	objset_t *os __maybe_unused = db->db_objset;
2120 
2121 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2122 	ASSERT(arc_released(os->os_phys_buf) ||
2123 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
2124 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2125 
2126 	(void) arc_release(db->db_buf, db);
2127 }
2128 
2129 /*
2130  * We already have a dirty record for this TXG, and we are being
2131  * dirtied again.
2132  */
2133 static void
2134 dbuf_redirty(dbuf_dirty_record_t *dr)
2135 {
2136 	dmu_buf_impl_t *db = dr->dr_dbuf;
2137 
2138 	ASSERT(MUTEX_HELD(&db->db_mtx));
2139 
2140 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2141 		/*
2142 		 * If this buffer has already been written out,
2143 		 * we now need to reset its state.
2144 		 */
2145 		dbuf_unoverride(dr);
2146 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2147 		    db->db_state != DB_NOFILL) {
2148 			/* Already released on initial dirty, so just thaw. */
2149 			ASSERT(arc_released(db->db_buf));
2150 			arc_buf_thaw(db->db_buf);
2151 		}
2152 	}
2153 }
2154 
2155 dbuf_dirty_record_t *
2156 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2157 {
2158 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2159 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2160 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2161 	ASSERT(dn->dn_maxblkid >= blkid);
2162 
2163 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2164 	list_link_init(&dr->dr_dirty_node);
2165 	list_link_init(&dr->dr_dbuf_node);
2166 	dr->dr_dnode = dn;
2167 	dr->dr_txg = tx->tx_txg;
2168 	dr->dt.dll.dr_blkid = blkid;
2169 	dr->dr_accounted = dn->dn_datablksz;
2170 
2171 	/*
2172 	 * There should not be any dbuf for the block that we're dirtying.
2173 	 * Otherwise the buffer contents could be inconsistent between the
2174 	 * dbuf and the lightweight dirty record.
2175 	 */
2176 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2177 	    NULL));
2178 
2179 	mutex_enter(&dn->dn_mtx);
2180 	int txgoff = tx->tx_txg & TXG_MASK;
2181 	if (dn->dn_free_ranges[txgoff] != NULL) {
2182 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2183 	}
2184 
2185 	if (dn->dn_nlevels == 1) {
2186 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2187 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2188 		mutex_exit(&dn->dn_mtx);
2189 		rw_exit(&dn->dn_struct_rwlock);
2190 		dnode_setdirty(dn, tx);
2191 	} else {
2192 		mutex_exit(&dn->dn_mtx);
2193 
2194 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2195 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2196 		    1, blkid >> epbs, FTAG);
2197 		rw_exit(&dn->dn_struct_rwlock);
2198 		if (parent_db == NULL) {
2199 			kmem_free(dr, sizeof (*dr));
2200 			return (NULL);
2201 		}
2202 		int err = dbuf_read(parent_db, NULL,
2203 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2204 		if (err != 0) {
2205 			dbuf_rele(parent_db, FTAG);
2206 			kmem_free(dr, sizeof (*dr));
2207 			return (NULL);
2208 		}
2209 
2210 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2211 		dbuf_rele(parent_db, FTAG);
2212 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2213 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2214 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2215 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2216 		dr->dr_parent = parent_dr;
2217 	}
2218 
2219 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2220 
2221 	return (dr);
2222 }
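
/*
 * Usage sketch (illustrative): a lightweight dirty record carries a write
 * without instantiating a level-0 dbuf.  Callers must treat a NULL return
 * (the indirect parent could not be held or read) as an I/O error:
 *
 *	dbuf_dirty_record_t *dr = dbuf_dirty_lightweight(dn, blkid, tx);
 *	if (dr == NULL)
 *		return (SET_ERROR(EIO));
 *	(then fill in dr->dt.dll, e.g. the data abd, before the txg syncs)
 */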
2223 
2224 dbuf_dirty_record_t *
2225 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2226 {
2227 	dnode_t *dn;
2228 	objset_t *os;
2229 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2230 	int txgoff = tx->tx_txg & TXG_MASK;
2231 	boolean_t drop_struct_rwlock = B_FALSE;
2232 
2233 	ASSERT(tx->tx_txg != 0);
2234 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2235 	DMU_TX_DIRTY_BUF(tx, db);
2236 
2237 	DB_DNODE_ENTER(db);
2238 	dn = DB_DNODE(db);
2239 	/*
2240 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2241 	 * objects may be dirtied in syncing context, but only if they
2242 	 * were already pre-dirtied in open context.
2243 	 */
2244 #ifdef ZFS_DEBUG
2245 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2246 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2247 		    RW_READER, FTAG);
2248 	}
2249 	ASSERT(!dmu_tx_is_syncing(tx) ||
2250 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2251 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2252 	    dn->dn_objset->os_dsl_dataset == NULL);
2253 	if (dn->dn_objset->os_dsl_dataset != NULL)
2254 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2255 #endif
2256 	/*
2257 	 * We make this assert for private objects as well, but after we
2258 	 * check if we're already dirty.  They are allowed to re-dirty
2259 	 * in syncing context.
2260 	 */
2261 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2262 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2263 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2264 
2265 	mutex_enter(&db->db_mtx);
2266 	/*
2267 	 * XXX make this true for indirects too?  The problem is that
2268 	 * transactions created with dmu_tx_create_assigned() from
2269 	 * syncing context don't bother holding ahead.
2270 	 */
2271 	ASSERT(db->db_level != 0 ||
2272 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2273 	    db->db_state == DB_NOFILL);
2274 
2275 	mutex_enter(&dn->dn_mtx);
2276 	dnode_set_dirtyctx(dn, tx, db);
2277 	if (tx->tx_txg > dn->dn_dirty_txg)
2278 		dn->dn_dirty_txg = tx->tx_txg;
2279 	mutex_exit(&dn->dn_mtx);
2280 
2281 	if (db->db_blkid == DMU_SPILL_BLKID)
2282 		dn->dn_have_spill = B_TRUE;
2283 
2284 	/*
2285 	 * If this buffer is already dirty, we're done.
2286 	 */
2287 	dr_head = list_head(&db->db_dirty_records);
2288 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2289 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2290 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2291 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2292 		DB_DNODE_EXIT(db);
2293 
2294 		dbuf_redirty(dr_next);
2295 		mutex_exit(&db->db_mtx);
2296 		return (dr_next);
2297 	}
2298 
2299 	/*
2300 	 * Only valid if not already dirty.
2301 	 */
2302 	ASSERT(dn->dn_object == 0 ||
2303 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2304 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2305 
2306 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2307 
2308 	/*
2309 	 * We should only be dirtying in syncing context if it's the
2310 	 * mos or we're initializing the os or it's a special object.
2311 	 * However, we are allowed to dirty in syncing context provided
2312 	 * we already dirtied it in open context.  Hence we must make
2313 	 * this assertion only if we're not already dirty.
2314 	 */
2315 	os = dn->dn_objset;
2316 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2317 #ifdef ZFS_DEBUG
2318 	if (dn->dn_objset->os_dsl_dataset != NULL)
2319 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2320 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2321 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2322 	if (dn->dn_objset->os_dsl_dataset != NULL)
2323 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2324 #endif
2325 	ASSERT(db->db.db_size != 0);
2326 
2327 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2328 
2329 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2330 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2331 	}
2332 
2333 	/*
2334 	 * If this buffer is dirty in an old transaction group we need
2335 	 * to make a copy of it so that the changes we make in this
2336 	 * transaction group won't leak out when we sync the older txg.
2337 	 */
2338 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2339 	list_link_init(&dr->dr_dirty_node);
2340 	list_link_init(&dr->dr_dbuf_node);
2341 	dr->dr_dnode = dn;
2342 	if (db->db_level == 0) {
2343 		void *data_old = db->db_buf;
2344 
2345 		if (db->db_state != DB_NOFILL) {
2346 			if (db->db_blkid == DMU_BONUS_BLKID) {
2347 				dbuf_fix_old_data(db, tx->tx_txg);
2348 				data_old = db->db.db_data;
2349 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2350 				/*
2351 				 * Release the data buffer from the cache so
2352 				 * that we can modify it without impacting
2353 				 * possible other users of this cached data
2354 				 * block.  Note that indirect blocks and
2355 				 * private objects are not released until the
2356 				 * syncing state (since they are only modified
2357 				 * then).
2358 				 */
2359 				arc_release(db->db_buf, db);
2360 				dbuf_fix_old_data(db, tx->tx_txg);
2361 				data_old = db->db_buf;
2362 			}
2363 			ASSERT(data_old != NULL);
2364 		}
2365 		dr->dt.dl.dr_data = data_old;
2366 	} else {
2367 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2368 		list_create(&dr->dt.di.dr_children,
2369 		    sizeof (dbuf_dirty_record_t),
2370 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2371 	}
2372 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2373 		dr->dr_accounted = db->db.db_size;
2374 	}
2375 	dr->dr_dbuf = db;
2376 	dr->dr_txg = tx->tx_txg;
2377 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2378 
2379 	/*
2380 	 * We could have been freed_in_flight between the dbuf_noread
2381 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2382 	 * happened after the free.
2383 	 */
2384 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2385 	    db->db_blkid != DMU_SPILL_BLKID) {
2386 		mutex_enter(&dn->dn_mtx);
2387 		if (dn->dn_free_ranges[txgoff] != NULL) {
2388 			range_tree_clear(dn->dn_free_ranges[txgoff],
2389 			    db->db_blkid, 1);
2390 		}
2391 		mutex_exit(&dn->dn_mtx);
2392 		db->db_freed_in_flight = FALSE;
2393 	}
2394 
2395 	/*
2396 	 * This buffer is now part of this txg
2397 	 */
2398 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2399 	db->db_dirtycnt += 1;
2400 	ASSERT3U(db->db_dirtycnt, <=, 3);
2401 
2402 	mutex_exit(&db->db_mtx);
2403 
2404 	if (db->db_blkid == DMU_BONUS_BLKID ||
2405 	    db->db_blkid == DMU_SPILL_BLKID) {
2406 		mutex_enter(&dn->dn_mtx);
2407 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2408 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2409 		mutex_exit(&dn->dn_mtx);
2410 		dnode_setdirty(dn, tx);
2411 		DB_DNODE_EXIT(db);
2412 		return (dr);
2413 	}
2414 
2415 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2416 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2417 		drop_struct_rwlock = B_TRUE;
2418 	}
2419 
2420 	/*
2421 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2422 	 * when we get to syncing context we will need to decrement its
2423 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2424 	 * syncing context won't have to wait for the i/o.
2425 	 */
2426 	if (db->db_blkptr != NULL) {
2427 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2428 		ddt_prefetch(os->os_spa, db->db_blkptr);
2429 		dmu_buf_unlock_parent(db, dblt, FTAG);
2430 	}
2431 
2432 	/*
2433 	 * We need to hold the dn_struct_rwlock to make this assertion,
2434 	 * because it protects dn_phys / dn_next_nlevels from changing.
2435 	 */
2436 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2437 	    dn->dn_phys->dn_nlevels > db->db_level ||
2438 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2439 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2440 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2441 
2442 
2443 	if (db->db_level == 0) {
2444 		ASSERT(!db->db_objset->os_raw_receive ||
2445 		    dn->dn_maxblkid >= db->db_blkid);
2446 		dnode_new_blkid(dn, db->db_blkid, tx,
2447 		    drop_struct_rwlock, B_FALSE);
2448 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2449 	}
2450 
2451 	if (db->db_level+1 < dn->dn_nlevels) {
2452 		dmu_buf_impl_t *parent = db->db_parent;
2453 		dbuf_dirty_record_t *di;
2454 		int parent_held = FALSE;
2455 
2456 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2457 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2458 			parent = dbuf_hold_level(dn, db->db_level + 1,
2459 			    db->db_blkid >> epbs, FTAG);
2460 			ASSERT(parent != NULL);
2461 			parent_held = TRUE;
2462 		}
2463 		if (drop_struct_rwlock)
2464 			rw_exit(&dn->dn_struct_rwlock);
2465 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2466 		di = dbuf_dirty(parent, tx);
2467 		if (parent_held)
2468 			dbuf_rele(parent, FTAG);
2469 
2470 		mutex_enter(&db->db_mtx);
2471 		/*
2472 		 * Since we've dropped the mutex, it's possible that
2473 		 * dbuf_undirty() might have changed this out from under us.
2474 		 */
2475 		if (list_head(&db->db_dirty_records) == dr ||
2476 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2477 			mutex_enter(&di->dt.di.dr_mtx);
2478 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2479 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2480 			list_insert_tail(&di->dt.di.dr_children, dr);
2481 			mutex_exit(&di->dt.di.dr_mtx);
2482 			dr->dr_parent = di;
2483 		}
2484 		mutex_exit(&db->db_mtx);
2485 	} else {
2486 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2487 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2488 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2489 		mutex_enter(&dn->dn_mtx);
2490 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2491 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2492 		mutex_exit(&dn->dn_mtx);
2493 		if (drop_struct_rwlock)
2494 			rw_exit(&dn->dn_struct_rwlock);
2495 	}
2496 
2497 	dnode_setdirty(dn, tx);
2498 	DB_DNODE_EXIT(db);
2499 	return (dr);
2500 }
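
/*
 * Worked example of the parent recursion above (illustrative): with 128K
 * indirect blocks, epbs = dn_indblkshift - SPA_BLKPTRSHIFT = 17 - 7 = 10,
 * so each indirect block maps 1024 children.  Dirtying level-0 blkid 5000
 * recursively dirties its level-1 parent at blkid 5000 >> 10 = 4, and so
 * on, until reaching a block pointed to directly from the dnode.
 */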
2501 
2502 static void
2503 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2504 {
2505 	dmu_buf_impl_t *db = dr->dr_dbuf;
2506 
2507 	if (dr->dt.dl.dr_data != db->db.db_data) {
2508 		struct dnode *dn = dr->dr_dnode;
2509 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2510 
2511 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2512 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2513 	}
2514 	db->db_data_pending = NULL;
2515 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2516 	list_remove(&db->db_dirty_records, dr);
2517 	if (dr->dr_dbuf->db_level != 0) {
2518 		mutex_destroy(&dr->dt.di.dr_mtx);
2519 		list_destroy(&dr->dt.di.dr_children);
2520 	}
2521 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2522 	ASSERT3U(db->db_dirtycnt, >, 0);
2523 	db->db_dirtycnt -= 1;
2524 }
2525 
2526 /*
2527  * Undirty a buffer in the transaction group referenced by the given
2528  * transaction.  Return whether this evicted the dbuf.
2529  */
2530 boolean_t
2531 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2532 {
2533 	uint64_t txg = tx->tx_txg;
2534 	boolean_t brtwrite;
2535 
2536 	ASSERT(txg != 0);
2537 
2538 	/*
2539 	 * Due to our use of dn_nlevels below, this can only be called
2540 	 * in open context, unless we are operating on the MOS.
2541 	 * From syncing context, dn_nlevels may be different from the
2542 	 * dn_nlevels used when dbuf was dirtied.
2543 	 */
2544 	ASSERT(db->db_objset ==
2545 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2546 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2547 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2548 	ASSERT0(db->db_level);
2549 	ASSERT(MUTEX_HELD(&db->db_mtx));
2550 
2551 	/*
2552 	 * If this buffer is not dirty, we're done.
2553 	 */
2554 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2555 	if (dr == NULL)
2556 		return (B_FALSE);
2557 	ASSERT(dr->dr_dbuf == db);
2558 
2559 	brtwrite = dr->dt.dl.dr_brtwrite;
2560 	if (brtwrite) {
2561 		/*
2562 		 * We are freeing a block that we cloned in the same
2563 		 * transaction group.
2564 		 */
2565 		brt_pending_remove(dmu_objset_spa(db->db_objset),
2566 		    &dr->dt.dl.dr_overridden_by, tx);
2567 	}
2568 
2569 	dnode_t *dn = dr->dr_dnode;
2570 
2571 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2572 
2573 	ASSERT(db->db.db_size != 0);
2574 
2575 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2576 	    dr->dr_accounted, txg);
2577 
2578 	list_remove(&db->db_dirty_records, dr);
2579 
2580 	/*
2581 	 * Note that there are three places in dbuf_dirty()
2582 	 * where this dirty record may be put on a list.
2583 	 * Make sure to do a list_remove corresponding to
2584 	 * every one of those list_insert calls.
2585 	 */
2586 	if (dr->dr_parent) {
2587 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2588 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2589 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2590 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2591 	    db->db_level + 1 == dn->dn_nlevels) {
2592 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2593 		mutex_enter(&dn->dn_mtx);
2594 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2595 		mutex_exit(&dn->dn_mtx);
2596 	}
2597 
2598 	if (db->db_state != DB_NOFILL && !brtwrite) {
2599 		dbuf_unoverride(dr);
2600 
2601 		ASSERT(db->db_buf != NULL);
2602 		ASSERT(dr->dt.dl.dr_data != NULL);
2603 		if (dr->dt.dl.dr_data != db->db_buf)
2604 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2605 	}
2606 
2607 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2608 
2609 	ASSERT(db->db_dirtycnt > 0);
2610 	db->db_dirtycnt -= 1;
2611 
2612 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2613 		ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2614 		    arc_released(db->db_buf));
2615 		dbuf_destroy(db);
2616 		return (B_TRUE);
2617 	}
2618 
2619 	return (B_FALSE);
2620 }
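
/*
 * Caller sketch (illustrative, matching the dbuf_free_range() use above):
 * db_mtx must be held on entry; a B_TRUE return means the dbuf was
 * destroyed and the mutex dropped, so the caller must not touch db again:
 *
 *	mutex_enter(&db->db_mtx);
 *	if (dbuf_undirty(db, tx))
 *		return;		(db and its mutex are gone)
 *	mutex_exit(&db->db_mtx);
 */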
2621 
2622 static void
2623 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2624 {
2625 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2626 	boolean_t undirty = B_FALSE;
2627 
2628 	ASSERT(tx->tx_txg != 0);
2629 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2630 
2631 	/*
2632 	 * Quick check for dirtiness to improve performance for some workloads
2633 	 * (e.g. file deletion with indirect blocks cached).
2634 	 */
2635 	mutex_enter(&db->db_mtx);
2636 	if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2637 		/*
2638 		 * It's possible that the dbuf is already dirty but not cached,
2639 		 * because there are some calls to dbuf_dirty() that don't
2640 		 * go through dmu_buf_will_dirty().
2641 		 */
2642 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2643 		if (dr != NULL) {
2644 			if (db->db_level == 0 &&
2645 			    dr->dt.dl.dr_brtwrite) {
2646 				/*
2647 				 * Block cloning: If we are dirtying a cloned
2648 				 * level 0 block, we cannot simply redirty it,
2649 				 * because this dr has no associated data.
2650 				 * We will go through a full undirtying below,
2651 				 * before dirtying it again.
2652 				 */
2653 				undirty = B_TRUE;
2654 			} else {
2655 				/* This dbuf is already dirty and cached. */
2656 				dbuf_redirty(dr);
2657 				mutex_exit(&db->db_mtx);
2658 				return;
2659 			}
2660 		}
2661 	}
2662 	mutex_exit(&db->db_mtx);
2663 
2664 	DB_DNODE_ENTER(db);
2665 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2666 		flags |= DB_RF_HAVESTRUCT;
2667 	DB_DNODE_EXIT(db);
2668 
2669 	/*
2670 	 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2671 	 * want to make sure dbuf_read() will read the pending cloned block and
2672  * not the underlying block that is being replaced. dbuf_undirty() will
2673  * do dbuf_unoverride(), so we will end up with the cloned block content,
2674 	 * without overridden BP.
2675 	 */
2676 	(void) dbuf_read(db, NULL, flags);
2677 	if (undirty) {
2678 		mutex_enter(&db->db_mtx);
2679 		VERIFY(!dbuf_undirty(db, tx));
2680 		mutex_exit(&db->db_mtx);
2681 	}
2682 	(void) dbuf_dirty(db, tx);
2683 }
2684 
2685 void
2686 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2687 {
2688 	dmu_buf_will_dirty_impl(db_fake,
2689 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2690 }
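
/*
 * Usage sketch (illustrative, simplified from typical DMU consumers; "src"
 * is a hypothetical data source): a buffer must be declared dirty under an
 * assigned transaction before its contents may be modified:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_buf_will_dirty(&db->db, tx);
 *	memcpy(db->db.db_data, src, db->db.db_size);
 *	dmu_tx_commit(tx);
 */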
2691 
2692 boolean_t
2693 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2694 {
2695 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2696 	dbuf_dirty_record_t *dr;
2697 
2698 	mutex_enter(&db->db_mtx);
2699 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2700 	mutex_exit(&db->db_mtx);
2701 	return (dr != NULL);
2702 }
2703 
2704 void
2705 dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
2706 {
2707 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2708 
2709 	/*
2710 	 * Block cloning: We are going to clone into this block, so undirty
2711 	 * modifications done to this block so far in this txg. This includes
2712 	 * writes and clones into this block.
2713 	 */
2714 	mutex_enter(&db->db_mtx);
2715 	DBUF_VERIFY(db);
2716 	VERIFY(!dbuf_undirty(db, tx));
2717 	ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
2718 	if (db->db_buf != NULL) {
2719 		arc_buf_destroy(db->db_buf, db);
2720 		db->db_buf = NULL;
2721 		dbuf_clear_data(db);
2722 	}
2723 
2724 	db->db_state = DB_NOFILL;
2725 	DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone");
2726 
2727 	DBUF_VERIFY(db);
2728 	mutex_exit(&db->db_mtx);
2729 
2730 	dbuf_noread(db);
2731 	(void) dbuf_dirty(db, tx);
2732 }
2733 
2734 void
2735 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2736 {
2737 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2738 
2739 	mutex_enter(&db->db_mtx);
2740 	db->db_state = DB_NOFILL;
2741 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2742 	mutex_exit(&db->db_mtx);
2743 
2744 	dbuf_noread(db);
2745 	(void) dbuf_dirty(db, tx);
2746 }
2747 
2748 void
2749 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
2750 {
2751 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2752 
2753 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2754 	ASSERT(tx->tx_txg != 0);
2755 	ASSERT(db->db_level == 0);
2756 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2757 
2758 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2759 	    dmu_tx_private_ok(tx));
2760 
2761 	mutex_enter(&db->db_mtx);
2762 	if (db->db_state == DB_NOFILL) {
2763 		/*
2764 		 * Block cloning: We will be completely overwriting a block
2765 		 * cloned in this transaction group, so let's undirty the
2766 		 * pending clone and mark the block as uncached. This will be
2767 		 * as if the clone was never done.  But if the fill can fail
2768 		 * we need a way to fall back to the cloned data.
2769 		 */
2770 		if (canfail && dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) {
2771 			mutex_exit(&db->db_mtx);
2772 			dmu_buf_will_dirty(db_fake, tx);
2773 			return;
2774 		}
2775 		VERIFY(!dbuf_undirty(db, tx));
2776 		db->db_state = DB_UNCACHED;
2777 	}
2778 	mutex_exit(&db->db_mtx);
2779 
2780 	dbuf_noread(db);
2781 	(void) dbuf_dirty(db, tx);
2782 }
2783 
2784 /*
2785  * This function is effectively the same as dmu_buf_will_dirty(), but
2786  * indicates the caller expects raw encrypted data in the db, and provides
2787  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2788  * blkptr_t when this dbuf is written.  This is only used for blocks of
2789  * dnodes, during raw receive.
2790  */
2791 void
2792 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2793     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2794 {
2795 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2796 	dbuf_dirty_record_t *dr;
2797 
2798 	/*
2799 	 * dr_has_raw_params is only processed for blocks of dnodes
2800 	 * (see dbuf_sync_dnode_leaf_crypt()).
2801 	 */
2802 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2803 	ASSERT3U(db->db_level, ==, 0);
2804 	ASSERT(db->db_objset->os_raw_receive);
2805 
2806 	dmu_buf_will_dirty_impl(db_fake,
2807 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2808 
2809 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2810 
2811 	ASSERT3P(dr, !=, NULL);
2812 
2813 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2814 	dr->dt.dl.dr_byteorder = byteorder;
2815 	memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2816 	memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2817 	memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2818 }
2819 
2820 static void
2821 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2822 {
2823 	struct dirty_leaf *dl;
2824 	dbuf_dirty_record_t *dr;
2825 
2826 	dr = list_head(&db->db_dirty_records);
2827 	ASSERT3P(dr, !=, NULL);
2828 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2829 	dl = &dr->dt.dl;
2830 	dl->dr_overridden_by = *bp;
2831 	dl->dr_override_state = DR_OVERRIDDEN;
2832 	BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
2833 }
2834 
2835 boolean_t
2836 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
2837 {
2838 	(void) tx;
2839 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2840 	mutex_enter(&db->db_mtx);
2841 	DBUF_VERIFY(db);
2842 
2843 	if (db->db_state == DB_FILL) {
2844 		if (db->db_level == 0 && db->db_freed_in_flight) {
2845 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2846 			/* we were freed while filling */
2847 			/* XXX dbuf_undirty? */
2848 			memset(db->db.db_data, 0, db->db.db_size);
2849 			db->db_freed_in_flight = FALSE;
2850 			db->db_state = DB_CACHED;
2851 			DTRACE_SET_STATE(db,
2852 			    "fill done handling freed in flight");
2853 			failed = B_FALSE;
2854 		} else if (failed) {
2855 			VERIFY(!dbuf_undirty(db, tx));
2856 			arc_buf_destroy(db->db_buf, db);
2857 			db->db_buf = NULL;
2858 			dbuf_clear_data(db);
2859 			DTRACE_SET_STATE(db, "fill failed");
2860 		} else {
2861 			db->db_state = DB_CACHED;
2862 			DTRACE_SET_STATE(db, "fill done");
2863 		}
2864 		cv_broadcast(&db->db_changed);
2865 	} else {
2866 		db->db_state = DB_CACHED;
2867 		failed = B_FALSE;
2868 	}
2869 	mutex_exit(&db->db_mtx);
2870 	return (failed);
2871 }
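
/*
 * Fill-protocol sketch (illustrative; copy_in() is a hypothetical copy
 * step): dmu_buf_will_fill() and dmu_buf_fill_done() bracket a complete
 * overwrite of the block, and with canfail set a failed copy-in can be
 * rolled back:
 *
 *	dmu_buf_will_fill(dbuf, tx, B_TRUE);
 *	err = copy_in(dbuf->db_data, dbuf->db_size);
 *	if (dmu_buf_fill_done(dbuf, tx, err != 0))
 *		(the fill failed; the buffer contents were discarded)
 */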
2872 
2873 void
2874 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2875     bp_embedded_type_t etype, enum zio_compress comp,
2876     int uncompressed_size, int compressed_size, int byteorder,
2877     dmu_tx_t *tx)
2878 {
2879 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2880 	struct dirty_leaf *dl;
2881 	dmu_object_type_t type;
2882 	dbuf_dirty_record_t *dr;
2883 
2884 	if (etype == BP_EMBEDDED_TYPE_DATA) {
2885 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2886 		    SPA_FEATURE_EMBEDDED_DATA));
2887 	}
2888 
2889 	DB_DNODE_ENTER(db);
2890 	type = DB_DNODE(db)->dn_type;
2891 	DB_DNODE_EXIT(db);
2892 
2893 	ASSERT0(db->db_level);
2894 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2895 
2896 	dmu_buf_will_not_fill(dbuf, tx);
2897 
2898 	dr = list_head(&db->db_dirty_records);
2899 	ASSERT3P(dr, !=, NULL);
2900 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2901 	dl = &dr->dt.dl;
2902 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
2903 	    data, comp, uncompressed_size, compressed_size);
2904 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2905 	BP_SET_TYPE(&dl->dr_overridden_by, type);
2906 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2907 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2908 
2909 	dl->dr_override_state = DR_OVERRIDDEN;
2910 	BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
2911 }
2912 
2913 void
2914 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2915 {
2916 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2917 	dmu_object_type_t type;
2918 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2919 	    SPA_FEATURE_REDACTED_DATASETS));
2920 
2921 	DB_DNODE_ENTER(db);
2922 	type = DB_DNODE(db)->dn_type;
2923 	DB_DNODE_EXIT(db);
2924 
2925 	ASSERT0(db->db_level);
2926 	dmu_buf_will_not_fill(dbuf, tx);
2927 
2928 	blkptr_t bp = { { { {0} } } };
2929 	BP_SET_TYPE(&bp, type);
2930 	BP_SET_LEVEL(&bp, 0);
2931 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2932 	BP_SET_REDACTED(&bp);
2933 	BPE_SET_LSIZE(&bp, dbuf->db_size);
2934 
2935 	dbuf_override_impl(db, &bp, tx);
2936 }
2937 
2938 /*
2939  * Directly assign a provided arc buf to a given dbuf if it's not referenced
2940  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2941  */
2942 void
2943 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2944 {
2945 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2946 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2947 	ASSERT(db->db_level == 0);
2948 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2949 	ASSERT(buf != NULL);
2950 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2951 	ASSERT(tx->tx_txg != 0);
2952 
2953 	arc_return_buf(buf, db);
2954 	ASSERT(arc_released(buf));
2955 
2956 	mutex_enter(&db->db_mtx);
2957 
2958 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
2959 		cv_wait(&db->db_changed, &db->db_mtx);
2960 
2961 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
2962 	    db->db_state == DB_NOFILL);
2963 
2964 	if (db->db_state == DB_CACHED &&
2965 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2966 		/*
2967 		 * In practice, we will never have a case where we have an
2968 		 * encrypted arc buffer while additional holds exist on the
2969 		 * dbuf. We don't handle this here so we simply assert that
2970 		 * fact instead.
2971 		 */
2972 		ASSERT(!arc_is_encrypted(buf));
2973 		mutex_exit(&db->db_mtx);
2974 		(void) dbuf_dirty(db, tx);
2975 		memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2976 		arc_buf_destroy(buf, db);
2977 		return;
2978 	}
2979 
2980 	if (db->db_state == DB_CACHED) {
2981 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2982 
2983 		ASSERT(db->db_buf != NULL);
2984 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2985 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
2986 
2987 			if (!arc_released(db->db_buf)) {
2988 				ASSERT(dr->dt.dl.dr_override_state ==
2989 				    DR_OVERRIDDEN);
2990 				arc_release(db->db_buf, db);
2991 			}
2992 			dr->dt.dl.dr_data = buf;
2993 			arc_buf_destroy(db->db_buf, db);
2994 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2995 			arc_release(db->db_buf, db);
2996 			arc_buf_destroy(db->db_buf, db);
2997 		}
2998 		db->db_buf = NULL;
2999 	} else if (db->db_state == DB_NOFILL) {
3000 		/*
3001 		 * We will be completely replacing the cloned block.  In case
3002 		 * it was cloned in this transaction group, let's undirty the
3003 		 * pending clone and mark the block as uncached. This will be
3004 		 * as if the clone was never done.
3005 		 */
3006 		VERIFY(!dbuf_undirty(db, tx));
3007 		db->db_state = DB_UNCACHED;
3008 	}
3009 	ASSERT(db->db_buf == NULL);
3010 	dbuf_set_data(db, buf);
3011 	db->db_state = DB_FILL;
3012 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
3013 	mutex_exit(&db->db_mtx);
3014 	(void) dbuf_dirty(db, tx);
3015 	dmu_buf_fill_done(&db->db, tx, B_FALSE);
3016 }
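
/*
 * Usage sketch (illustrative; fill_data() is a hypothetical producer): the
 * zero-copy write path loans an ARC buffer, fills it, and assigns it here,
 * avoiding a copy whenever the dbuf has no other holders:
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, B_FALSE, size);
 *	fill_data(buf->b_data, size);
 *	dbuf_assign_arcbuf(db, buf, tx);
 */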
3017 
3018 void
3019 dbuf_destroy(dmu_buf_impl_t *db)
3020 {
3021 	dnode_t *dn;
3022 	dmu_buf_impl_t *parent = db->db_parent;
3023 	dmu_buf_impl_t *dndb;
3024 
3025 	ASSERT(MUTEX_HELD(&db->db_mtx));
3026 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3027 
3028 	if (db->db_buf != NULL) {
3029 		arc_buf_destroy(db->db_buf, db);
3030 		db->db_buf = NULL;
3031 	}
3032 
3033 	if (db->db_blkid == DMU_BONUS_BLKID) {
3034 		int slots = DB_DNODE(db)->dn_num_slots;
3035 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
3036 		if (db->db.db_data != NULL) {
3037 			kmem_free(db->db.db_data, bonuslen);
3038 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
3039 			db->db_state = DB_UNCACHED;
3040 			DTRACE_SET_STATE(db, "buffer cleared");
3041 		}
3042 	}
3043 
3044 	dbuf_clear_data(db);
3045 
3046 	if (multilist_link_active(&db->db_cache_link)) {
3047 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3048 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3049 
3050 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3051 
3052 		ASSERT0(dmu_buf_user_size(&db->db));
3053 		(void) zfs_refcount_remove_many(
3054 		    &dbuf_caches[db->db_caching_status].size,
3055 		    db->db.db_size, db);
3056 
3057 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3058 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3059 		} else {
3060 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3061 			DBUF_STAT_BUMPDOWN(cache_count);
3062 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3063 			    db->db.db_size);
3064 		}
3065 		db->db_caching_status = DB_NO_CACHE;
3066 	}
3067 
3068 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3069 	ASSERT(db->db_data_pending == NULL);
3070 	ASSERT(list_is_empty(&db->db_dirty_records));
3071 
3072 	db->db_state = DB_EVICTING;
3073 	DTRACE_SET_STATE(db, "buffer eviction started");
3074 	db->db_blkptr = NULL;
3075 
3076 	/*
3077 	 * Now that db_state is DB_EVICTING, nobody else can find this via
3078 	 * the hash table.  We can now drop db_mtx, which allows us to
3079 	 * acquire the dn_dbufs_mtx.
3080 	 */
3081 	mutex_exit(&db->db_mtx);
3082 
3083 	DB_DNODE_ENTER(db);
3084 	dn = DB_DNODE(db);
3085 	dndb = dn->dn_dbuf;
3086 	if (db->db_blkid != DMU_BONUS_BLKID) {
3087 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3088 		if (needlock)
3089 			mutex_enter_nested(&dn->dn_dbufs_mtx,
3090 			    NESTED_SINGLE);
3091 		avl_remove(&dn->dn_dbufs, db);
3092 		membar_producer();
3093 		DB_DNODE_EXIT(db);
3094 		if (needlock)
3095 			mutex_exit(&dn->dn_dbufs_mtx);
3096 		/*
3097 		 * Decrementing the dbuf count means that the hold corresponding
3098 		 * to the removed dbuf is no longer discounted in dnode_move(),
3099 		 * so the dnode cannot be moved until after we release the hold.
3100 		 * The membar_producer() ensures visibility of the decremented
3101 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3102 		 * release any lock.
3103 		 */
3104 		mutex_enter(&dn->dn_mtx);
3105 		dnode_rele_and_unlock(dn, db, B_TRUE);
3106 		db->db_dnode_handle = NULL;
3107 
3108 		dbuf_hash_remove(db);
3109 	} else {
3110 		DB_DNODE_EXIT(db);
3111 	}
3112 
3113 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3114 
3115 	db->db_parent = NULL;
3116 
3117 	ASSERT(db->db_buf == NULL);
3118 	ASSERT(db->db.db_data == NULL);
3119 	ASSERT(db->db_hash_next == NULL);
3120 	ASSERT(db->db_blkptr == NULL);
3121 	ASSERT(db->db_data_pending == NULL);
3122 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3123 	ASSERT(!multilist_link_active(&db->db_cache_link));
3124 
3125 	/*
3126 	 * If this dbuf is referenced from an indirect dbuf,
3127 	 * decrement the ref count on the indirect dbuf.
3128 	 */
3129 	if (parent && parent != dndb) {
3130 		mutex_enter(&parent->db_mtx);
3131 		dbuf_rele_and_unlock(parent, db, B_TRUE);
3132 	}
3133 
3134 	kmem_cache_free(dbuf_kmem_cache, db);
3135 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3136 }
3137 
3138 /*
3139  * Note: While bpp will always be updated if the function returns success,
3140  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3141  * this happens when the dnode is the meta-dnode, or {user|group|project}used
3142  * object.
3143  */
3144 __attribute__((always_inline))
3145 static inline int
3146 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3147     dmu_buf_impl_t **parentp, blkptr_t **bpp)
3148 {
3149 	*parentp = NULL;
3150 	*bpp = NULL;
3151 
3152 	ASSERT(blkid != DMU_BONUS_BLKID);
3153 
3154 	if (blkid == DMU_SPILL_BLKID) {
3155 		mutex_enter(&dn->dn_mtx);
3156 		if (dn->dn_have_spill &&
3157 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3158 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3159 		else
3160 			*bpp = NULL;
3161 		dbuf_add_ref(dn->dn_dbuf, NULL);
3162 		*parentp = dn->dn_dbuf;
3163 		mutex_exit(&dn->dn_mtx);
3164 		return (0);
3165 	}
3166 
3167 	int nlevels =
3168 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3169 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3170 
3171 	ASSERT3U(level * epbs, <, 64);
3172 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3173 	/*
3174 	 * This assertion shouldn't trip as long as the max indirect block size
3175 	 * is less than 1M.  The reason for this is that up to that point,
3176 	 * the number of levels required to address an entire object with blocks
3177 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
3178 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3179 	 * (i.e. we can address the entire object), objects will all use at most
3180 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
3181 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
3182 	 * enough to address an entire object, so objects will have 5 levels,
3183 	 * but then this assertion will overflow.
3184 	 *
3185 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3186 	 * need to redo this logic to handle overflows.
3187 	 */
3188 	ASSERT(level >= nlevels ||
3189 	    ((nlevels - level - 1) * epbs) +
3190 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3191 	if (level >= nlevels ||
3192 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3193 	    ((nlevels - level - 1) * epbs)) ||
3194 	    (fail_sparse &&
3195 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3196 		/* the buffer has no parent yet */
3197 		return (SET_ERROR(ENOENT));
3198 	} else if (level < nlevels-1) {
3199 		/* this block is referenced from an indirect block */
3200 		int err;
3201 
3202 		err = dbuf_hold_impl(dn, level + 1,
3203 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3204 
3205 		if (err)
3206 			return (err);
3207 		err = dbuf_read(*parentp, NULL,
3208 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3209 		if (err) {
3210 			dbuf_rele(*parentp, NULL);
3211 			*parentp = NULL;
3212 			return (err);
3213 		}
3214 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
3215 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3216 		    (blkid & ((1ULL << epbs) - 1));
3217 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3218 			ASSERT(BP_IS_HOLE(*bpp));
3219 		rw_exit(&(*parentp)->db_rwlock);
3220 		return (0);
3221 	} else {
3222 		/* the block is referenced from the dnode */
3223 		ASSERT3U(level, ==, nlevels-1);
3224 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3225 		    blkid < dn->dn_phys->dn_nblkptr);
3226 		if (dn->dn_dbuf) {
3227 			dbuf_add_ref(dn->dn_dbuf, NULL);
3228 			*parentp = dn->dn_dbuf;
3229 		}
3230 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
3231 		return (0);
3232 	}
3233 }
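/*
 * Illustrative example of the addressing math above (the constants are
 * assumptions, not requirements): with the common 128K indirect block
 * size, dn_indblkshift = 17 and SPA_BLKPTRSHIFT = 7, so epbs = 10 and
 * each indirect block holds 1024 block pointers.  Then for a level-0
 * block with blkid 5000:
 *
 *	level-1 parent blkid = 5000 >> epbs = 4
 *	slot in that parent  = 5000 & ((1ULL << epbs) - 1) = 904
 *
 * which is exactly the "(blkid & ((1ULL << epbs) - 1))" indexing used
 * when the block is referenced from an indirect block.
 */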
3234 
3235 static dmu_buf_impl_t *
3236 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3237     dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3238 {
3239 	objset_t *os = dn->dn_objset;
3240 	dmu_buf_impl_t *db, *odb;
3241 
3242 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3243 	ASSERT(dn->dn_type != DMU_OT_NONE);
3244 
3245 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3246 
3247 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3248 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3249 
3250 	db->db_objset = os;
3251 	db->db.db_object = dn->dn_object;
3252 	db->db_level = level;
3253 	db->db_blkid = blkid;
3254 	db->db_dirtycnt = 0;
3255 	db->db_dnode_handle = dn->dn_handle;
3256 	db->db_parent = parent;
3257 	db->db_blkptr = blkptr;
3258 	db->db_hash = hash;
3259 
3260 	db->db_user = NULL;
3261 	db->db_user_immediate_evict = FALSE;
3262 	db->db_freed_in_flight = FALSE;
3263 	db->db_pending_evict = FALSE;
3264 
3265 	if (blkid == DMU_BONUS_BLKID) {
3266 		ASSERT3P(parent, ==, dn->dn_dbuf);
3267 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3268 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3269 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3270 		db->db.db_offset = DMU_BONUS_BLKID;
3271 		db->db_state = DB_UNCACHED;
3272 		DTRACE_SET_STATE(db, "bonus buffer created");
3273 		db->db_caching_status = DB_NO_CACHE;
3274 		/* the bonus dbuf is not placed in the hash table */
3275 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3276 		return (db);
3277 	} else if (blkid == DMU_SPILL_BLKID) {
3278 		db->db.db_size = (blkptr != NULL) ?
3279 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3280 		db->db.db_offset = 0;
3281 	} else {
3282 		int blocksize =
3283 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3284 		db->db.db_size = blocksize;
3285 		db->db.db_offset = db->db_blkid * blocksize;
3286 	}
3287 
3288 	/*
3289 	 * Hold the dn_dbufs_mtx while we get the new dbuf
3290 	 * in the hash table *and* added to the dbufs list.
3291 	 * This prevents a possible deadlock with someone
3292 	 * trying to look up this dbuf before it's added to the
3293 	 * dn_dbufs list.
3294 	 */
3295 	mutex_enter(&dn->dn_dbufs_mtx);
3296 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3297 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3298 		/* someone else inserted it first */
3299 		mutex_exit(&dn->dn_dbufs_mtx);
3300 		kmem_cache_free(dbuf_kmem_cache, db);
3301 		DBUF_STAT_BUMP(hash_insert_race);
3302 		return (odb);
3303 	}
3304 	avl_add(&dn->dn_dbufs, db);
3305 
3306 	db->db_state = DB_UNCACHED;
3307 	DTRACE_SET_STATE(db, "regular buffer created");
3308 	db->db_caching_status = DB_NO_CACHE;
3309 	mutex_exit(&dn->dn_dbufs_mtx);
3310 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3311 
3312 	if (parent && parent != dn->dn_dbuf)
3313 		dbuf_add_ref(parent, db);
3314 
3315 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3316 	    zfs_refcount_count(&dn->dn_holds) > 0);
3317 	(void) zfs_refcount_add(&dn->dn_holds, db);
3318 
3319 	dprintf_dbuf(db, "db=%p\n", db);
3320 
3321 	return (db);
3322 }
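/*
 * Illustrative numbers for the size/offset initialization above, assuming
 * dn_datablksz = 8192 and dn_indblkshift = 17:
 *
 *	level-0 dbuf, blkid 3:	db_size = 8192, db_offset = 3 * 8192 = 24576
 *	level-1 dbuf, blkid 3:	db_size = 131072, db_offset = 3 * 131072
 *
 * Bonus and spill dbufs skip this computation entirely; they are handled
 * by the DMU_BONUS_BLKID and DMU_SPILL_BLKID branches above.
 */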
3323 
3324 /*
3325  * This function returns a block pointer and information about the object,
3326  * given a dnode and a block.  This is a publicly accessible version of
3327  * dbuf_findbp that only returns some information, rather than the
3328  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3329  * should be locked as (at least) a reader.
3330  */
3331 int
3332 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3333     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3334 {
3335 	dmu_buf_impl_t *dbp = NULL;
3336 	blkptr_t *bp2;
3337 	int err = 0;
3338 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3339 
3340 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3341 	if (err == 0) {
3342 		ASSERT3P(bp2, !=, NULL);
3343 		*bp = *bp2;
3344 		if (dbp != NULL)
3345 			dbuf_rele(dbp, NULL);
3346 		if (datablkszsec != NULL)
3347 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3348 		if (indblkshift != NULL)
3349 			*indblkshift = dn->dn_phys->dn_indblkshift;
3350 	}
3351 
3352 	return (err);
3353 }
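/*
 * A minimal usage sketch for dbuf_dnode_findbp(); the dnode hold and blkid
 * are assumed to come from the caller's context:
 *
 *	blkptr_t bp;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_dnode_findbp(dn, 0, blkid, &bp, NULL, NULL);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * The trailing NULLs decline the optional datablkszsec and indblkshift
 * out-parameters.
 */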
3354 
3355 typedef struct dbuf_prefetch_arg {
3356 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3357 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3358 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3359 	int dpa_curlevel; /* The current level that we're reading */
3360 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3361 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3362 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3363 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3364 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3365 	void *dpa_arg; /* prefetch completion arg */
3366 } dbuf_prefetch_arg_t;
3367 
3368 static void
3369 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3370 {
3371 	if (dpa->dpa_cb != NULL) {
3372 		dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3373 		    dpa->dpa_zb.zb_blkid, io_done);
3374 	}
3375 	kmem_free(dpa, sizeof (*dpa));
3376 }
3377 
3378 static void
3379 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3380     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3381 {
3382 	(void) zio, (void) zb, (void) iobp;
3383 	dbuf_prefetch_arg_t *dpa = private;
3384 
3385 	if (abuf != NULL)
3386 		arc_buf_destroy(abuf, private);
3387 
3388 	dbuf_prefetch_fini(dpa, B_TRUE);
3389 }
3390 
3391 /*
3392  * Actually issue the prefetch read for the block given.
3393  */
3394 static void
3395 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3396 {
3397 	ASSERT(!BP_IS_REDACTED(bp) ||
3398 	    dsl_dataset_feature_is_active(
3399 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3400 	    SPA_FEATURE_REDACTED_DATASETS));
3401 
3402 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3403 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3404 
3405 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3406 	arc_flags_t aflags =
3407 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3408 	    ARC_FLAG_NO_BUF;
3409 
3410 	/* dnodes are always read as raw and then converted later */
3411 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3412 	    dpa->dpa_curlevel == 0)
3413 		zio_flags |= ZIO_FLAG_RAW;
3414 
3415 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3416 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3417 	ASSERT(dpa->dpa_zio != NULL);
3418 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3419 	    dbuf_issue_final_prefetch_done, dpa,
3420 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3421 }
3422 
3423 /*
3424  * Called when an indirect block above our prefetch target is read in.  This
3425  * will either read in the next indirect block down the tree or issue the actual
3426  * prefetch if the next block down is our target.
3427  */
3428 static void
3429 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3430     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3431 {
3432 	(void) zb, (void) iobp;
3433 	dbuf_prefetch_arg_t *dpa = private;
3434 
3435 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3436 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3437 
3438 	if (abuf == NULL) {
3439 		ASSERT(zio == NULL || zio->io_error != 0);
3440 		dbuf_prefetch_fini(dpa, B_TRUE);
3441 		return;
3442 	}
3443 	ASSERT(zio == NULL || zio->io_error == 0);
3444 
3445 	/*
3446 	 * The dpa_dnode is only valid if we are called with a NULL
3447 	 * zio. This indicates that the arc_read() returned without
3448 	 * first calling zio_read() to issue a physical read. Once
3449 	 * a physical read is made the dpa_dnode must be invalidated
3450 	 * as the locks guarding it may have been dropped. If the
3451 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3452 	 * cache. To do so, we must hold the dbuf associated with the block
3453 	 * we just prefetched, read its contents so that we associate it
3454 	 * with an arc_buf_t, and then release it.
3455 	 */
3456 	if (zio != NULL) {
3457 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3458 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3459 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3460 		} else {
3461 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3462 		}
3463 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3464 
3465 		dpa->dpa_dnode = NULL;
3466 	} else if (dpa->dpa_dnode != NULL) {
3467 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3468 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3469 		    dpa->dpa_zb.zb_level));
3470 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3471 		    dpa->dpa_curlevel, curblkid, FTAG);
3472 		if (db == NULL) {
3473 			arc_buf_destroy(abuf, private);
3474 			dbuf_prefetch_fini(dpa, B_TRUE);
3475 			return;
3476 		}
3477 		(void) dbuf_read(db, NULL,
3478 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3479 		dbuf_rele(db, FTAG);
3480 	}
3481 
3482 	dpa->dpa_curlevel--;
3483 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3484 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3485 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3486 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3487 
3488 	ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3489 	    dsl_dataset_feature_is_active(
3490 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3491 	    SPA_FEATURE_REDACTED_DATASETS)));
3492 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3493 		arc_buf_destroy(abuf, private);
3494 		dbuf_prefetch_fini(dpa, B_TRUE);
3495 		return;
3496 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3497 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3498 		dbuf_issue_final_prefetch(dpa, bp);
3499 	} else {
3500 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3501 		zbookmark_phys_t zb;
3502 
3503 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3504 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3505 			iter_aflags |= ARC_FLAG_L2CACHE;
3506 
3507 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3508 
3509 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3510 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3511 
3512 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3513 		    bp, dbuf_prefetch_indirect_done, dpa,
3514 		    ZIO_PRIORITY_SYNC_READ,
3515 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3516 		    &iter_aflags, &zb);
3517 	}
3518 
3519 	arc_buf_destroy(abuf, private);
3520 }
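/*
 * Illustrative walk of the arithmetic above, assuming epbs = 10 and a
 * prefetch target of level-0 blkid 5000: after the level-2 ancestor is
 * read, dpa_curlevel drops to 1 and nextblkid = 5000 >> 10 = 4, so
 * level-1 block 4 is read next with this same callback.  On that pass
 * dpa_curlevel drops to the target level, nextblkid = 5000, and the bp
 * at slot P2PHASE(5000, 1024) = 904 of the level-1 block is handed to
 * dbuf_issue_final_prefetch().
 */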
3521 
3522 /*
3523  * Issue prefetch reads for the given block on the given level.  If the indirect
3524  * blocks above that block are not in memory, we will read them in
3525  * asynchronously.  As a result, this call never blocks waiting for a read to
3526  * complete. Note that the prefetch might fail if the dataset is encrypted and
3527  * the encryption key is unmapped before the IO completes.
3528  */
3529 int
3530 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3531     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3532     void *arg)
3533 {
3534 	blkptr_t bp;
3535 	int epbs, nlevels, curlevel;
3536 	uint64_t curblkid;
3537 
3538 	ASSERT(blkid != DMU_BONUS_BLKID);
3539 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3540 
3541 	if (blkid > dn->dn_maxblkid)
3542 		goto no_issue;
3543 
3544 	if (level == 0 && dnode_block_freed(dn, blkid))
3545 		goto no_issue;
3546 
3547 	/*
3548 	 * This dnode hasn't been written to disk yet, so there's nothing to
3549 	 * prefetch.
3550 	 */
3551 	nlevels = dn->dn_phys->dn_nlevels;
3552 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3553 		goto no_issue;
3554 
3555 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3556 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3557 		goto no_issue;
3558 
3559 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3560 	    level, blkid, NULL);
3561 	if (db != NULL) {
3562 		mutex_exit(&db->db_mtx);
3563 		/*
3564 		 * This dbuf already exists.  It is either CACHED, or
3565 		 * (we assume) about to be read or filled.
3566 		 */
3567 		goto no_issue;
3568 	}
3569 
3570 	/*
3571 	 * Find the closest ancestor (indirect block) of the target block
3572 	 * that is present in the cache.  In this indirect block, we will
3573 	 * find the bp that is at curlevel, curblkid.
3574 	 */
3575 	curlevel = level;
3576 	curblkid = blkid;
3577 	while (curlevel < nlevels - 1) {
3578 		int parent_level = curlevel + 1;
3579 		uint64_t parent_blkid = curblkid >> epbs;
3580 		dmu_buf_impl_t *db;
3581 
3582 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3583 		    FALSE, TRUE, FTAG, &db) == 0) {
3584 			blkptr_t *bpp = db->db_buf->b_data;
3585 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3586 			dbuf_rele(db, FTAG);
3587 			break;
3588 		}
3589 
3590 		curlevel = parent_level;
3591 		curblkid = parent_blkid;
3592 	}
3593 
3594 	if (curlevel == nlevels - 1) {
3595 		/* No cached indirect blocks found. */
3596 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3597 		bp = dn->dn_phys->dn_blkptr[curblkid];
3598 	}
3599 	ASSERT(!BP_IS_REDACTED(&bp) ||
3600 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3601 	    SPA_FEATURE_REDACTED_DATASETS));
3602 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3603 		goto no_issue;
3604 
3605 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3606 
3607 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3608 	    ZIO_FLAG_CANFAIL);
3609 
3610 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3611 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3612 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3613 	    dn->dn_object, level, blkid);
3614 	dpa->dpa_curlevel = curlevel;
3615 	dpa->dpa_prio = prio;
3616 	dpa->dpa_aflags = aflags;
3617 	dpa->dpa_spa = dn->dn_objset->os_spa;
3618 	dpa->dpa_dnode = dn;
3619 	dpa->dpa_epbs = epbs;
3620 	dpa->dpa_zio = pio;
3621 	dpa->dpa_cb = cb;
3622 	dpa->dpa_arg = arg;
3623 
3624 	if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3625 		dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3626 	else if (dnode_level_is_l2cacheable(&bp, dn, level))
3627 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3628 
3629 	/*
3630 	 * If we have the indirect just above us, no need to do the asynchronous
3631 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3632 	 * a higher level, though, we want to issue the prefetches for all the
3633 	 * indirect blocks asynchronously, so we can go on with whatever we were
3634 	 * doing.
3635 	 */
3636 	if (curlevel == level) {
3637 		ASSERT3U(curblkid, ==, blkid);
3638 		dbuf_issue_final_prefetch(dpa, &bp);
3639 	} else {
3640 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3641 		zbookmark_phys_t zb;
3642 
3643 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3644 		if (dnode_level_is_l2cacheable(&bp, dn, level))
3645 			iter_aflags |= ARC_FLAG_L2CACHE;
3646 
3647 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3648 		    dn->dn_object, curlevel, curblkid);
3649 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3650 		    &bp, dbuf_prefetch_indirect_done, dpa,
3651 		    ZIO_PRIORITY_SYNC_READ,
3652 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3653 		    &iter_aflags, &zb);
3654 	}
3655 	/*
3656 	 * We use pio here instead of dpa_zio since it's possible that
3657 	 * dpa may have already been freed.
3658 	 */
3659 	zio_nowait(pio);
3660 	return (1);
3661 no_issue:
3662 	if (cb != NULL)
3663 		cb(arg, level, blkid, B_FALSE);
3664 	return (0);
3665 }
3666 
3667 int
3668 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3669     arc_flags_t aflags)
3670 {
3671 
3672 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3673 }
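/*
 * A minimal caller sketch, assuming dn is held and blkid is valid for
 * the object; the return value (1 if a prefetch was issued, 0 if not)
 * is typically ignored:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	(void) dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 */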
3674 
3675 /*
3676  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3677  * the case of encrypted, compressed and uncompressed buffers by
3678  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3679  * arc_alloc_compressed_buf() or arc_alloc_buf().
3680  *
3681  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3682  */
3683 noinline static void
3684 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3685 {
3686 	dbuf_dirty_record_t *dr = db->db_data_pending;
3687 	arc_buf_t *data = dr->dt.dl.dr_data;
3688 	enum zio_compress compress_type = arc_get_compression(data);
3689 	uint8_t complevel = arc_get_complevel(data);
3690 
3691 	if (arc_is_encrypted(data)) {
3692 		boolean_t byteorder;
3693 		uint8_t salt[ZIO_DATA_SALT_LEN];
3694 		uint8_t iv[ZIO_DATA_IV_LEN];
3695 		uint8_t mac[ZIO_DATA_MAC_LEN];
3696 
3697 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
3698 		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3699 		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3700 		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3701 		    compress_type, complevel));
3702 	} else if (compress_type != ZIO_COMPRESS_OFF) {
3703 		dbuf_set_data(db, arc_alloc_compressed_buf(
3704 		    dn->dn_objset->os_spa, db, arc_buf_size(data),
3705 		    arc_buf_lsize(data), compress_type, complevel));
3706 	} else {
3707 		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3708 		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3709 	}
3710 
3711 	rw_enter(&db->db_rwlock, RW_WRITER);
3712 	memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3713 	rw_exit(&db->db_rwlock);
3714 }
3715 
3716 /*
3717  * Returns with db_holds incremented, and db_mtx not held.
3718  * Note: dn_struct_rwlock must be held.
3719  */
3720 int
3721 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3722     boolean_t fail_sparse, boolean_t fail_uncached,
3723     const void *tag, dmu_buf_impl_t **dbp)
3724 {
3725 	dmu_buf_impl_t *db, *parent = NULL;
3726 	uint64_t hv;
3727 
3728 	/* If the pool has been created, verify the tx_sync_lock is not held */
3729 	spa_t *spa = dn->dn_objset->os_spa;
3730 	dsl_pool_t *dp = spa->spa_dsl_pool;
3731 	if (dp != NULL) {
3732 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3733 	}
3734 
3735 	ASSERT(blkid != DMU_BONUS_BLKID);
3736 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3737 	ASSERT3U(dn->dn_nlevels, >, level);
3738 
3739 	*dbp = NULL;
3740 
3741 	/* dbuf_find() returns with db_mtx held */
3742 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3743 
3744 	if (db == NULL) {
3745 		blkptr_t *bp = NULL;
3746 		int err;
3747 
3748 		if (fail_uncached)
3749 			return (SET_ERROR(ENOENT));
3750 
3751 		ASSERT3P(parent, ==, NULL);
3752 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3753 		if (fail_sparse) {
3754 			if (err == 0 && bp && BP_IS_HOLE(bp))
3755 				err = SET_ERROR(ENOENT);
3756 			if (err) {
3757 				if (parent)
3758 					dbuf_rele(parent, NULL);
3759 				return (err);
3760 			}
3761 		}
3762 		if (err && err != ENOENT)
3763 			return (err);
3764 		db = dbuf_create(dn, level, blkid, parent, bp, hv);
3765 	}
3766 
3767 	if (fail_uncached && db->db_state != DB_CACHED) {
3768 		mutex_exit(&db->db_mtx);
3769 		return (SET_ERROR(ENOENT));
3770 	}
3771 
3772 	if (db->db_buf != NULL) {
3773 		arc_buf_access(db->db_buf);
3774 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3775 	}
3776 
3777 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3778 
3779 	/*
3780 	 * If this buffer is currently syncing out, and we are
3781 	 * still referencing it from db_data, we need to make a copy
3782 	 * of it in case we decide we want to dirty it again in this txg.
3783 	 */
3784 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3785 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3786 	    db->db_state == DB_CACHED && db->db_data_pending) {
3787 		dbuf_dirty_record_t *dr = db->db_data_pending;
3788 		if (dr->dt.dl.dr_data == db->db_buf) {
3789 			ASSERT3P(db->db_buf, !=, NULL);
3790 			dbuf_hold_copy(dn, db);
3791 		}
3792 	}
3793 
3794 	if (multilist_link_active(&db->db_cache_link)) {
3795 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3796 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3797 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3798 
3799 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3800 
3801 		uint64_t size = db->db.db_size;
3802 		uint64_t usize = dmu_buf_user_size(&db->db);
3803 		(void) zfs_refcount_remove_many(
3804 		    &dbuf_caches[db->db_caching_status].size, size, db);
3805 		(void) zfs_refcount_remove_many(
3806 		    &dbuf_caches[db->db_caching_status].size, usize,
3807 		    db->db_user);
3808 
3809 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3810 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3811 		} else {
3812 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3813 			DBUF_STAT_BUMPDOWN(cache_count);
3814 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3815 			    size + usize);
3816 		}
3817 		db->db_caching_status = DB_NO_CACHE;
3818 	}
3819 	(void) zfs_refcount_add(&db->db_holds, tag);
3820 	DBUF_VERIFY(db);
3821 	mutex_exit(&db->db_mtx);
3822 
3823 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3824 	if (parent)
3825 		dbuf_rele(parent, NULL);
3826 
3827 	ASSERT3P(DB_DNODE(db), ==, dn);
3828 	ASSERT3U(db->db_blkid, ==, blkid);
3829 	ASSERT3U(db->db_level, ==, level);
3830 	*dbp = db;
3831 
3832 	return (0);
3833 }
3834 
3835 dmu_buf_impl_t *
3836 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3837 {
3838 	return (dbuf_hold_level(dn, 0, blkid, tag));
3839 }
3840 
3841 dmu_buf_impl_t *
3842 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3843 {
3844 	dmu_buf_impl_t *db;
3845 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3846 	return (err ? NULL : db);
3847 }
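/*
 * A minimal hold/read/release sketch, assuming the caller already has a
 * dnode hold and dn_struct_rwlock as (at least) a reader:
 *
 *	dmu_buf_impl_t *db = dbuf_hold_level(dn, 0, blkid, FTAG);
 *	if (db != NULL) {
 *		int err = dbuf_read(db, NULL, DB_RF_CANFAIL);
 *		if (err == 0) {
 *			... consume db->db.db_data ...
 *		}
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * dbuf_hold_level() folds any dbuf_hold_impl() error into a NULL return,
 * so callers that need the specific errno use dbuf_hold_impl() directly.
 */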
3848 
3849 void
3850 dbuf_create_bonus(dnode_t *dn)
3851 {
3852 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3853 
3854 	ASSERT(dn->dn_bonus == NULL);
3855 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3856 	    dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3857 }
3858 
3859 int
3860 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3861 {
3862 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3863 
3864 	if (db->db_blkid != DMU_SPILL_BLKID)
3865 		return (SET_ERROR(ENOTSUP));
3866 	if (blksz == 0)
3867 		blksz = SPA_MINBLOCKSIZE;
3868 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3869 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3870 
3871 	dbuf_new_size(db, blksz, tx);
3872 
3873 	return (0);
3874 }
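/*
 * Example of the rounding above: a requested blksz of 6000 becomes
 * P2ROUNDUP(6000, SPA_MINBLOCKSIZE) = 6144 (the next multiple of 512),
 * and a request of 0 is first promoted to SPA_MINBLOCKSIZE itself.
 */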
3875 
3876 void
3877 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3878 {
3879 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3880 }
3881 
3882 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3883 void
3884 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3885 {
3886 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3887 	VERIFY3S(holds, >, 1);
3888 }
3889 
3890 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3891 boolean_t
3892 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3893     const void *tag)
3894 {
3895 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3896 	dmu_buf_impl_t *found_db;
3897 	boolean_t result = B_FALSE;
3898 
3899 	if (blkid == DMU_BONUS_BLKID)
3900 		found_db = dbuf_find_bonus(os, obj);
3901 	else
3902 		found_db = dbuf_find(os, obj, 0, blkid, NULL);
3903 
3904 	if (found_db != NULL) {
3905 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3906 			(void) zfs_refcount_add(&db->db_holds, tag);
3907 			result = B_TRUE;
3908 		}
3909 		mutex_exit(&found_db->db_mtx);
3910 	}
3911 	return (result);
3912 }
3913 
3914 /*
3915  * If you call dbuf_rele() you had better not be referencing the dnode handle
3916  * unless you have some other direct or indirect hold on the dnode. (An indirect
3917  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3918  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3919  * dnode's parent dbuf evicting its dnode handles.
3920  */
3921 void
3922 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3923 {
3924 	mutex_enter(&db->db_mtx);
3925 	dbuf_rele_and_unlock(db, tag, B_FALSE);
3926 }
3927 
3928 void
3929 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3930 {
3931 	dbuf_rele((dmu_buf_impl_t *)db, tag);
3932 }
3933 
3934 /*
3935  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
3936  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
3937  * argument should be set if we are already in the dbuf-evicting code
3938  * path, in which case we don't want to recursively evict.  This allows us to
3939  * avoid deeply nested stacks that would have a call flow similar to this:
3940  *
3941  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3942  *	^						|
3943  *	|						|
3944  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3945  *
3946  */
3947 void
3948 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3949 {
3950 	int64_t holds;
3951 	uint64_t size;
3952 
3953 	ASSERT(MUTEX_HELD(&db->db_mtx));
3954 	DBUF_VERIFY(db);
3955 
3956 	/*
3957 	 * Remove the reference to the dbuf before removing its hold on the
3958 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
3959 	 * buffer has a corresponding dnode hold.
3960 	 */
3961 	holds = zfs_refcount_remove(&db->db_holds, tag);
3962 	ASSERT(holds >= 0);
3963 
3964 	/*
3965 	 * We can't freeze indirects if there is a possibility that they
3966 	 * may be modified in the current syncing context.
3967 	 */
3968 	if (db->db_buf != NULL &&
3969 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3970 		arc_buf_freeze(db->db_buf);
3971 	}
3972 
3973 	if (holds == db->db_dirtycnt &&
3974 	    db->db_level == 0 && db->db_user_immediate_evict)
3975 		dbuf_evict_user(db);
3976 
3977 	if (holds == 0) {
3978 		if (db->db_blkid == DMU_BONUS_BLKID) {
3979 			dnode_t *dn;
3980 			boolean_t evict_dbuf = db->db_pending_evict;
3981 
3982 			/*
3983 			 * If the dnode moves here, we cannot cross this
3984 			 * barrier until the move completes.
3985 			 */
3986 			DB_DNODE_ENTER(db);
3987 
3988 			dn = DB_DNODE(db);
3989 			atomic_dec_32(&dn->dn_dbufs_count);
3990 
3991 			/*
3992 			 * Decrementing the dbuf count means that the bonus
3993 			 * buffer's dnode hold is no longer discounted in
3994 			 * dnode_move(). The dnode cannot move until after
3995 			 * the dnode_rele() below.
3996 			 */
3997 			DB_DNODE_EXIT(db);
3998 
3999 			/*
4000 			 * Do not reference db after its lock is dropped.
4001 			 * Another thread may evict it.
4002 			 */
4003 			mutex_exit(&db->db_mtx);
4004 
4005 			if (evict_dbuf)
4006 				dnode_evict_bonus(dn);
4007 
4008 			dnode_rele(dn, db);
4009 		} else if (db->db_buf == NULL) {
4010 			/*
4011 			 * This is a special case: we never associated this
4012 			 * dbuf with any data allocated from the ARC.
4013 			 */
4014 			ASSERT(db->db_state == DB_UNCACHED ||
4015 			    db->db_state == DB_NOFILL);
4016 			dbuf_destroy(db);
4017 		} else if (arc_released(db->db_buf)) {
4018 			/*
4019 			 * This dbuf has anonymous data associated with it.
4020 			 */
4021 			dbuf_destroy(db);
4022 		} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
4023 		    db->db_pending_evict) {
4024 			dbuf_destroy(db);
4025 		} else if (!multilist_link_active(&db->db_cache_link)) {
4026 			ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4027 
4028 			dbuf_cached_state_t dcs =
4029 			    dbuf_include_in_metadata_cache(db) ?
4030 			    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
4031 			db->db_caching_status = dcs;
4032 
4033 			multilist_insert(&dbuf_caches[dcs].cache, db);
4034 			uint64_t db_size = db->db.db_size;
4035 			uint64_t dbu_size = dmu_buf_user_size(&db->db);
4036 			(void) zfs_refcount_add_many(
4037 			    &dbuf_caches[dcs].size, db_size, db);
4038 			size = zfs_refcount_add_many(
4039 			    &dbuf_caches[dcs].size, dbu_size, db->db_user);
4040 			uint8_t db_level = db->db_level;
4041 			mutex_exit(&db->db_mtx);
4042 
4043 			if (dcs == DB_DBUF_METADATA_CACHE) {
4044 				DBUF_STAT_BUMP(metadata_cache_count);
4045 				DBUF_STAT_MAX(metadata_cache_size_bytes_max,
4046 				    size);
4047 			} else {
4048 				DBUF_STAT_BUMP(cache_count);
4049 				DBUF_STAT_MAX(cache_size_bytes_max, size);
4050 				DBUF_STAT_BUMP(cache_levels[db_level]);
4051 				DBUF_STAT_INCR(cache_levels_bytes[db_level],
4052 				    db_size + dbu_size);
4053 			}
4054 
4055 			if (dcs == DB_DBUF_CACHE && !evicting)
4056 				dbuf_evict_notify(size);
4057 		}
4058 	} else {
4059 		mutex_exit(&db->db_mtx);
4060 	}
4061 
4062 }
4063 
4064 #pragma weak dmu_buf_refcount = dbuf_refcount
4065 uint64_t
4066 dbuf_refcount(dmu_buf_impl_t *db)
4067 {
4068 	return (zfs_refcount_count(&db->db_holds));
4069 }
4070 
4071 uint64_t
4072 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4073 {
4074 	uint64_t holds;
4075 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4076 
4077 	mutex_enter(&db->db_mtx);
4078 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4079 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4080 	mutex_exit(&db->db_mtx);
4081 
4082 	return (holds);
4083 }
4084 
4085 void *
4086 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4087     dmu_buf_user_t *new_user)
4088 {
4089 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4090 
4091 	mutex_enter(&db->db_mtx);
4092 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4093 	if (db->db_user == old_user)
4094 		db->db_user = new_user;
4095 	else
4096 		old_user = db->db_user;
4097 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4098 	mutex_exit(&db->db_mtx);
4099 
4100 	return (old_user);
4101 }
4102 
4103 void *
4104 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4105 {
4106 	return (dmu_buf_replace_user(db_fake, NULL, user));
4107 }
4108 
4109 void *
4110 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4111 {
4112 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4113 
4114 	db->db_user_immediate_evict = TRUE;
4115 	return (dmu_buf_set_user(db_fake, user));
4116 }
4117 
4118 void *
4119 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4120 {
4121 	return (dmu_buf_replace_user(db_fake, user, NULL));
4122 }
4123 
4124 void *
4125 dmu_buf_get_user(dmu_buf_t *db_fake)
4126 {
4127 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4128 
4129 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4130 	return (db->db_user);
4131 }
4132 
4133 uint64_t
4134 dmu_buf_user_size(dmu_buf_t *db_fake)
4135 {
4136 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4137 	if (db->db_user == NULL)
4138 		return (0);
4139 	return (atomic_load_64(&db->db_user->dbu_size));
4140 }
4141 
4142 void
4143 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd)
4144 {
4145 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4146 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4147 	ASSERT3P(db->db_user, !=, NULL);
4148 	ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd);
4149 	atomic_add_64(&db->db_user->dbu_size, nadd);
4150 }
4151 
4152 void
4153 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub)
4154 {
4155 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4156 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4157 	ASSERT3P(db->db_user, !=, NULL);
4158 	ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub);
4159 	atomic_sub_64(&db->db_user->dbu_size, nsub);
4160 }
4161 
4162 void
4163 dmu_buf_user_evict_wait(void)
4164 {
4165 	taskq_wait(dbu_evict_taskq);
4166 }
4167 
4168 blkptr_t *
4169 dmu_buf_get_blkptr(dmu_buf_t *db)
4170 {
4171 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4172 	return (dbi->db_blkptr);
4173 }
4174 
4175 objset_t *
4176 dmu_buf_get_objset(dmu_buf_t *db)
4177 {
4178 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4179 	return (dbi->db_objset);
4180 }
4181 
4182 static void
4183 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4184 {
4185 	/* ASSERT(dmu_tx_is_syncing(tx)) */
4186 	ASSERT(MUTEX_HELD(&db->db_mtx));
4187 
4188 	if (db->db_blkptr != NULL)
4189 		return;
4190 
4191 	if (db->db_blkid == DMU_SPILL_BLKID) {
4192 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4193 		BP_ZERO(db->db_blkptr);
4194 		return;
4195 	}
4196 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4197 		/*
4198 		 * This buffer was allocated at a time when there were
4199 		 * no available blkptrs from the dnode, or it was
4200 		 * inappropriate to hook it in (i.e., nlevels mismatch).
4201 		 */
4202 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4203 		ASSERT(db->db_parent == NULL);
4204 		db->db_parent = dn->dn_dbuf;
4205 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4206 		DBUF_VERIFY(db);
4207 	} else {
4208 		dmu_buf_impl_t *parent = db->db_parent;
4209 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4210 
4211 		ASSERT(dn->dn_phys->dn_nlevels > 1);
4212 		if (parent == NULL) {
4213 			mutex_exit(&db->db_mtx);
4214 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
4215 			parent = dbuf_hold_level(dn, db->db_level + 1,
4216 			    db->db_blkid >> epbs, db);
4217 			rw_exit(&dn->dn_struct_rwlock);
4218 			mutex_enter(&db->db_mtx);
4219 			db->db_parent = parent;
4220 		}
4221 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
4222 		    (db->db_blkid & ((1ULL << epbs) - 1));
4223 		DBUF_VERIFY(db);
4224 	}
4225 }
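/*
 * Illustrative numbers for the parent-indexing math above, again assuming
 * epbs = 10: a level-1 dbuf with db_blkid = 7 hangs off the level-2 parent
 * with blkid 7 >> epbs = 0, and its block pointer lives at slot
 * 7 & ((1ULL << epbs) - 1) = 7 of that parent's db_data array.
 */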
4226 
4227 static void
4228 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4229 {
4230 	dmu_buf_impl_t *db = dr->dr_dbuf;
4231 	void *data = dr->dt.dl.dr_data;
4232 
4233 	ASSERT0(db->db_level);
4234 	ASSERT(MUTEX_HELD(&db->db_mtx));
4235 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4236 	ASSERT(data != NULL);
4237 
4238 	dnode_t *dn = dr->dr_dnode;
4239 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4240 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4241 	memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4242 
4243 	dbuf_sync_leaf_verify_bonus_dnode(dr);
4244 
4245 	dbuf_undirty_bonus(dr);
4246 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4247 }
4248 
4249 /*
4250  * When syncing out a block of dnodes, adjust the block to deal with
4251  * encryption.  Normally, we make sure the block is decrypted before writing
4252  * it.  If we have crypt params, then we are writing a raw (encrypted) block
4253  * from a raw receive.  In this case, set the ARC buf's crypt params so
4254  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4255  */
4256 static void
4257 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4258 {
4259 	int err;
4260 	dmu_buf_impl_t *db = dr->dr_dbuf;
4261 
4262 	ASSERT(MUTEX_HELD(&db->db_mtx));
4263 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4264 	ASSERT3U(db->db_level, ==, 0);
4265 
4266 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4267 		zbookmark_phys_t zb;
4268 
4269 		/*
4270 		 * Unfortunately, there is currently no mechanism for
4271 		 * syncing context to handle decryption errors. An error
4272 		 * here is only possible if an attacker maliciously
4273 		 * changed a dnode block and updated the associated
4274 		 * checksums going up the block tree.
4275 		 */
4276 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4277 		    db->db.db_object, db->db_level, db->db_blkid);
4278 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4279 		    &zb, B_TRUE);
4280 		if (err)
4281 			panic("Invalid dnode block MAC");
4282 	} else if (dr->dt.dl.dr_has_raw_params) {
4283 		(void) arc_release(dr->dt.dl.dr_data, db);
4284 		arc_convert_to_raw(dr->dt.dl.dr_data,
4285 		    dmu_objset_id(db->db_objset),
4286 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4287 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4288 	}
4289 }
4290 
4291 /*
4292  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4293  * is critical that we not allow the compiler to inline this function into
4294  * dbuf_sync_list() thereby drastically bloating the stack usage.
4295  */
4296 noinline static void
4297 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4298 {
4299 	dmu_buf_impl_t *db = dr->dr_dbuf;
4300 	dnode_t *dn = dr->dr_dnode;
4301 
4302 	ASSERT(dmu_tx_is_syncing(tx));
4303 
4304 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4305 
4306 	mutex_enter(&db->db_mtx);
4307 
4308 	ASSERT(db->db_level > 0);
4309 	DBUF_VERIFY(db);
4310 
4311 	/* Read the block if it hasn't been read yet. */
4312 	if (db->db_buf == NULL) {
4313 		mutex_exit(&db->db_mtx);
4314 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4315 		mutex_enter(&db->db_mtx);
4316 	}
4317 	ASSERT3U(db->db_state, ==, DB_CACHED);
4318 	ASSERT(db->db_buf != NULL);
4319 
4320 	/* Indirect block size must match what the dnode thinks it is. */
4321 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4322 	dbuf_check_blkptr(dn, db);
4323 
4324 	/* Provide the pending dirty record to child dbufs */
4325 	db->db_data_pending = dr;
4326 
4327 	mutex_exit(&db->db_mtx);
4328 
4329 	dbuf_write(dr, db->db_buf, tx);
4330 
4331 	zio_t *zio = dr->dr_zio;
4332 	mutex_enter(&dr->dt.di.dr_mtx);
4333 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4334 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4335 	mutex_exit(&dr->dt.di.dr_mtx);
4336 	zio_nowait(zio);
4337 }
4338 
4339 /*
4340  * Verify that the size of the data in our bonus buffer does not exceed
4341  * its recorded size.
4342  *
4343  * The purpose of this verification is to catch any cases in development
4344  * where the size of a phys structure (i.e., space_map_phys_t) grows and,
4345  * due to incorrect feature management, older pools expect to read more
4346  * data even though they didn't actually write it to begin with.
4347  *
4348  * For example, this would catch an error in the feature logic where we
4349  * open an older pool and we expect to write the space map histogram of
4350  * a space map with size SPACE_MAP_SIZE_V0.
4351  */
4352 static void
4353 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4354 {
4355 #ifdef ZFS_DEBUG
4356 	dnode_t *dn = dr->dr_dnode;
4357 
4358 	/*
4359 	 * Encrypted bonus buffers can have data past their bonuslen.
4360 	 * Skip the verification of these blocks.
4361 	 */
4362 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4363 		return;
4364 
4365 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4366 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4367 	ASSERT3U(bonuslen, <=, maxbonuslen);
4368 
4369 	arc_buf_t *datap = dr->dt.dl.dr_data;
4370 	char *datap_end = ((char *)datap) + bonuslen;
4371 	char *datap_max = ((char *)datap) + maxbonuslen;
4372 
4373 	/* ensure that everything is zero after our data */
4374 	for (; datap_end < datap_max; datap_end++)
4375 		ASSERT(*datap_end == 0);
4376 #endif
4377 }
4378 
4379 static blkptr_t *
4380 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4381 {
4382 	/* This must be a lightweight dirty record. */
4383 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4384 	dnode_t *dn = dr->dr_dnode;
4385 
4386 	if (dn->dn_phys->dn_nlevels == 1) {
4387 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4388 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4389 	} else {
4390 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4391 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4392 		VERIFY3U(parent_db->db_level, ==, 1);
4393 		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4394 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4395 		blkptr_t *bp = parent_db->db.db_data;
4396 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4397 	}
4398 }
4399 
4400 static void
4401 dbuf_lightweight_ready(zio_t *zio)
4402 {
4403 	dbuf_dirty_record_t *dr = zio->io_private;
4404 	blkptr_t *bp = zio->io_bp;
4405 
4406 	if (zio->io_error != 0)
4407 		return;
4408 
4409 	dnode_t *dn = dr->dr_dnode;
4410 
4411 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4412 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4413 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4414 	    bp_get_dsize_sync(spa, bp_orig);
4415 	dnode_diduse_space(dn, delta);
4416 
4417 	uint64_t blkid = dr->dt.dll.dr_blkid;
4418 	mutex_enter(&dn->dn_mtx);
4419 	if (blkid > dn->dn_phys->dn_maxblkid) {
4420 		ASSERT0(dn->dn_objset->os_raw_receive);
4421 		dn->dn_phys->dn_maxblkid = blkid;
4422 	}
4423 	mutex_exit(&dn->dn_mtx);
4424 
4425 	if (!BP_IS_EMBEDDED(bp)) {
4426 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4427 		BP_SET_FILL(bp, fill);
4428 	}
4429 
4430 	dmu_buf_impl_t *parent_db;
4431 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4432 	if (dr->dr_parent == NULL) {
4433 		parent_db = dn->dn_dbuf;
4434 	} else {
4435 		parent_db = dr->dr_parent->dr_dbuf;
4436 	}
4437 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4438 	*bp_orig = *bp;
4439 	rw_exit(&parent_db->db_rwlock);
4440 }
4441 
4442 static void
4443 dbuf_lightweight_done(zio_t *zio)
4444 {
4445 	dbuf_dirty_record_t *dr = zio->io_private;
4446 
4447 	VERIFY0(zio->io_error);
4448 
4449 	objset_t *os = dr->dr_dnode->dn_objset;
4450 	dmu_tx_t *tx = os->os_synctx;
4451 
4452 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4453 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4454 	} else {
4455 		dsl_dataset_t *ds = os->os_dsl_dataset;
4456 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4457 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4458 	}
4459 
4460 	dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4461 	    zio->io_txg);
4462 
4463 	abd_free(dr->dt.dll.dr_abd);
4464 	kmem_free(dr, sizeof (*dr));
4465 }
4466 
4467 noinline static void
4468 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4469 {
4470 	dnode_t *dn = dr->dr_dnode;
4471 	zio_t *pio;
4472 	if (dn->dn_phys->dn_nlevels == 1) {
4473 		pio = dn->dn_zio;
4474 	} else {
4475 		pio = dr->dr_parent->dr_zio;
4476 	}
4477 
4478 	zbookmark_phys_t zb = {
4479 		.zb_objset = dmu_objset_id(dn->dn_objset),
4480 		.zb_object = dn->dn_object,
4481 		.zb_level = 0,
4482 		.zb_blkid = dr->dt.dll.dr_blkid,
4483 	};
4484 
4485 	/*
4486 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4487 	 * will have the old BP in dbuf_lightweight_done().
4488 	 */
4489 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4490 
4491 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4492 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4493 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4494 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4495 	    dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
4496 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4497 
4498 	zio_nowait(dr->dr_zio);
4499 }
4500 
4501 /*
4502  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4503  * critical that we not allow the compiler to inline this function into
4504  * dbuf_sync_list() thereby drastically bloating the stack usage.
4505  */
4506 noinline static void
4507 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4508 {
4509 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4510 	dmu_buf_impl_t *db = dr->dr_dbuf;
4511 	dnode_t *dn = dr->dr_dnode;
4512 	objset_t *os;
4513 	uint64_t txg = tx->tx_txg;
4514 
4515 	ASSERT(dmu_tx_is_syncing(tx));
4516 
4517 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4518 
4519 	mutex_enter(&db->db_mtx);
4520 	/*
4521 	 * To be synced, we must be dirtied.  But we
4522 	 * might have been freed after the dirty.
4523 	 */
4524 	if (db->db_state == DB_UNCACHED) {
4525 		/* This buffer has been freed since it was dirtied */
4526 		ASSERT(db->db.db_data == NULL);
4527 	} else if (db->db_state == DB_FILL) {
4528 		/* This buffer was freed and is now being re-filled */
4529 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4530 	} else if (db->db_state == DB_READ) {
4531 		/*
4532 		 * This buffer has a clone we need to write, and an in-flight
4533 		 * read on the BP we're about to clone.  It's safe to issue the
4534 		 * write here because the read has already been issued and the
4535 		 * contents won't change.
4536 		 */
4537 		ASSERT(dr->dt.dl.dr_brtwrite &&
4538 		    dr->dt.dl.dr_override_state == DR_OVERRIDDEN);
4539 	} else {
4540 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4541 	}
4542 	DBUF_VERIFY(db);
4543 
4544 	if (db->db_blkid == DMU_SPILL_BLKID) {
4545 		mutex_enter(&dn->dn_mtx);
4546 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4547 			/*
4548 			 * In the previous transaction group, the bonus buffer
4549 			 * was entirely used to store the attributes for the
4550 			 * dnode which overrode the dn_spill field.  However,
4551 			 * when adding more attributes to the file a spill
4552 			 * block was required to hold the extra attributes.
4553 			 *
4554 			 * Make sure to clear the garbage left in the dn_spill
4555 			 * field from the previous attributes in the bonus
4556 			 * buffer.  Otherwise, after writing out the spill
4557 			 * block to the new allocated dva, it will free
4558 			 * the old block pointed to by the invalid dn_spill.
4559 			 */
4560 			db->db_blkptr = NULL;
4561 		}
4562 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4563 		mutex_exit(&dn->dn_mtx);
4564 	}
4565 
4566 	/*
4567 	 * If this is a bonus buffer, simply copy the bonus data into the
4568 	 * dnode.  It will be written out when the dnode is synced (and it
4569 	 * will be synced, since it must have been dirty for dbuf_sync to
4570 	 * be called).
4571 	 */
4572 	if (db->db_blkid == DMU_BONUS_BLKID) {
4573 		ASSERT(dr->dr_dbuf == db);
4574 		dbuf_sync_bonus(dr, tx);
4575 		return;
4576 	}
4577 
4578 	os = dn->dn_objset;
4579 
4580 	/*
4581 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4582 	 * operation to sneak in. As a result, we need to ensure that we
4583 	 * don't check the dr_override_state until we have returned from
4584 	 * dbuf_check_blkptr.
4585 	 */
4586 	dbuf_check_blkptr(dn, db);
4587 
4588 	/*
4589 	 * If this buffer is in the middle of an immediate write,
4590 	 * wait for the synchronous IO to complete.
4591 	 */
4592 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4593 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4594 		cv_wait(&db->db_changed, &db->db_mtx);
4595 	}
4596 
4597 	/*
4598 	 * If this is a dnode block, ensure it is appropriately encrypted
4599 	 * or decrypted, depending on what we are writing to it this txg.
4600 	 */
4601 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4602 		dbuf_prepare_encrypted_dnode_leaf(dr);
4603 
4604 	if (*datap != NULL && *datap == db->db_buf &&
4605 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4606 	    zfs_refcount_count(&db->db_holds) > 1 &&
4607 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN) {
4608 		/*
4609 		 * If this buffer is currently "in use" (i.e., there
4610 		 * are active holds and db_data still references it),
4611 		 * then make a copy before we start the write so that
4612 		 * any modifications from the open txg will not leak
4613 		 * into this write.
4614 		 *
4615 		 * NOTE: this copy does not need to be made for
4616 		 * objects only modified in the syncing context (e.g.
4617 		 * DNONE_DNODE blocks).
4618 		 */
4619 		int psize = arc_buf_size(*datap);
4620 		int lsize = arc_buf_lsize(*datap);
4621 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4622 		enum zio_compress compress_type = arc_get_compression(*datap);
4623 		uint8_t complevel = arc_get_complevel(*datap);
4624 
4625 		if (arc_is_encrypted(*datap)) {
4626 			boolean_t byteorder;
4627 			uint8_t salt[ZIO_DATA_SALT_LEN];
4628 			uint8_t iv[ZIO_DATA_IV_LEN];
4629 			uint8_t mac[ZIO_DATA_MAC_LEN];
4630 
4631 			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4632 			*datap = arc_alloc_raw_buf(os->os_spa, db,
4633 			    dmu_objset_id(os), byteorder, salt, iv, mac,
4634 			    dn->dn_type, psize, lsize, compress_type,
4635 			    complevel);
4636 		} else if (compress_type != ZIO_COMPRESS_OFF) {
4637 			ASSERT3U(type, ==, ARC_BUFC_DATA);
4638 			*datap = arc_alloc_compressed_buf(os->os_spa, db,
4639 			    psize, lsize, compress_type, complevel);
4640 		} else {
4641 			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
4642 		}
4643 		memcpy((*datap)->b_data, db->db.db_data, psize);
4644 	}
4645 	db->db_data_pending = dr;
4646 
4647 	mutex_exit(&db->db_mtx);
4648 
4649 	dbuf_write(dr, *datap, tx);
4650 
4651 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4652 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4653 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4654 	} else {
4655 		zio_nowait(dr->dr_zio);
4656 	}
4657 }
4658 
4659 /*
4660  * Syncs out a range of dirty records for indirect or leaf dbufs.  May be
4661  * called recursively from dbuf_sync_indirect().
4662  */
4663 void
4664 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4665 {
4666 	dbuf_dirty_record_t *dr;
4667 
4668 	while ((dr = list_head(list))) {
4669 		if (dr->dr_zio != NULL) {
4670 			/*
4671 			 * If we find an already initialized zio then we
4672 			 * are processing the meta-dnode, and we have finished.
4673 			 * The dbufs for all dnodes are put back on the list
4674 			 * during processing, so that we can zio_wait()
4675 			 * these IOs after initiating all child IOs.
4676 			 */
4677 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4678 			    DMU_META_DNODE_OBJECT);
4679 			break;
4680 		}
4681 		list_remove(list, dr);
4682 		if (dr->dr_dbuf == NULL) {
4683 			dbuf_sync_lightweight(dr, tx);
4684 		} else {
4685 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4686 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4687 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4688 			}
4689 			if (dr->dr_dbuf->db_level > 0)
4690 				dbuf_sync_indirect(dr, tx);
4691 			else
4692 				dbuf_sync_leaf(dr, tx);
4693 		}
4694 	}
4695 }
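/*
 * The resulting call flow mirrors the block tree (an illustrative trace,
 * not an exhaustive one):
 *
 *	dbuf_sync_list(level = N)
 *	    +-> dbuf_sync_lightweight()	(dr_dbuf == NULL)
 *	    +-> dbuf_sync_leaf()	(level-0 dbufs)
 *	    +-> dbuf_sync_indirect()	(level > 0 dbufs)
 *		    +-> dbuf_sync_list(dr_children, level = N - 1)
 *
 * This recursion is why dbuf_sync_indirect() and dbuf_sync_leaf() are
 * declared noinline: inlining them would stack dbuf_sync_list()'s frame
 * on top of theirs at every level of the tree.
 */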
4696 
4697 static void
4698 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4699 {
4700 	(void) buf;
4701 	dmu_buf_impl_t *db = vdb;
4702 	dnode_t *dn;
4703 	blkptr_t *bp = zio->io_bp;
4704 	blkptr_t *bp_orig = &zio->io_bp_orig;
4705 	spa_t *spa = zio->io_spa;
4706 	int64_t delta;
4707 	uint64_t fill = 0;
4708 	int i;
4709 
4710 	ASSERT3P(db->db_blkptr, !=, NULL);
4711 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4712 
4713 	DB_DNODE_ENTER(db);
4714 	dn = DB_DNODE(db);
4715 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4716 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4717 	zio->io_prev_space_delta = delta;
4718 
4719 	if (BP_GET_LOGICAL_BIRTH(bp) != 0) {
4720 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4721 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4722 		    (db->db_blkid == DMU_SPILL_BLKID &&
4723 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4724 		    BP_IS_EMBEDDED(bp));
4725 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4726 	}
4727 
4728 	mutex_enter(&db->db_mtx);
4729 
4730 #ifdef ZFS_DEBUG
4731 	if (db->db_blkid == DMU_SPILL_BLKID) {
4732 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4733 		ASSERT(!(BP_IS_HOLE(bp)) &&
4734 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4735 	}
4736 #endif
4737 
	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID) {
			ASSERT0(db->db_objset->os_raw_receive);
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		}
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			i = 0;
			while (i < db->db.db_size) {
				dnode_phys_t *dnp =
				    (void *)(((char *)db->db.db_data) + i);

				i += DNODE_MIN_SIZE;
				if (dnp->dn_type != DMU_OT_NONE) {
					fill++;
					for (int j = 0; j < dnp->dn_nblkptr;
					    j++) {
						(void) zfs_blkptr_verify(spa,
						    &dnp->dn_blkptr[j],
						    BLK_CONFIG_SKIP,
						    BLK_VERIFY_HALT);
					}
					if (dnp->dn_flags &
					    DNODE_FLAG_SPILL_BLKPTR) {
						(void) zfs_blkptr_verify(spa,
						    DN_SPILL_BLKPTR(dnp),
						    BLK_CONFIG_SKIP,
						    BLK_VERIFY_HALT);
					}
					i += dnp->dn_extra_slots *
					    DNODE_MIN_SIZE;
				}
			}
		} else {
			if (BP_IS_HOLE(bp)) {
				fill = 0;
			} else {
				fill = 1;
			}
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			(void) zfs_blkptr_verify(spa, ibp,
			    BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
			fill += BP_GET_FILL(ibp);
		}
	}
	DB_DNODE_EXIT(db);

	if (!BP_IS_EMBEDDED(bp))
		BP_SET_FILL(bp, fill);

	mutex_exit(&db->db_mtx);

	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
	*db->db_blkptr = *bp;
	dmu_buf_unlock_parent(db, dblt, FTAG);
}

/*
 * This function gets called just prior to running through the compression
 * stage of the zio pipeline.  If we're an indirect block composed of only
 * holes, then we want this indirect block to be compressed away to a hole.
 * In order to do that we must zero out any information about the holes
 * that this indirect block points to before we try to compress it.
 */
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	(void) zio, (void) buf;
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp;
	unsigned int epbs, i;

	ASSERT3U(db->db_level, >, 0);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
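	/* Number of block pointers per indirect block, as a log2 shift. */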
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);

	/* Determine if all our children are holes */
	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}

	/*
	 * If all the children are holes, then zero them all out so that
	 * this block may be compressed away.
	 */
	if (i == 1ULL << epbs) {
		/*
		 * We only found holes.  Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&db->db_rwlock, RW_WRITER);
		memset(db->db.db_data, 0, db->db.db_size);
		rw_exit(&db->db_rwlock);
	}
	DB_DNODE_EXIT(db);
}

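/*
 * The "done" callback for a dbuf write zio.  Unless this was a nopwrite
 * or a rewrite (in which case the BP is unchanged), charge the block
 * birth/kill to the dataset.  Then detach and free the dirty record,
 * undirty the accounted space in the pool, drop the dirty hold on the
 * dbuf, and wake anyone waiting on db_changed.
 */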
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	(void) buf;
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	blkptr_t *bp = db->db_blkptr;
	objset_t *os = db->db_objset;
	dmu_tx_t *tx = os->os_synctx;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	dbuf_dirty_record_t *dr = db->db_data_pending;
	dnode_t *dn = dr->dr_dnode;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_dbuf == db);
	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
	list_remove(&db->db_dirty_records, dr);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (dr->dt.dl.dr_data != NULL &&
		    dr->dt.dl.dr_data != db->db_buf) {
			arc_buf_destroy(dr->dt.dl.dr_data, db);
		}
	} else {
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_blkid, <=,
			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
		}
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);

	dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
	    zio->io_txg);

	kmem_free(dr, sizeof (dbuf_dirty_record_t));
}

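/*
 * Thin wrappers adapting dbuf_write_ready()/dbuf_write_done() to the
 * single-argument zio callback signature used for nofill writes, which
 * carry no ARC buffer; the zio's private data is the dbuf itself.
 */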
static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

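/*
 * Ready/done callbacks for "override" writes, whose final BP was supplied
 * in open context (e.g. by dmu_sync() or dmu_buf_write_embedded()).  Here
 * the zio's private data is the dirty record rather than the dbuf.  On
 * completion, if the zio did not end up using the BP provided in open
 * context, that BP's space is freed and the ARC buffer released.
 */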
static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);

	if (zio->io_abd != NULL)
		abd_free(zio->io_abd);
}

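/*
 * Context handed from dbuf_remap_impl() to dbuf_remap_impl_callback()
 * via spa_remap_blkptr().
 */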
typedef struct dbuf_remap_impl_callback_arg {
	objset_t	*drica_os;
	uint64_t	drica_blk_birth;
	dmu_tx_t	*drica_tx;
} dbuf_remap_impl_callback_arg_t;

static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
    void *arg)
{
	dbuf_remap_impl_callback_arg_t *drica = arg;
	objset_t *os = drica->drica_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_tx_t *tx = drica->drica_tx;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (os == spa_meta_objset(spa)) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
		    size, drica->drica_blk_birth, tx);
	}
}

static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
	blkptr_t bp_copy = *bp;
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	dbuf_remap_impl_callback_arg_t drica;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	drica.drica_os = dn->dn_objset;
	drica.drica_blk_birth = BP_GET_LOGICAL_BIRTH(bp);
	drica.drica_tx = tx;
	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
	    &drica)) {
		/*
		 * If the blkptr being remapped is tracked by a livelist,
		 * then we need to make sure the livelist reflects the update.
		 * First, cancel out the old blkptr by appending a 'FREE'
		 * entry.  Next, add an 'ALLOC' to track the new version.
		 * This way we avoid trying to free an inaccurate blkptr at
		 * delete.  Note that embedded blkptrs are not tracked in
		 * livelists.
		 */
		if (dn->dn_objset != spa_meta_objset(spa)) {
			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
			    BP_GET_LOGICAL_BIRTH(bp) >
			    ds->ds_dir->dd_origin_txg) {
				ASSERT(!BP_IS_EMBEDDED(bp));
				ASSERT(dsl_dir_is_clone(ds->ds_dir));
				ASSERT(spa_feature_is_enabled(spa,
				    SPA_FEATURE_LIVELIST));
				bplist_append(&ds->ds_dir->dd_pending_frees,
				    bp);
				bplist_append(&ds->ds_dir->dd_pending_allocs,
				    &bp_copy);
			}
		}

		/*
		 * The db_rwlock prevents dbuf_read_impl() from
		 * dereferencing the BP while we are changing it.  To
		 * avoid lock contention, only grab it when we are actually
		 * changing the BP.
		 */
		if (rw != NULL)
			rw_enter(rw, RW_WRITER);
		*bp = bp_copy;
		if (rw != NULL)
			rw_exit(rw);
	}
}

/*
 * Remap any existing BPs to concrete vdevs, if possible.
 */
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return;

	if (db->db_level > 0) {
		blkptr_t *bp = db->db.db_data;
		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
		}
	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		dnode_phys_t *dnp = db->db.db_data;
		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
		    DMU_OT_DNODE);
		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
		    i += dnp[i].dn_extra_slots + 1) {
			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
				    &dn->dn_dbuf->db_rwlock);
				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
				    tx);
			}
		}
	}
}

/*
 * Populate dr->dr_zio with a zio to commit a dirty buffer to disk.
 * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio).
 */
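/*
 * A minimal sketch of the expected calling pattern; the real callers are
 * dbuf_sync_leaf() and dbuf_sync_indirect(), which already hold a dirty
 * record and the syncing tx:
 *
 *	dbuf_write(dr, data, tx);	// populates dr->dr_zio
 *	zio_nowait(dr->dr_zio);		// caller issues the write
 */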
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = dr->dr_dnode;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *pio; /* parent I/O */
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	os = dn->dn_objset;

	if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
		/*
		 * Private object buffers are released here rather than in
		 * dbuf_dirty() since they are only modified in the syncing
		 * context and we don't want the overhead of making multiple
		 * copies of the data.
		 */
		if (BP_IS_HOLE(db->db_blkptr))
			arc_buf_thaw(data);
		else
			dbuf_release_bp(db);
		dbuf_remap(dn, db, tx);
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		pio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		pio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(BP_GET_LOGICAL_BIRTH(db->db_blkptr), <=, txg);
	ASSERT(pio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (data == NULL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context.  We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

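	/*
	 * Three cases follow: the final BP was already supplied in open
	 * context (override), there is no data to fill in (nofill), or we
	 * issue a normal ARC write of the dirty buffer.
	 */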
	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
		abd_t *contents = (data != NULL) ?
		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;

		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
		    contents, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
		    dr->dt.dl.dr_brtwrite);
		mutex_exit(&db->db_mtx);
	} else if (data == NULL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(pio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to set up the children ready
		 * callback so that we can properly handle an indirect block
		 * that only contains holes.
		 */
		arc_write_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(pio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
		    dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
		    children_ready_cb, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_clone);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
	"Maximum size in bytes of the dbuf cache.");

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
	"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
	"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
	"Maximum size in bytes of the dbuf metadata cache.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
	"Set size of dbuf cache to log2 fraction of ARC size.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
	"Set size of dbuf metadata cache to log2 fraction of ARC size.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
	"Set size of dbuf cache mutex array as log2 shift.");