xref: /illumos-gate/usr/src/uts/common/fs/zfs/dbuf.c (revision b3700b07)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28  */
29 
30 #include <sys/zfs_context.h>
31 #include <sys/dmu.h>
32 #include <sys/dmu_send.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dbuf.h>
35 #include <sys/dmu_objset.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/spa.h>
40 #include <sys/zio.h>
41 #include <sys/dmu_zfetch.h>
42 #include <sys/sa.h>
43 #include <sys/sa_impl.h>
44 #include <sys/zfeature.h>
45 #include <sys/blkptr.h>
46 #include <sys/range_tree.h>
47 
48 /*
49  * Number of times that dbuf_free_range() took the slow path while doing
50  * a zfs receive.  A nonzero value indicates a potential performance problem.
51  */
52 uint64_t zfs_free_range_recv_miss;
53 
54 static void dbuf_destroy(dmu_buf_impl_t *db);
55 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
56 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
57 
58 #ifndef __lint
59 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
60     dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
61 #endif /* ! __lint */
62 
63 /*
64  * Global data structures and functions for the dbuf cache.
65  */
66 static kmem_cache_t *dbuf_cache;
67 static taskq_t *dbu_evict_taskq;
68 
69 /* ARGSUSED */
70 static int
71 dbuf_cons(void *vdb, void *unused, int kmflag)
72 {
73 	dmu_buf_impl_t *db = vdb;
74 	bzero(db, sizeof (dmu_buf_impl_t));
75 
76 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
77 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
78 	refcount_create(&db->db_holds);
79 
80 	return (0);
81 }
82 
83 /* ARGSUSED */
84 static void
85 dbuf_dest(void *vdb, void *unused)
86 {
87 	dmu_buf_impl_t *db = vdb;
88 	mutex_destroy(&db->db_mtx);
89 	cv_destroy(&db->db_changed);
90 	refcount_destroy(&db->db_holds);
91 }
92 
93 /*
94  * dbuf hash table routines
95  */
96 static dbuf_hash_table_t dbuf_hash_table;
97 
98 static uint64_t dbuf_hash_count;
99 
100 static uint64_t
101 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
102 {
103 	uintptr_t osv = (uintptr_t)os;
104 	uint64_t crc = -1ULL;
105 
106 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
107 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
108 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
109 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
110 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
111 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
112 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
113 
114 	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
115 
116 	return (crc);
117 }
118 
119 #define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
120 
121 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
122 	((dbuf)->db.db_object == (obj) &&		\
123 	(dbuf)->db_objset == (os) &&			\
124 	(dbuf)->db_level == (level) &&			\
125 	(dbuf)->db_blkid == (blkid))
126 
127 dmu_buf_impl_t *
128 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
129 {
130 	dbuf_hash_table_t *h = &dbuf_hash_table;
131 	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
132 	uint64_t idx = hv & h->hash_table_mask;
133 	dmu_buf_impl_t *db;
134 
135 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
136 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
137 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
138 			mutex_enter(&db->db_mtx);
139 			if (db->db_state != DB_EVICTING) {
140 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
141 				return (db);
142 			}
143 			mutex_exit(&db->db_mtx);
144 		}
145 	}
146 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
147 	return (NULL);
148 }
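
/*
 * Note that dbuf_find() returns with db_mtx held on success, so the
 * caller is responsible for dropping it.  A minimal usage sketch
 * (hypothetical caller, not taken from this file):
 *
 *	dmu_buf_impl_t *found = dbuf_find(os, obj, 0, blkid);
 *	if (found != NULL) {
 *		... examine found while holding found->db_mtx ...
 *		mutex_exit(&found->db_mtx);
 *	}
 */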
149 
150 static dmu_buf_impl_t *
151 dbuf_find_bonus(objset_t *os, uint64_t object)
152 {
153 	dnode_t *dn;
154 	dmu_buf_impl_t *db = NULL;
155 
156 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
157 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
158 		if (dn->dn_bonus != NULL) {
159 			db = dn->dn_bonus;
160 			mutex_enter(&db->db_mtx);
161 		}
162 		rw_exit(&dn->dn_struct_rwlock);
163 		dnode_rele(dn, FTAG);
164 	}
165 	return (db);
166 }
167 
168 /*
169  * Insert an entry into the hash table.  If there is already an element
170  * equal to elem in the hash table, then the already existing element
171  * will be returned and the new element will not be inserted.
172  * Otherwise returns NULL.
173  */
174 static dmu_buf_impl_t *
175 dbuf_hash_insert(dmu_buf_impl_t *db)
176 {
177 	dbuf_hash_table_t *h = &dbuf_hash_table;
178 	objset_t *os = db->db_objset;
179 	uint64_t obj = db->db.db_object;
180 	int level = db->db_level;
181 	uint64_t blkid = db->db_blkid;
182 	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
183 	uint64_t idx = hv & h->hash_table_mask;
184 	dmu_buf_impl_t *dbf;
185 
186 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
187 	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
188 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
189 			mutex_enter(&dbf->db_mtx);
190 			if (dbf->db_state != DB_EVICTING) {
191 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
192 				return (dbf);
193 			}
194 			mutex_exit(&dbf->db_mtx);
195 		}
196 	}
197 
198 	mutex_enter(&db->db_mtx);
199 	db->db_hash_next = h->hash_table[idx];
200 	h->hash_table[idx] = db;
201 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
202 	atomic_inc_64(&dbuf_hash_count);
203 
204 	return (NULL);
205 }
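
/*
 * A sketch of the intended insert-or-find idiom (this mirrors how
 * dbuf_create() uses dbuf_hash_insert() in the full file): on a lost
 * race the caller keeps the winner and discards its own candidate.
 *
 *	if ((odb = dbuf_hash_insert(db)) != NULL) {
 *		... someone else inserted it first: use odb, free db ...
 *	}
 */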
206 
207 /*
208  * Remove an entry from the hash table.  It must be in the EVICTING state.
209  */
210 static void
211 dbuf_hash_remove(dmu_buf_impl_t *db)
212 {
213 	dbuf_hash_table_t *h = &dbuf_hash_table;
214 	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
215 	    db->db_level, db->db_blkid);
216 	uint64_t idx = hv & h->hash_table_mask;
217 	dmu_buf_impl_t *dbf, **dbp;
218 
219 	/*
220 	 * We mustn't hold db_mtx here, to maintain the lock ordering:
221 	 * DBUF_HASH_MUTEX > db_mtx.
222 	 */
223 	ASSERT(refcount_is_zero(&db->db_holds));
224 	ASSERT(db->db_state == DB_EVICTING);
225 	ASSERT(!MUTEX_HELD(&db->db_mtx));
226 
227 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
228 	dbp = &h->hash_table[idx];
229 	while ((dbf = *dbp) != db) {
230 		ASSERT(dbf != NULL);
231 		dbp = &dbf->db_hash_next;
232 	}
233 	*dbp = db->db_hash_next;
234 	db->db_hash_next = NULL;
235 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
236 	atomic_dec_64(&dbuf_hash_count);
237 }
238 
239 static arc_evict_func_t dbuf_do_evict;
240 
241 typedef enum {
242 	DBVU_EVICTING,
243 	DBVU_NOT_EVICTING
244 } dbvu_verify_type_t;
245 
246 static void
247 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
248 {
249 #ifdef ZFS_DEBUG
250 	int64_t holds;
251 
252 	if (db->db_user == NULL)
253 		return;
254 
255 	/* Only data blocks support the attachment of user data. */
256 	ASSERT(db->db_level == 0);
257 
258 	/* Clients must resolve a dbuf before attaching user data. */
259 	ASSERT(db->db.db_data != NULL);
260 	ASSERT3U(db->db_state, ==, DB_CACHED);
261 
262 	holds = refcount_count(&db->db_holds);
263 	if (verify_type == DBVU_EVICTING) {
264 		/*
265 		 * Immediate eviction occurs when holds == dirtycnt.
266 		 * For normal eviction buffers, holds is zero on
267 		 * eviction, except when dbuf_fix_old_data() calls
268 		 * dbuf_clear_data().  However, the hold count can grow
269 		 * during eviction even though db_mtx is held (see
270 		 * dmu_bonus_hold() for an example), so we can only
271 		 * test the generic invariant that holds >= dirtycnt.
272 		 */
273 		ASSERT3U(holds, >=, db->db_dirtycnt);
274 	} else {
275 		if (db->db_user_immediate_evict == TRUE)
276 			ASSERT3U(holds, >=, db->db_dirtycnt);
277 		else
278 			ASSERT3U(holds, >, 0);
279 	}
280 #endif
281 }
282 
283 static void
284 dbuf_evict_user(dmu_buf_impl_t *db)
285 {
286 	dmu_buf_user_t *dbu = db->db_user;
287 
288 	ASSERT(MUTEX_HELD(&db->db_mtx));
289 
290 	if (dbu == NULL)
291 		return;
292 
293 	dbuf_verify_user(db, DBVU_EVICTING);
294 	db->db_user = NULL;
295 
296 #ifdef ZFS_DEBUG
297 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
298 		*dbu->dbu_clear_on_evict_dbufp = NULL;
299 #endif
300 
301 	/*
302 	 * Invoke the callback from a taskq to avoid lock order reversals
303 	 * and limit stack depth.
304 	 */
305 	taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
306 	    &dbu->dbu_tqent);
307 }
308 
309 boolean_t
310 dbuf_is_metadata(dmu_buf_impl_t *db)
311 {
312 	if (db->db_level > 0) {
313 		return (B_TRUE);
314 	} else {
315 		boolean_t is_metadata;
316 
317 		DB_DNODE_ENTER(db);
318 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
319 		DB_DNODE_EXIT(db);
320 
321 		return (is_metadata);
322 	}
323 }
324 
325 void
326 dbuf_evict(dmu_buf_impl_t *db)
327 {
328 	ASSERT(MUTEX_HELD(&db->db_mtx));
329 	ASSERT(db->db_buf == NULL);
330 	ASSERT(db->db_data_pending == NULL);
331 
332 	dbuf_clear(db);
333 	dbuf_destroy(db);
334 }
335 
336 void
337 dbuf_init(void)
338 {
339 	uint64_t hsize = 1ULL << 16;
340 	dbuf_hash_table_t *h = &dbuf_hash_table;
341 	int i;
342 
343 	/*
344 	 * The hash table is big enough to fill all of physical memory
345 	 * with an average 4K block size.  The table will take up
346 	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
347 	 */
348 	while (hsize * 4096 < physmem * PAGESIZE)
349 		hsize <<= 1;
350 
351 retry:
352 	h->hash_table_mask = hsize - 1;
353 	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
354 	if (h->hash_table == NULL) {
355 		/* XXX - we should really return an error instead of assert */
356 		ASSERT(hsize > (1ULL << 10));
357 		hsize >>= 1;
358 		goto retry;
359 	}
360 
361 	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
362 	    sizeof (dmu_buf_impl_t),
363 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
364 
365 	for (i = 0; i < DBUF_MUTEXES; i++)
366 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
367 
368 	/*
369 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
370 	 * configuration is not required.
371 	 */
372 	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);
373 }
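
/*
 * A worked example of the hash table sizing above (memory size assumed
 * for illustration): with 8GB of physical memory, physmem * PAGESIZE is
 * 2^33, so hsize doubles from 2^16 until 2^21 * 4096 == 2^33.  The table
 * is then 2^21 * sizeof (void *) = 16MB, i.e. the 2MB per GB cited in
 * the sizing comment.
 */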
374 
375 void
376 dbuf_fini(void)
377 {
378 	dbuf_hash_table_t *h = &dbuf_hash_table;
379 	int i;
380 
381 	for (i = 0; i < DBUF_MUTEXES; i++)
382 		mutex_destroy(&h->hash_mutexes[i]);
383 	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
384 	kmem_cache_destroy(dbuf_cache);
385 	taskq_destroy(dbu_evict_taskq);
386 }
387 
388 /*
389  * Other stuff.
390  */
391 
392 #ifdef ZFS_DEBUG
393 static void
394 dbuf_verify(dmu_buf_impl_t *db)
395 {
396 	dnode_t *dn;
397 	dbuf_dirty_record_t *dr;
398 
399 	ASSERT(MUTEX_HELD(&db->db_mtx));
400 
401 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
402 		return;
403 
404 	ASSERT(db->db_objset != NULL);
405 	DB_DNODE_ENTER(db);
406 	dn = DB_DNODE(db);
407 	if (dn == NULL) {
408 		ASSERT(db->db_parent == NULL);
409 		ASSERT(db->db_blkptr == NULL);
410 	} else {
411 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
412 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
413 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
414 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
415 		    db->db_blkid == DMU_SPILL_BLKID ||
416 		    !avl_is_empty(&dn->dn_dbufs));
417 	}
418 	if (db->db_blkid == DMU_BONUS_BLKID) {
419 		ASSERT(dn != NULL);
420 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
421 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
422 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
423 		ASSERT(dn != NULL);
424 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
425 		ASSERT0(db->db.db_offset);
426 	} else {
427 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
428 	}
429 
430 	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
431 		ASSERT(dr->dr_dbuf == db);
432 
433 	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
434 		ASSERT(dr->dr_dbuf == db);
435 
436 	/*
437 	 * We can't assert that db_size matches dn_datablksz because it
438 	 * can be momentarily different when another thread is doing
439 	 * dnode_set_blksz().
440 	 */
441 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
442 		dr = db->db_data_pending;
443 		/*
444 		 * It should only be modified in syncing context, so
445 		 * make sure we only have one copy of the data.
446 		 */
447 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
448 	}
449 
450 	/* verify db->db_blkptr */
451 	if (db->db_blkptr) {
452 		if (db->db_parent == dn->dn_dbuf) {
453 			/* db is pointed to by the dnode */
454 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
455 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
456 				ASSERT(db->db_parent == NULL);
457 			else
458 				ASSERT(db->db_parent != NULL);
459 			if (db->db_blkid != DMU_SPILL_BLKID)
460 				ASSERT3P(db->db_blkptr, ==,
461 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
462 		} else {
463 			/* db is pointed to by an indirect block */
464 			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
465 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
466 			ASSERT3U(db->db_parent->db.db_object, ==,
467 			    db->db.db_object);
468 			/*
469 			 * dnode_grow_indblksz() can make this fail if we don't
470 			 * have the struct_rwlock.  XXX indblksz no longer
471 			 * grows.  safe to do this now?
472 			 */
473 			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
474 				ASSERT3P(db->db_blkptr, ==,
475 				    ((blkptr_t *)db->db_parent->db.db_data +
476 				    db->db_blkid % epb));
477 			}
478 		}
479 	}
480 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
481 	    (db->db_buf == NULL || db->db_buf->b_data) &&
482 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
483 	    db->db_state != DB_FILL && !dn->dn_free_txg) {
484 		/*
485 		 * If the blkptr isn't set but they have nonzero data,
486 		 * it had better be dirty, otherwise we'll lose that
487 		 * data when we evict this buffer.
488 		 */
489 		if (db->db_dirtycnt == 0) {
490 			uint64_t *buf = db->db.db_data;
491 			int i;
492 
493 			for (i = 0; i < db->db.db_size >> 3; i++) {
494 				ASSERT(buf[i] == 0);
495 			}
496 		}
497 	}
498 	DB_DNODE_EXIT(db);
499 }
500 #endif
501 
502 static void
503 dbuf_clear_data(dmu_buf_impl_t *db)
504 {
505 	ASSERT(MUTEX_HELD(&db->db_mtx));
506 	dbuf_evict_user(db);
507 	db->db_buf = NULL;
508 	db->db.db_data = NULL;
509 	if (db->db_state != DB_NOFILL)
510 		db->db_state = DB_UNCACHED;
511 }
512 
513 static void
514 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
515 {
516 	ASSERT(MUTEX_HELD(&db->db_mtx));
517 	ASSERT(buf != NULL);
518 
519 	db->db_buf = buf;
520 	ASSERT(buf->b_data != NULL);
521 	db->db.db_data = buf->b_data;
522 	if (!arc_released(buf))
523 		arc_set_callback(buf, dbuf_do_evict, db);
524 }
525 
526 /*
527  * Loan out an arc_buf for read.  Return the loaned arc_buf.
528  */
529 arc_buf_t *
530 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
531 {
532 	arc_buf_t *abuf;
533 
534 	mutex_enter(&db->db_mtx);
535 	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
536 		int blksz = db->db.db_size;
537 		spa_t *spa = db->db_objset->os_spa;
538 
539 		mutex_exit(&db->db_mtx);
540 		abuf = arc_loan_buf(spa, blksz);
541 		bcopy(db->db.db_data, abuf->b_data, blksz);
542 	} else {
543 		abuf = db->db_buf;
544 		arc_loan_inuse_buf(abuf, db);
545 		dbuf_clear_data(db);
546 		mutex_exit(&db->db_mtx);
547 	}
548 	return (abuf);
549 }
550 
551 /*
552  * Calculate which level n block references the data at the level 0 offset
553  * provided.
554  */
555 uint64_t
556 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
557 {
558 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
559 		/*
560 		 * The level n blkid is equal to the level 0 blkid divided by
561 		 * the number of level 0s in a level n block.
562 		 *
563 		 * The level 0 blkid is offset >> datablkshift =
564 		 * offset / 2^datablkshift.
565 		 *
566 		 * The number of level 0s in a level n block is the number of
567 		 * block pointers in an indirect block, raised to the power of
568 		 * level.  This is (2^(indblkshift - SPA_BLKPTRSHIFT))^level =
569 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
570 		 *
571 		 * Thus, the level n blkid is: offset /
572 		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
573 		 * = offset / 2^(datablkshift + level *
574 		 *   (indblkshift - SPA_BLKPTRSHIFT))
575 		 * = offset >> (datablkshift + level *
576 		 *   (indblkshift - SPA_BLKPTRSHIFT))
577 		 */
578 		return (offset >> (dn->dn_datablkshift + level *
579 		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
580 	} else {
581 		ASSERT3U(offset, <, dn->dn_datablksz);
582 		return (0);
583 	}
584 }
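
/*
 * A worked example of the shift above (block sizes assumed for
 * illustration): with 128K data blocks (datablkshift = 17) and 16K
 * indirect blocks (indblkshift = 14), each indirect block holds
 * 2^(14 - 7) = 128 block pointers.  For offset = 1GB = 2^30, the level 0
 * blkid is 2^30 >> 17 = 8192 and the level 1 blkid is
 * 2^30 >> (17 + 7) = 64, i.e. 8192 / 128.
 */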
585 
586 static void
587 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
588 {
589 	dmu_buf_impl_t *db = vdb;
590 
591 	mutex_enter(&db->db_mtx);
592 	ASSERT3U(db->db_state, ==, DB_READ);
593 	/*
594 	 * All reads are synchronous, so we must have a hold on the dbuf
595 	 */
596 	ASSERT(refcount_count(&db->db_holds) > 0);
597 	ASSERT(db->db_buf == NULL);
598 	ASSERT(db->db.db_data == NULL);
599 	if (db->db_level == 0 && db->db_freed_in_flight) {
600 		/* we were freed in flight; disregard any error */
601 		arc_release(buf, db);
602 		bzero(buf->b_data, db->db.db_size);
603 		arc_buf_freeze(buf);
604 		db->db_freed_in_flight = FALSE;
605 		dbuf_set_data(db, buf);
606 		db->db_state = DB_CACHED;
607 	} else if (zio == NULL || zio->io_error == 0) {
608 		dbuf_set_data(db, buf);
609 		db->db_state = DB_CACHED;
610 	} else {
611 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
612 		ASSERT3P(db->db_buf, ==, NULL);
613 		VERIFY(arc_buf_remove_ref(buf, db));
614 		db->db_state = DB_UNCACHED;
615 	}
616 	cv_broadcast(&db->db_changed);
617 	dbuf_rele_and_unlock(db, NULL);
618 }
619 
620 static void
621 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
622 {
623 	dnode_t *dn;
624 	zbookmark_phys_t zb;
625 	arc_flags_t aflags = ARC_FLAG_NOWAIT;
626 
627 	DB_DNODE_ENTER(db);
628 	dn = DB_DNODE(db);
629 	ASSERT(!refcount_is_zero(&db->db_holds));
630 	/* We need the struct_rwlock to prevent db_blkptr from changing. */
631 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
632 	ASSERT(MUTEX_HELD(&db->db_mtx));
633 	ASSERT(db->db_state == DB_UNCACHED);
634 	ASSERT(db->db_buf == NULL);
635 
636 	if (db->db_blkid == DMU_BONUS_BLKID) {
637 		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
638 
639 		ASSERT3U(bonuslen, <=, db->db.db_size);
640 		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
641 		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
642 		if (bonuslen < DN_MAX_BONUSLEN)
643 			bzero(db->db.db_data, DN_MAX_BONUSLEN);
644 		if (bonuslen)
645 			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
646 		DB_DNODE_EXIT(db);
647 		db->db_state = DB_CACHED;
648 		mutex_exit(&db->db_mtx);
649 		return;
650 	}
651 
652 	/*
653 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
654 	 * processes the delete record and clears the bp while we are waiting
655 	 * for the dn_mtx (resulting in a "no" from block_freed).
656 	 */
657 	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
658 	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
659 	    BP_IS_HOLE(db->db_blkptr)))) {
660 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
661 
662 		DB_DNODE_EXIT(db);
663 		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
664 		    db->db.db_size, db, type));
665 		bzero(db->db.db_data, db->db.db_size);
666 		db->db_state = DB_CACHED;
667 		mutex_exit(&db->db_mtx);
668 		return;
669 	}
670 
671 	DB_DNODE_EXIT(db);
672 
673 	db->db_state = DB_READ;
674 	mutex_exit(&db->db_mtx);
675 
676 	if (DBUF_IS_L2CACHEABLE(db))
677 		aflags |= ARC_FLAG_L2CACHE;
678 	if (DBUF_IS_L2COMPRESSIBLE(db))
679 		aflags |= ARC_FLAG_L2COMPRESS;
680 
681 	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
682 	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
683 	    db->db.db_object, db->db_level, db->db_blkid);
684 
685 	dbuf_add_ref(db, NULL);
686 
687 	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
688 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
689 	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
690 	    &aflags, &zb);
691 }
692 
693 int
694 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
695 {
696 	int err = 0;
697 	boolean_t havepzio = (zio != NULL);
698 	boolean_t prefetch;
699 	dnode_t *dn;
700 
701 	/*
702 	 * We don't have to hold the mutex to check db_state because it
703 	 * can't be freed while we have a hold on the buffer.
704 	 */
705 	ASSERT(!refcount_is_zero(&db->db_holds));
706 
707 	if (db->db_state == DB_NOFILL)
708 		return (SET_ERROR(EIO));
709 
710 	DB_DNODE_ENTER(db);
711 	dn = DB_DNODE(db);
712 	if ((flags & DB_RF_HAVESTRUCT) == 0)
713 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
714 
715 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
716 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
717 	    DBUF_IS_CACHEABLE(db);
718 
719 	mutex_enter(&db->db_mtx);
720 	if (db->db_state == DB_CACHED) {
721 		mutex_exit(&db->db_mtx);
722 		if (prefetch)
723 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
724 		if ((flags & DB_RF_HAVESTRUCT) == 0)
725 			rw_exit(&dn->dn_struct_rwlock);
726 		DB_DNODE_EXIT(db);
727 	} else if (db->db_state == DB_UNCACHED) {
728 		spa_t *spa = dn->dn_objset->os_spa;
729 
730 		if (zio == NULL)
731 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
732 		dbuf_read_impl(db, zio, flags);
733 
734 		/* dbuf_read_impl has dropped db_mtx for us */
735 
736 		if (prefetch)
737 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
738 
739 		if ((flags & DB_RF_HAVESTRUCT) == 0)
740 			rw_exit(&dn->dn_struct_rwlock);
741 		DB_DNODE_EXIT(db);
742 
743 		if (!havepzio)
744 			err = zio_wait(zio);
745 	} else {
746 		/*
747 		 * Another reader came in while the dbuf was in flight
748 		 * between UNCACHED and CACHED.  Either a writer will finish
749 		 * writing the buffer (sending the dbuf to CACHED) or the
750 		 * first reader's request will reach the read_done callback
751 		 * and send the dbuf to CACHED.  Otherwise, a failure
752 		 * occurred and the dbuf went to UNCACHED.
753 		 */
754 		mutex_exit(&db->db_mtx);
755 		if (prefetch)
756 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
757 		if ((flags & DB_RF_HAVESTRUCT) == 0)
758 			rw_exit(&dn->dn_struct_rwlock);
759 		DB_DNODE_EXIT(db);
760 
761 		/* Wait unless the caller asked to skip it (DB_RF_NEVERWAIT). */
762 		mutex_enter(&db->db_mtx);
763 		if ((flags & DB_RF_NEVERWAIT) == 0) {
764 			while (db->db_state == DB_READ ||
765 			    db->db_state == DB_FILL) {
766 				ASSERT(db->db_state == DB_READ ||
767 				    (flags & DB_RF_HAVESTRUCT) == 0);
768 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
769 				    db, zio_t *, zio);
770 				cv_wait(&db->db_changed, &db->db_mtx);
771 			}
772 			if (db->db_state == DB_UNCACHED)
773 				err = SET_ERROR(EIO);
774 		}
775 		mutex_exit(&db->db_mtx);
776 	}
777 
778 	ASSERT(err || havepzio || db->db_state == DB_CACHED);
779 	return (err);
780 }
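
/*
 * A minimal sketch of a typical synchronous caller (modeled on
 * dmu_buf_will_dirty() below); DB_RF_HAVESTRUCT is added only when the
 * caller already holds dn_struct_rwlock:
 *
 *	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
 *	if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
 *		rf |= DB_RF_HAVESTRUCT;
 *	(void) dbuf_read(db, NULL, rf);
 */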
781 
782 static void
783 dbuf_noread(dmu_buf_impl_t *db)
784 {
785 	ASSERT(!refcount_is_zero(&db->db_holds));
786 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
787 	mutex_enter(&db->db_mtx);
788 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
789 		cv_wait(&db->db_changed, &db->db_mtx);
790 	if (db->db_state == DB_UNCACHED) {
791 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
792 		spa_t *spa = db->db_objset->os_spa;
793 
794 		ASSERT(db->db_buf == NULL);
795 		ASSERT(db->db.db_data == NULL);
796 		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
797 		db->db_state = DB_FILL;
798 	} else if (db->db_state == DB_NOFILL) {
799 		dbuf_clear_data(db);
800 	} else {
801 		ASSERT3U(db->db_state, ==, DB_CACHED);
802 	}
803 	mutex_exit(&db->db_mtx);
804 }
805 
806 /*
807  * This is our just-in-time copy function.  It makes a copy of
808  * buffers that have been modified in a previous transaction
809  * group, before we modify them in the current active group.
810  *
811  * This function is used in two places: when we are dirtying a
812  * buffer for the first time in a txg, and when we are freeing
813  * a range in a dnode that includes this buffer.
814  *
815  * Note that when we are called from dbuf_free_range() we do
816  * not put a hold on the buffer, we just traverse the active
817  * dbuf list for the dnode.
818  */
819 static void
820 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
821 {
822 	dbuf_dirty_record_t *dr = db->db_last_dirty;
823 
824 	ASSERT(MUTEX_HELD(&db->db_mtx));
825 	ASSERT(db->db.db_data != NULL);
826 	ASSERT(db->db_level == 0);
827 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
828 
829 	if (dr == NULL ||
830 	    (dr->dt.dl.dr_data !=
831 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
832 		return;
833 
834 	/*
835 	 * If the last dirty record for this dbuf has not yet synced
836 	 * and it still references the dbuf data, either:
837 	 *	reset the reference to point to a new copy,
838 	 * or (if there are no active holders)
839 	 *	just null out the current db_data pointer.
840 	 */
841 	ASSERT(dr->dr_txg >= txg - 2);
842 	if (db->db_blkid == DMU_BONUS_BLKID) {
843 		/* Note that the data bufs here are zio_bufs */
844 		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
845 		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
846 		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
847 	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
848 		int size = db->db.db_size;
849 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
850 		spa_t *spa = db->db_objset->os_spa;
851 
852 		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
853 		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
854 	} else {
855 		dbuf_clear_data(db);
856 	}
857 }
858 
859 void
860 dbuf_unoverride(dbuf_dirty_record_t *dr)
861 {
862 	dmu_buf_impl_t *db = dr->dr_dbuf;
863 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
864 	uint64_t txg = dr->dr_txg;
865 
866 	ASSERT(MUTEX_HELD(&db->db_mtx));
867 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
868 	ASSERT(db->db_level == 0);
869 
870 	if (db->db_blkid == DMU_BONUS_BLKID ||
871 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
872 		return;
873 
874 	ASSERT(db->db_data_pending != dr);
875 
876 	/* free this block */
877 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
878 		zio_free(db->db_objset->os_spa, txg, bp);
879 
880 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
881 	dr->dt.dl.dr_nopwrite = B_FALSE;
882 
883 	/*
884 	 * Release the already-written buffer, so we leave it in
885 	 * a consistent dirty state.  Note that all callers are
886 	 * modifying the buffer, so they will immediately do
887 	 * another (redundant) arc_release().  Therefore, leave
888 	 * the buf thawed to save the effort of freezing &
889 	 * immediately re-thawing it.
890 	 */
891 	arc_release(dr->dt.dl.dr_data, db);
892 }
893 
894 /*
895  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
896  * data blocks in the free range, so that any future readers will find
897  * empty blocks.
898  *
899  * This is a no-op if the dataset is in the middle of an incremental
900  * receive; see comment below for details.
901  */
902 void
903 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
904     dmu_tx_t *tx)
905 {
906 	dmu_buf_impl_t db_search;
907 	dmu_buf_impl_t *db, *db_next;
908 	uint64_t txg = tx->tx_txg;
909 	avl_index_t where;
910 
911 	if (end_blkid > dn->dn_maxblkid && (end_blkid != DMU_SPILL_BLKID))
912 		end_blkid = dn->dn_maxblkid;
913 	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
914 
915 	db_search.db_level = 0;
916 	db_search.db_blkid = start_blkid;
917 	db_search.db_state = DB_SEARCH;
918 
919 	mutex_enter(&dn->dn_dbufs_mtx);
920 	if (start_blkid >= dn->dn_unlisted_l0_blkid) {
921 		/* There can't be any dbufs in this range; no need to search. */
922 #ifdef DEBUG
923 		db = avl_find(&dn->dn_dbufs, &db_search, &where);
924 		ASSERT3P(db, ==, NULL);
925 		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
926 		ASSERT(db == NULL || db->db_level > 0);
927 #endif
928 		mutex_exit(&dn->dn_dbufs_mtx);
929 		return;
930 	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
931 		/*
932 		 * If we are receiving, we expect there to be no dbufs in
933 		 * the range to be freed, because receive modifies each
934 		 * block at most once, and in offset order.  If this is
935 		 * not the case, it can lead to performance problems,
936 		 * so note that we unexpectedly took the slow path.
937 		 */
938 		atomic_inc_64(&zfs_free_range_recv_miss);
939 	}
940 
941 	db = avl_find(&dn->dn_dbufs, &db_search, &where);
942 	ASSERT3P(db, ==, NULL);
943 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
944 
945 	for (; db != NULL; db = db_next) {
946 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
947 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
948 
949 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
950 			break;
951 		}
952 		ASSERT3U(db->db_blkid, >=, start_blkid);
953 
954 		/* found a level 0 buffer in the range */
955 		mutex_enter(&db->db_mtx);
956 		if (dbuf_undirty(db, tx)) {
957 			/* mutex has been dropped and dbuf destroyed */
958 			continue;
959 		}
960 
961 		if (db->db_state == DB_UNCACHED ||
962 		    db->db_state == DB_NOFILL ||
963 		    db->db_state == DB_EVICTING) {
964 			ASSERT(db->db.db_data == NULL);
965 			mutex_exit(&db->db_mtx);
966 			continue;
967 		}
968 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
969 			/* will be handled in dbuf_read_done or dbuf_rele */
970 			db->db_freed_in_flight = TRUE;
971 			mutex_exit(&db->db_mtx);
972 			continue;
973 		}
974 		if (refcount_count(&db->db_holds) == 0) {
975 			ASSERT(db->db_buf);
976 			dbuf_clear(db);
977 			continue;
978 		}
979 		/* The dbuf is referenced */
980 
981 		if (db->db_last_dirty != NULL) {
982 			dbuf_dirty_record_t *dr = db->db_last_dirty;
983 
984 			if (dr->dr_txg == txg) {
985 				/*
986 				 * This buffer is "in-use"; re-adjust the file
987 				 * size to reflect that this buffer may
988 				 * contain new data when we sync.
989 				 */
990 				if (db->db_blkid != DMU_SPILL_BLKID &&
991 				    db->db_blkid > dn->dn_maxblkid)
992 					dn->dn_maxblkid = db->db_blkid;
993 				dbuf_unoverride(dr);
994 			} else {
995 				/*
996 				 * This dbuf is not dirty in the open context.
997 	 * Either uncache it (if it's not referenced in
998 				 * the open context) or reset its contents to
999 				 * empty.
1000 				 */
1001 				dbuf_fix_old_data(db, txg);
1002 			}
1003 		}
1004 		/* clear the contents if it's cached */
1005 		if (db->db_state == DB_CACHED) {
1006 			ASSERT(db->db.db_data != NULL);
1007 			arc_release(db->db_buf, db);
1008 			bzero(db->db.db_data, db->db.db_size);
1009 			arc_buf_freeze(db->db_buf);
1010 		}
1011 
1012 		mutex_exit(&db->db_mtx);
1013 	}
1014 	mutex_exit(&dn->dn_dbufs_mtx);
1015 }
1016 
1017 static int
1018 dbuf_block_freeable(dmu_buf_impl_t *db)
1019 {
1020 	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
1021 	uint64_t birth_txg = 0;
1022 
1023 	/*
1024 	 * We don't need any locking to protect db_blkptr:
1025 	 * If it's syncing, then db_last_dirty will be set
1026 	 * so we'll ignore db_blkptr.
1027 	 *
1028 	 * This logic ensures that only block births for
1029 	 * filled blocks are considered.
1030 	 */
1031 	ASSERT(MUTEX_HELD(&db->db_mtx));
1032 	if (db->db_last_dirty && (db->db_blkptr == NULL ||
1033 	    !BP_IS_HOLE(db->db_blkptr))) {
1034 		birth_txg = db->db_last_dirty->dr_txg;
1035 	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1036 		birth_txg = db->db_blkptr->blk_birth;
1037 	}
1038 
1039 	/*
1040 	 * If this block doesn't exist or is in a snapshot, it can't be freed.
1041 	 * Don't pass the bp to dsl_dataset_block_freeable() since we
1042 	 * are holding the db_mtx lock and might deadlock if we are
1043 	 * prefetching a dedup-ed block.
1044 	 */
1045 	if (birth_txg != 0)
1046 		return (ds == NULL ||
1047 		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
1048 	else
1049 		return (B_FALSE);
1050 }
1051 
1052 void
1053 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1054 {
1055 	arc_buf_t *buf, *obuf;
1056 	int osize = db->db.db_size;
1057 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1058 	dnode_t *dn;
1059 
1060 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1061 
1062 	DB_DNODE_ENTER(db);
1063 	dn = DB_DNODE(db);
1064 
1065 	/* XXX does *this* func really need the lock? */
1066 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1067 
1068 	/*
1069 	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
1070 	 * is OK, because there can be no other references to the db
1071 	 * when we are changing its size, so no concurrent DB_FILL can
1072 	 * be happening.
1073 	 */
1074 	/*
1075 	 * XXX we should be doing a dbuf_read, checking the return
1076 	 * value and returning that up to our callers
1077 	 */
1078 	dmu_buf_will_dirty(&db->db, tx);
1079 
1080 	/* create the data buffer for the new block */
1081 	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
1082 
1083 	/* copy old block data to the new block */
1084 	obuf = db->db_buf;
1085 	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1086 	/* zero the remainder */
1087 	if (size > osize)
1088 		bzero((uint8_t *)buf->b_data + osize, size - osize);
1089 
1090 	mutex_enter(&db->db_mtx);
1091 	dbuf_set_data(db, buf);
1092 	VERIFY(arc_buf_remove_ref(obuf, db));
1093 	db->db.db_size = size;
1094 
1095 	if (db->db_level == 0) {
1096 		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1097 		db->db_last_dirty->dt.dl.dr_data = buf;
1098 	}
1099 	mutex_exit(&db->db_mtx);
1100 
1101 	dnode_willuse_space(dn, size-osize, tx);
1102 	DB_DNODE_EXIT(db);
1103 }
1104 
1105 void
1106 dbuf_release_bp(dmu_buf_impl_t *db)
1107 {
1108 	objset_t *os = db->db_objset;
1109 
1110 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1111 	ASSERT(arc_released(os->os_phys_buf) ||
1112 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
1113 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1114 
1115 	(void) arc_release(db->db_buf, db);
1116 }
1117 
1118 /*
1119  * We already have a dirty record for this TXG, and we are being
1120  * dirtied again.
1121  */
1122 static void
1123 dbuf_redirty(dbuf_dirty_record_t *dr)
1124 {
1125 	dmu_buf_impl_t *db = dr->dr_dbuf;
1126 
1127 	ASSERT(MUTEX_HELD(&db->db_mtx));
1128 
1129 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1130 		/*
1131 		 * If this buffer has already been written out,
1132 		 * we now need to reset its state.
1133 		 */
1134 		dbuf_unoverride(dr);
1135 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1136 		    db->db_state != DB_NOFILL) {
1137 			/* Already released on initial dirty, so just thaw. */
1138 			ASSERT(arc_released(db->db_buf));
1139 			arc_buf_thaw(db->db_buf);
1140 		}
1141 	}
1142 }
1143 
1144 dbuf_dirty_record_t *
1145 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1146 {
1147 	dnode_t *dn;
1148 	objset_t *os;
1149 	dbuf_dirty_record_t **drp, *dr;
1150 	int drop_struct_lock = FALSE;
1151 	boolean_t do_free_accounting = B_FALSE;
1152 	int txgoff = tx->tx_txg & TXG_MASK;
1153 
1154 	ASSERT(tx->tx_txg != 0);
1155 	ASSERT(!refcount_is_zero(&db->db_holds));
1156 	DMU_TX_DIRTY_BUF(tx, db);
1157 
1158 	DB_DNODE_ENTER(db);
1159 	dn = DB_DNODE(db);
1160 	/*
1161 	 * Shouldn't dirty a regular buffer in syncing context.  Private
1162 	 * objects may be dirtied in syncing context, but only if they
1163 	 * were already pre-dirtied in open context.
1164 	 */
1165 	ASSERT(!dmu_tx_is_syncing(tx) ||
1166 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1167 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1168 	    dn->dn_objset->os_dsl_dataset == NULL);
1169 	/*
1170 	 * We make this assert for private objects as well, but after we
1171 	 * check if we're already dirty.  They are allowed to re-dirty
1172 	 * in syncing context.
1173 	 */
1174 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1175 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1176 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1177 
1178 	mutex_enter(&db->db_mtx);
1179 	/*
1180 	 * XXX make this true for indirects too?  The problem is that
1181 	 * transactions created with dmu_tx_create_assigned() from
1182 	 * syncing context don't bother holding ahead.
1183 	 */
1184 	ASSERT(db->db_level != 0 ||
1185 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1186 	    db->db_state == DB_NOFILL);
1187 
1188 	mutex_enter(&dn->dn_mtx);
1189 	/*
1190 	 * Don't set dirtyctx to SYNC if we're just modifying this as we
1191 	 * initialize the objset.
1192 	 */
1193 	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1194 	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1195 		dn->dn_dirtyctx =
1196 		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1197 		ASSERT(dn->dn_dirtyctx_firstset == NULL);
1198 		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1199 	}
1200 	mutex_exit(&dn->dn_mtx);
1201 
1202 	if (db->db_blkid == DMU_SPILL_BLKID)
1203 		dn->dn_have_spill = B_TRUE;
1204 
1205 	/*
1206 	 * If this buffer is already dirty, we're done.
1207 	 */
1208 	drp = &db->db_last_dirty;
1209 	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1210 	    db->db.db_object == DMU_META_DNODE_OBJECT);
1211 	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1212 		drp = &dr->dr_next;
1213 	if (dr && dr->dr_txg == tx->tx_txg) {
1214 		DB_DNODE_EXIT(db);
1215 
1216 		dbuf_redirty(dr);
1217 		mutex_exit(&db->db_mtx);
1218 		return (dr);
1219 	}
1220 
1221 	/*
1222 	 * Only valid if not already dirty.
1223 	 */
1224 	ASSERT(dn->dn_object == 0 ||
1225 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1226 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1227 
1228 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
1229 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1230 	    dn->dn_phys->dn_nlevels > db->db_level ||
1231 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
1232 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1233 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1234 
1235 	/*
1236 	 * We should only be dirtying in syncing context if it's the
1237 	 * mos or we're initializing the os or it's a special object.
1238 	 * However, we are allowed to dirty in syncing context provided
1239 	 * we already dirtied it in open context.  Hence we must make
1240 	 * this assertion only if we're not already dirty.
1241 	 */
1242 	os = dn->dn_objset;
1243 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1244 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1245 	ASSERT(db->db.db_size != 0);
1246 
1247 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1248 
1249 	if (db->db_blkid != DMU_BONUS_BLKID) {
1250 		/*
1251 		 * Update the accounting.
1252 		 * Note: we delay "free accounting" until after we drop
1253 		 * the db_mtx.  This keeps us from grabbing other locks
1254 		 * (and possibly deadlocking) in bp_get_dsize() while
1255 		 * also holding the db_mtx.
1256 		 */
1257 		dnode_willuse_space(dn, db->db.db_size, tx);
1258 		do_free_accounting = dbuf_block_freeable(db);
1259 	}
1260 
1261 	/*
1262 	 * If this buffer is dirty in an old transaction group we need
1263 	 * to make a copy of it so that the changes we make in this
1264 	 * transaction group won't leak out when we sync the older txg.
1265 	 */
1266 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1267 	if (db->db_level == 0) {
1268 		void *data_old = db->db_buf;
1269 
1270 		if (db->db_state != DB_NOFILL) {
1271 			if (db->db_blkid == DMU_BONUS_BLKID) {
1272 				dbuf_fix_old_data(db, tx->tx_txg);
1273 				data_old = db->db.db_data;
1274 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1275 				/*
1276 				 * Release the data buffer from the cache so
1277 				 * that we can modify it without impacting
1278 				 * possible other users of this cached data
1279 				 * block.  Note that indirect blocks and
1280 				 * private objects are not released until the
1281 				 * syncing state (since they are only modified
1282 				 * then).
1283 				 */
1284 				arc_release(db->db_buf, db);
1285 				dbuf_fix_old_data(db, tx->tx_txg);
1286 				data_old = db->db_buf;
1287 			}
1288 			ASSERT(data_old != NULL);
1289 		}
1290 		dr->dt.dl.dr_data = data_old;
1291 	} else {
1292 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1293 		list_create(&dr->dt.di.dr_children,
1294 		    sizeof (dbuf_dirty_record_t),
1295 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
1296 	}
1297 	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
1298 		dr->dr_accounted = db->db.db_size;
1299 	dr->dr_dbuf = db;
1300 	dr->dr_txg = tx->tx_txg;
1301 	dr->dr_next = *drp;
1302 	*drp = dr;
1303 
1304 	/*
1305 	 * We could have been freed_in_flight between the dbuf_noread
1306 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
1307 	 * happened after the free.
1308 	 */
1309 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1310 	    db->db_blkid != DMU_SPILL_BLKID) {
1311 		mutex_enter(&dn->dn_mtx);
1312 		if (dn->dn_free_ranges[txgoff] != NULL) {
1313 			range_tree_clear(dn->dn_free_ranges[txgoff],
1314 			    db->db_blkid, 1);
1315 		}
1316 		mutex_exit(&dn->dn_mtx);
1317 		db->db_freed_in_flight = FALSE;
1318 	}
1319 
1320 	/*
1321 	 * This buffer is now part of this txg
1322 	 */
1323 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1324 	db->db_dirtycnt += 1;
1325 	ASSERT3U(db->db_dirtycnt, <=, 3);
1326 
1327 	mutex_exit(&db->db_mtx);
1328 
1329 	if (db->db_blkid == DMU_BONUS_BLKID ||
1330 	    db->db_blkid == DMU_SPILL_BLKID) {
1331 		mutex_enter(&dn->dn_mtx);
1332 		ASSERT(!list_link_active(&dr->dr_dirty_node));
1333 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1334 		mutex_exit(&dn->dn_mtx);
1335 		dnode_setdirty(dn, tx);
1336 		DB_DNODE_EXIT(db);
1337 		return (dr);
1338 	} else if (do_free_accounting) {
1339 		blkptr_t *bp = db->db_blkptr;
1340 		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1341 		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1342 		/*
1343 		 * This is only a guess -- if the dbuf is dirty
1344 		 * in a previous txg, we don't know how much
1345 		 * space it will use on disk yet.  We should
1346 		 * really have the struct_rwlock to access
1347 		 * db_blkptr, but since this is just a guess,
1348 		 * it's OK if we get an odd answer.
1349 		 */
1350 		ddt_prefetch(os->os_spa, bp);
1351 		dnode_willuse_space(dn, -willfree, tx);
1352 	}
1353 
1354 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1355 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
1356 		drop_struct_lock = TRUE;
1357 	}
1358 
1359 	if (db->db_level == 0) {
1360 		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1361 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
1362 	}
1363 
1364 	if (db->db_level+1 < dn->dn_nlevels) {
1365 		dmu_buf_impl_t *parent = db->db_parent;
1366 		dbuf_dirty_record_t *di;
1367 		int parent_held = FALSE;
1368 
1369 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1370 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1371 
1372 			parent = dbuf_hold_level(dn, db->db_level+1,
1373 			    db->db_blkid >> epbs, FTAG);
1374 			ASSERT(parent != NULL);
1375 			parent_held = TRUE;
1376 		}
1377 		if (drop_struct_lock)
1378 			rw_exit(&dn->dn_struct_rwlock);
1379 		ASSERT3U(db->db_level+1, ==, parent->db_level);
1380 		di = dbuf_dirty(parent, tx);
1381 		if (parent_held)
1382 			dbuf_rele(parent, FTAG);
1383 
1384 		mutex_enter(&db->db_mtx);
1385 		/*
1386 		 * Since we've dropped the mutex, it's possible that
1387 		 * dbuf_undirty() might have changed this out from under us.
1388 		 */
1389 		if (db->db_last_dirty == dr ||
1390 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
1391 			mutex_enter(&di->dt.di.dr_mtx);
1392 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1393 			ASSERT(!list_link_active(&dr->dr_dirty_node));
1394 			list_insert_tail(&di->dt.di.dr_children, dr);
1395 			mutex_exit(&di->dt.di.dr_mtx);
1396 			dr->dr_parent = di;
1397 		}
1398 		mutex_exit(&db->db_mtx);
1399 	} else {
1400 		ASSERT(db->db_level+1 == dn->dn_nlevels);
1401 		ASSERT(db->db_blkid < dn->dn_nblkptr);
1402 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1403 		mutex_enter(&dn->dn_mtx);
1404 		ASSERT(!list_link_active(&dr->dr_dirty_node));
1405 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1406 		mutex_exit(&dn->dn_mtx);
1407 		if (drop_struct_lock)
1408 			rw_exit(&dn->dn_struct_rwlock);
1409 	}
1410 
1411 	dnode_setdirty(dn, tx);
1412 	DB_DNODE_EXIT(db);
1413 	return (dr);
1414 }
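
/*
 * An illustration of the dirty record list maintained above (txg numbers
 * assumed): db_last_dirty points at the newest record and dr_next walks
 * toward older txgs, so with txg 7 open while txgs 6 and 5 are still
 * syncing out, the chain is:
 *
 *	db_last_dirty -> dr(txg 7) -> dr(txg 6) -> dr(txg 5) -> NULL
 *
 * This is why the ASSERT above bounds db_dirtycnt at 3: at most three
 * txgs can be in flight at once.
 */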
1415 
1416 /*
1417  * Undirty a buffer in the transaction group referenced by the given
1418  * transaction.  Return whether this evicted the dbuf.
1419  */
1420 static boolean_t
1421 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1422 {
1423 	dnode_t *dn;
1424 	uint64_t txg = tx->tx_txg;
1425 	dbuf_dirty_record_t *dr, **drp;
1426 
1427 	ASSERT(txg != 0);
1428 
1429 	/*
1430 	 * Due to our use of dn_nlevels below, this can only be called
1431 	 * in open context, unless we are operating on the MOS.
1432 	 * From syncing context, dn_nlevels may be different from the
1433 	 * dn_nlevels used when dbuf was dirtied.
1434 	 */
1435 	ASSERT(db->db_objset ==
1436 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
1437 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
1438 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1439 	ASSERT0(db->db_level);
1440 	ASSERT(MUTEX_HELD(&db->db_mtx));
1441 
1442 	/*
1443 	 * If this buffer is not dirty, we're done.
1444 	 */
1445 	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1446 		if (dr->dr_txg <= txg)
1447 			break;
1448 	if (dr == NULL || dr->dr_txg < txg)
1449 		return (B_FALSE);
1450 	ASSERT(dr->dr_txg == txg);
1451 	ASSERT(dr->dr_dbuf == db);
1452 
1453 	DB_DNODE_ENTER(db);
1454 	dn = DB_DNODE(db);
1455 
1456 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1457 
1458 	ASSERT(db->db.db_size != 0);
1459 
1460 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
1461 	    dr->dr_accounted, txg);
1462 
1463 	*drp = dr->dr_next;
1464 
1465 	/*
1466 	 * Note that there are three places in dbuf_dirty()
1467 	 * where this dirty record may be put on a list.
1468 	 * Make sure to do a list_remove corresponding to
1469 	 * every one of those list_insert calls.
1470 	 */
1471 	if (dr->dr_parent) {
1472 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1473 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1474 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1475 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
1476 	    db->db_level + 1 == dn->dn_nlevels) {
1477 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1478 		mutex_enter(&dn->dn_mtx);
1479 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1480 		mutex_exit(&dn->dn_mtx);
1481 	}
1482 	DB_DNODE_EXIT(db);
1483 
1484 	if (db->db_state != DB_NOFILL) {
1485 		dbuf_unoverride(dr);
1486 
1487 		ASSERT(db->db_buf != NULL);
1488 		ASSERT(dr->dt.dl.dr_data != NULL);
1489 		if (dr->dt.dl.dr_data != db->db_buf)
1490 			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
1491 	}
1492 
1493 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
1494 
1495 	ASSERT(db->db_dirtycnt > 0);
1496 	db->db_dirtycnt -= 1;
1497 
1498 	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1499 		arc_buf_t *buf = db->db_buf;
1500 
1501 		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1502 		dbuf_clear_data(db);
1503 		VERIFY(arc_buf_remove_ref(buf, db));
1504 		dbuf_evict(db);
1505 		return (B_TRUE);
1506 	}
1507 
1508 	return (B_FALSE);
1509 }
1510 
1511 void
1512 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
1513 {
1514 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1515 	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1516 
1517 	ASSERT(tx->tx_txg != 0);
1518 	ASSERT(!refcount_is_zero(&db->db_holds));
1519 
1520 	/*
1521 	 * Quick check for dirtiness.  For already dirty blocks, this
1522 	 * reduces the runtime of this function by >90% and improves
1523 	 * overall performance by 50% for some workloads (e.g. file
1524 	 * deletion with indirect blocks cached).
1525 	 */
1526 	mutex_enter(&db->db_mtx);
1527 	dbuf_dirty_record_t *dr;
1528 	for (dr = db->db_last_dirty;
1529 	    dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
1530 		/*
1531 		 * It's possible that it is already dirty but not cached,
1532 		 * because there are some calls to dbuf_dirty() that don't
1533 		 * go through dmu_buf_will_dirty().
1534 		 */
1535 		if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
1536 			/* This dbuf is already dirty and cached. */
1537 			dbuf_redirty(dr);
1538 			mutex_exit(&db->db_mtx);
1539 			return;
1540 		}
1541 	}
1542 	mutex_exit(&db->db_mtx);
1543 
1544 	DB_DNODE_ENTER(db);
1545 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1546 		rf |= DB_RF_HAVESTRUCT;
1547 	DB_DNODE_EXIT(db);
1548 	(void) dbuf_read(db, NULL, rf);
1549 	(void) dbuf_dirty(db, tx);
1550 }
1551 
1552 void
1553 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1554 {
1555 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1556 
1557 	db->db_state = DB_NOFILL;
1558 
1559 	dmu_buf_will_fill(db_fake, tx);
1560 }
1561 
1562 void
1563 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1564 {
1565 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1566 
1567 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1568 	ASSERT(tx->tx_txg != 0);
1569 	ASSERT(db->db_level == 0);
1570 	ASSERT(!refcount_is_zero(&db->db_holds));
1571 
1572 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1573 	    dmu_tx_private_ok(tx));
1574 
1575 	dbuf_noread(db);
1576 	(void) dbuf_dirty(db, tx);
1577 }
1578 
1579 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1580 /* ARGSUSED */
1581 void
1582 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1583 {
1584 	mutex_enter(&db->db_mtx);
1585 	DBUF_VERIFY(db);
1586 
1587 	if (db->db_state == DB_FILL) {
1588 		if (db->db_level == 0 && db->db_freed_in_flight) {
1589 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1590 			/* we were freed while filling */
1591 			/* XXX dbuf_undirty? */
1592 			bzero(db->db.db_data, db->db.db_size);
1593 			db->db_freed_in_flight = FALSE;
1594 		}
1595 		db->db_state = DB_CACHED;
1596 		cv_broadcast(&db->db_changed);
1597 	}
1598 	mutex_exit(&db->db_mtx);
1599 }
1600 
1601 void
1602 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
1603     bp_embedded_type_t etype, enum zio_compress comp,
1604     int uncompressed_size, int compressed_size, int byteorder,
1605     dmu_tx_t *tx)
1606 {
1607 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
1608 	struct dirty_leaf *dl;
1609 	dmu_object_type_t type;
1610 
1611 	if (etype == BP_EMBEDDED_TYPE_DATA) {
1612 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
1613 		    SPA_FEATURE_EMBEDDED_DATA));
1614 	}
1615 
1616 	DB_DNODE_ENTER(db);
1617 	type = DB_DNODE(db)->dn_type;
1618 	DB_DNODE_EXIT(db);
1619 
1620 	ASSERT0(db->db_level);
1621 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1622 
1623 	dmu_buf_will_not_fill(dbuf, tx);
1624 
1625 	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1626 	dl = &db->db_last_dirty->dt.dl;
1627 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
1628 	    data, comp, uncompressed_size, compressed_size);
1629 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
1630 	BP_SET_TYPE(&dl->dr_overridden_by, type);
1631 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
1632 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
1633 
1634 	dl->dr_override_state = DR_OVERRIDDEN;
1635 	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
1636 }
1637 
1638 /*
1639  * Directly assign a provided arc buf to a given dbuf if it's not referenced
1640  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1641  */
1642 void
1643 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1644 {
1645 	ASSERT(!refcount_is_zero(&db->db_holds));
1646 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1647 	ASSERT(db->db_level == 0);
1648 	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1649 	ASSERT(buf != NULL);
1650 	ASSERT(arc_buf_size(buf) == db->db.db_size);
1651 	ASSERT(tx->tx_txg != 0);
1652 
1653 	arc_return_buf(buf, db);
1654 	ASSERT(arc_released(buf));
1655 
1656 	mutex_enter(&db->db_mtx);
1657 
1658 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1659 		cv_wait(&db->db_changed, &db->db_mtx);
1660 
1661 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1662 
1663 	if (db->db_state == DB_CACHED &&
1664 	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1665 		mutex_exit(&db->db_mtx);
1666 		(void) dbuf_dirty(db, tx);
1667 		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1668 		VERIFY(arc_buf_remove_ref(buf, db));
1669 		xuio_stat_wbuf_copied();
1670 		return;
1671 	}
1672 
1673 	xuio_stat_wbuf_nocopy();
1674 	if (db->db_state == DB_CACHED) {
1675 		dbuf_dirty_record_t *dr = db->db_last_dirty;
1676 
1677 		ASSERT(db->db_buf != NULL);
1678 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1679 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
1680 			if (!arc_released(db->db_buf)) {
1681 				ASSERT(dr->dt.dl.dr_override_state ==
1682 				    DR_OVERRIDDEN);
1683 				arc_release(db->db_buf, db);
1684 			}
1685 			dr->dt.dl.dr_data = buf;
1686 			VERIFY(arc_buf_remove_ref(db->db_buf, db));
1687 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1688 			arc_release(db->db_buf, db);
1689 			VERIFY(arc_buf_remove_ref(db->db_buf, db));
1690 		}
1691 		db->db_buf = NULL;
1692 	}
1693 	ASSERT(db->db_buf == NULL);
1694 	dbuf_set_data(db, buf);
1695 	db->db_state = DB_FILL;
1696 	mutex_exit(&db->db_mtx);
1697 	(void) dbuf_dirty(db, tx);
1698 	dmu_buf_fill_done(&db->db, tx);
1699 }
1700 
1701 /*
1702  * "Clear" the contents of this dbuf.  This will mark the dbuf
1703  * EVICTING and clear *most* of its references.  Unfortunately,
1704  * when we are not holding the dn_dbufs_mtx, we can't clear the
1705  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
1706  * in this case.  For callers from the DMU we will usually see:
1707  *	dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
1708  * For the arc callback, we will usually see:
1709  *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1710  * Sometimes, though, we will get a mix of these two:
1711  *	DMU: dbuf_clear()->arc_clear_callback()
1712  *	ARC: dbuf_do_evict()->dbuf_destroy()
1713  *
1714  * This routine will dissociate the dbuf from the arc, by calling
1715  * arc_clear_callback(), but will not evict the data from the ARC.
1716  */
1717 void
1718 dbuf_clear(dmu_buf_impl_t *db)
1719 {
1720 	dnode_t *dn;
1721 	dmu_buf_impl_t *parent = db->db_parent;
1722 	dmu_buf_impl_t *dndb;
1723 	boolean_t dbuf_gone = B_FALSE;
1724 
1725 	ASSERT(MUTEX_HELD(&db->db_mtx));
1726 	ASSERT(refcount_is_zero(&db->db_holds));
1727 
1728 	dbuf_evict_user(db);
1729 
1730 	if (db->db_state == DB_CACHED) {
1731 		ASSERT(db->db.db_data != NULL);
1732 		if (db->db_blkid == DMU_BONUS_BLKID) {
1733 			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1734 			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1735 		}
1736 		db->db.db_data = NULL;
1737 		db->db_state = DB_UNCACHED;
1738 	}
1739 
1740 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1741 	ASSERT(db->db_data_pending == NULL);
1742 
1743 	db->db_state = DB_EVICTING;
1744 	db->db_blkptr = NULL;
1745 
1746 	DB_DNODE_ENTER(db);
1747 	dn = DB_DNODE(db);
1748 	dndb = dn->dn_dbuf;
1749 	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1750 		avl_remove(&dn->dn_dbufs, db);
1751 		atomic_dec_32(&dn->dn_dbufs_count);
1752 		membar_producer();
1753 		DB_DNODE_EXIT(db);
1754 		/*
1755 		 * Decrementing the dbuf count means that the hold corresponding
1756 		 * to the removed dbuf is no longer discounted in dnode_move(),
1757 		 * so the dnode cannot be moved until after we release the hold.
1758 		 * The membar_producer() ensures visibility of the decremented
1759 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1760 		 * release any lock.
1761 		 */
1762 		dnode_rele(dn, db);
1763 		db->db_dnode_handle = NULL;
1764 	} else {
1765 		DB_DNODE_EXIT(db);
1766 	}
1767 
1768 	if (db->db_buf)
1769 		dbuf_gone = arc_clear_callback(db->db_buf);
1770 
1771 	if (!dbuf_gone)
1772 		mutex_exit(&db->db_mtx);
1773 
1774 	/*
1775 	 * If this dbuf is referenced from an indirect dbuf,
1776 	 * decrement the ref count on the indirect dbuf.
1777 	 */
1778 	if (parent && parent != dndb)
1779 		dbuf_rele(parent, db);
1780 }
1781 
1782 /*
1783  * Note: While bpp will always be updated if the function returns success,
1784  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
1785  * this happens when the dnode is the meta-dnode, or a userused or groupused
1786  * object.
1787  */
1788 static int
1789 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1790     dmu_buf_impl_t **parentp, blkptr_t **bpp)
1791 {
1792 	int nlevels, epbs;
1793 
1794 	*parentp = NULL;
1795 	*bpp = NULL;
1796 
1797 	ASSERT(blkid != DMU_BONUS_BLKID);
1798 
1799 	if (blkid == DMU_SPILL_BLKID) {
1800 		mutex_enter(&dn->dn_mtx);
1801 		if (dn->dn_have_spill &&
1802 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1803 			*bpp = &dn->dn_phys->dn_spill;
1804 		else
1805 			*bpp = NULL;
1806 		dbuf_add_ref(dn->dn_dbuf, NULL);
1807 		*parentp = dn->dn_dbuf;
1808 		mutex_exit(&dn->dn_mtx);
1809 		return (0);
1810 	}
1811 
1812 	if (dn->dn_phys->dn_nlevels == 0)
1813 		nlevels = 1;
1814 	else
1815 		nlevels = dn->dn_phys->dn_nlevels;
1816 
1817 	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1818 
1819 	ASSERT3U(level * epbs, <, 64);
1820 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1821 	if (level >= nlevels ||
1822 	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1823 		/* the buffer has no parent yet */
1824 		return (SET_ERROR(ENOENT));
1825 	} else if (level < nlevels-1) {
1826 		/* this block is referenced from an indirect block */
1827 		int err = dbuf_hold_impl(dn, level+1,
1828 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
1829 		if (err)
1830 			return (err);
1831 		err = dbuf_read(*parentp, NULL,
1832 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1833 		if (err) {
1834 			dbuf_rele(*parentp, NULL);
1835 			*parentp = NULL;
1836 			return (err);
1837 		}
1838 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1839 		    (blkid & ((1ULL << epbs) - 1));
1840 		return (0);
1841 	} else {
1842 		/* the block is referenced from the dnode */
1843 		ASSERT3U(level, ==, nlevels-1);
1844 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1845 		    blkid < dn->dn_phys->dn_nblkptr);
1846 		if (dn->dn_dbuf) {
1847 			dbuf_add_ref(dn->dn_dbuf, NULL);
1848 			*parentp = dn->dn_dbuf;
1849 		}
1850 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
1851 		return (0);
1852 	}
1853 }
1854 
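/*
 * Allocate and initialize a new dbuf for the given block of the given
 * dnode, insert it into the dbuf hash table and the dnode's dn_dbufs
 * list, and take a hold on the dnode on its behalf.  The bonus dbuf is
 * special-cased: it is neither hashed nor listed.  If another thread
 * races us and inserts the same dbuf first, free ours and return the
 * existing one instead.
 */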
1855 static dmu_buf_impl_t *
1856 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1857     dmu_buf_impl_t *parent, blkptr_t *blkptr)
1858 {
1859 	objset_t *os = dn->dn_objset;
1860 	dmu_buf_impl_t *db, *odb;
1861 
1862 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1863 	ASSERT(dn->dn_type != DMU_OT_NONE);
1864 
1865 	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1866 
1867 	db->db_objset = os;
1868 	db->db.db_object = dn->dn_object;
1869 	db->db_level = level;
1870 	db->db_blkid = blkid;
1871 	db->db_last_dirty = NULL;
1872 	db->db_dirtycnt = 0;
1873 	db->db_dnode_handle = dn->dn_handle;
1874 	db->db_parent = parent;
1875 	db->db_blkptr = blkptr;
1876 
1877 	db->db_user = NULL;
1878 	db->db_user_immediate_evict = FALSE;
1879 	db->db_freed_in_flight = FALSE;
1880 	db->db_pending_evict = FALSE;
1881 
1882 	if (blkid == DMU_BONUS_BLKID) {
1883 		ASSERT3P(parent, ==, dn->dn_dbuf);
1884 		db->db.db_size = DN_MAX_BONUSLEN -
1885 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1886 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1887 		db->db.db_offset = DMU_BONUS_BLKID;
1888 		db->db_state = DB_UNCACHED;
1889 		/* the bonus dbuf is not placed in the hash table */
1890 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1891 		return (db);
1892 	} else if (blkid == DMU_SPILL_BLKID) {
1893 		db->db.db_size = (blkptr != NULL) ?
1894 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1895 		db->db.db_offset = 0;
1896 	} else {
1897 		int blocksize =
1898 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
1899 		db->db.db_size = blocksize;
1900 		db->db.db_offset = db->db_blkid * blocksize;
1901 	}
1902 
1903 	/*
1904 	 * Hold the dn_dbufs_mtx while we add the new dbuf
1905 	 * to the hash table *and* the dn_dbufs list.
1906 	 * This prevents a possible deadlock with someone
1907 	 * trying to look up this dbuf before it's added to the
1908 	 * dn_dbufs list.
1909 	 */
1910 	mutex_enter(&dn->dn_dbufs_mtx);
1911 	db->db_state = DB_EVICTING;
1912 	if ((odb = dbuf_hash_insert(db)) != NULL) {
1913 		/* someone else inserted it first */
1914 		kmem_cache_free(dbuf_cache, db);
1915 		mutex_exit(&dn->dn_dbufs_mtx);
1916 		return (odb);
1917 	}
1918 	avl_add(&dn->dn_dbufs, db);
1919 	if (db->db_level == 0 && db->db_blkid >=
1920 	    dn->dn_unlisted_l0_blkid)
1921 		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
1922 	db->db_state = DB_UNCACHED;
1923 	mutex_exit(&dn->dn_dbufs_mtx);
1924 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1925 
1926 	if (parent && parent != dn->dn_dbuf)
1927 		dbuf_add_ref(parent, db);
1928 
1929 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1930 	    refcount_count(&dn->dn_holds) > 0);
1931 	(void) refcount_add(&dn->dn_holds, db);
1932 	atomic_inc_32(&dn->dn_dbufs_count);
1933 
1934 	dprintf_dbuf(db, "db=%p\n", db);
1935 
1936 	return (db);
1937 }
1938 
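/*
 * Eviction callback, typically reached via arc_clear_callback().  If the
 * dbuf is still CACHED, evict its data; if it is already EVICTING (see
 * dbuf_clear() above), finish the teardown with dbuf_destroy().
 */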
1939 static int
1940 dbuf_do_evict(void *private)
1941 {
1942 	dmu_buf_impl_t *db = private;
1943 
1944 	if (!MUTEX_HELD(&db->db_mtx))
1945 		mutex_enter(&db->db_mtx);
1946 
1947 	ASSERT(refcount_is_zero(&db->db_holds));
1948 
1949 	if (db->db_state != DB_EVICTING) {
1950 		ASSERT(db->db_state == DB_CACHED);
1951 		DBUF_VERIFY(db);
1952 		db->db_buf = NULL;
1953 		dbuf_evict(db);
1954 	} else {
1955 		mutex_exit(&db->db_mtx);
1956 		dbuf_destroy(db);
1957 	}
1958 	return (0);
1959 }
1960 
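/*
 * Final teardown of a dbuf: remove it from the dnode's dn_dbufs list and
 * the hash table (bonus dbufs are on neither), drop the dnode hold if
 * dbuf_clear() has not already done so, and return the structure to the
 * kmem cache.
 */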
1961 static void
1962 dbuf_destroy(dmu_buf_impl_t *db)
1963 {
1964 	ASSERT(refcount_is_zero(&db->db_holds));
1965 
1966 	if (db->db_blkid != DMU_BONUS_BLKID) {
1967 		/*
1968 		 * If this dbuf is still on the dn_dbufs list,
1969 		 * remove it from that list.
1970 		 */
1971 		if (db->db_dnode_handle != NULL) {
1972 			dnode_t *dn;
1973 
1974 			DB_DNODE_ENTER(db);
1975 			dn = DB_DNODE(db);
1976 			mutex_enter(&dn->dn_dbufs_mtx);
1977 			avl_remove(&dn->dn_dbufs, db);
1978 			atomic_dec_32(&dn->dn_dbufs_count);
1979 			mutex_exit(&dn->dn_dbufs_mtx);
1980 			DB_DNODE_EXIT(db);
1981 			/*
1982 			 * Decrementing the dbuf count means that the hold
1983 			 * corresponding to the removed dbuf is no longer
1984 			 * discounted in dnode_move(), so the dnode cannot be
1985 			 * moved until after we release the hold.
1986 			 */
1987 			dnode_rele(dn, db);
1988 			db->db_dnode_handle = NULL;
1989 		}
1990 		dbuf_hash_remove(db);
1991 	}
1992 	db->db_parent = NULL;
1993 	db->db_buf = NULL;
1994 
1995 	ASSERT(db->db.db_data == NULL);
1996 	ASSERT(db->db_hash_next == NULL);
1997 	ASSERT(db->db_blkptr == NULL);
1998 	ASSERT(db->db_data_pending == NULL);
1999 
2000 	kmem_cache_free(dbuf_cache, db);
2001 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
2002 }
2003 
2004 typedef struct dbuf_prefetch_arg {
2005 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
2006 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
2007 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
2008 	int dpa_curlevel; /* The current level that we're reading. */
2009 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
2010 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
2011 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
2012 } dbuf_prefetch_arg_t;
2013 
2014 /*
2015  * Actually issue the prefetch read for the block given.
2016  */
2017 static void
2018 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
2019 {
2020 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
2021 		return;
2022 
2023 	arc_flags_t aflags =
2024 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
2025 
2026 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2027 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
2028 	ASSERT(dpa->dpa_zio != NULL);
2029 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
2030 	    dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2031 	    &aflags, &dpa->dpa_zb);
2032 }
2033 
2034 /*
2035  * Called when an indirect block above our prefetch target is read in.  This
2036  * will either read in the next indirect block down the tree or issue the actual
2037  * prefetch if the next block down is our target.
2038  */
2039 static void
2040 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
2041 {
2042 	dbuf_prefetch_arg_t *dpa = private;
2043 
2044 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
2045 	ASSERT3S(dpa->dpa_curlevel, >, 0);
2046 	if (zio != NULL) {
2047 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
2048 		ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
2049 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
2050 	}
2051 
2052 	dpa->dpa_curlevel--;
2053 
2054 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
2055 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
2056 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
2057 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
2058 	if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) {
2059 		kmem_free(dpa, sizeof (*dpa));
2060 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
2061 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
2062 		dbuf_issue_final_prefetch(dpa, bp);
2063 		kmem_free(dpa, sizeof (*dpa));
2064 	} else {
2065 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2066 		zbookmark_phys_t zb;
2067 
2068 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2069 
2070 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
2071 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
2072 
2073 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2074 		    bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
2075 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2076 		    &iter_aflags, &zb);
2077 	}
2078 	(void) arc_buf_remove_ref(abuf, private);
2079 }
2080 
2081 /*
2082  * Issue prefetch reads for the given block on the given level.  If the indirect
2083  * blocks above that block are not in memory, we will read them in
2084  * asynchronously.  As a result, this call never blocks waiting for a read to
2085  * complete.
2086  */
2087 void
2088 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
2089     arc_flags_t aflags)
2090 {
2091 	blkptr_t bp;
2092 	int epbs, nlevels, curlevel;
2093 	uint64_t curblkid;
2094 
2095 	ASSERT(blkid != DMU_BONUS_BLKID);
2096 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2097 
2098 	if (blkid > dn->dn_maxblkid)
2099 		return;
2100 
2101 	if (dnode_block_freed(dn, blkid))
2102 		return;
2103 
2104 	/*
2105 	 * This dnode hasn't been written to disk yet, so there's nothing to
2106 	 * prefetch.
2107 	 */
2108 	nlevels = dn->dn_phys->dn_nlevels;
2109 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
2110 		return;
2111 
2112 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2113 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
2114 		return;
2115 
2116 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
2117 	    level, blkid);
2118 	if (db != NULL) {
2119 		mutex_exit(&db->db_mtx);
2120 		/*
2121 		 * This dbuf already exists.  It is either CACHED, or
2122 		 * (we assume) about to be read or filled.
2123 		 */
2124 		return;
2125 	}
2126 
2127 	/*
2128 	 * Find the closest ancestor (indirect block) of the target block
2129 	 * that is present in the cache.  In this indirect block, we will
2130 	 * find the bp that is at curlevel, curblkid.
2131 	 */
2132 	curlevel = level;
2133 	curblkid = blkid;
2134 	while (curlevel < nlevels - 1) {
2135 		int parent_level = curlevel + 1;
2136 		uint64_t parent_blkid = curblkid >> epbs;
2137 		dmu_buf_impl_t *db;
2138 
2139 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
2140 		    FALSE, TRUE, FTAG, &db) == 0) {
2141 			blkptr_t *bpp = db->db_buf->b_data;
2142 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
2143 			dbuf_rele(db, FTAG);
2144 			break;
2145 		}
2146 
2147 		curlevel = parent_level;
2148 		curblkid = parent_blkid;
2149 	}
2150 
2151 	if (curlevel == nlevels - 1) {
2152 		/* No cached indirect blocks found. */
2153 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
2154 		bp = dn->dn_phys->dn_blkptr[curblkid];
2155 	}
2156 	if (BP_IS_HOLE(&bp))
2157 		return;
2158 
2159 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
2160 
2161 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
2162 	    ZIO_FLAG_CANFAIL);
2163 
2164 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
2165 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
2166 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2167 	    dn->dn_object, level, blkid);
2168 	dpa->dpa_curlevel = curlevel;
2169 	dpa->dpa_prio = prio;
2170 	dpa->dpa_aflags = aflags;
2171 	dpa->dpa_spa = dn->dn_objset->os_spa;
2172 	dpa->dpa_epbs = epbs;
2173 	dpa->dpa_zio = pio;
2174 
2175 	/*
2176 	 * If we have the indirect just above us, no need to do the asynchronous
2177 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
2178 	 * a higher level, though, we want to issue the prefetches for all the
2179 	 * indirect blocks asynchronously, so we can go on with whatever we were
2180 	 * doing.
2181 	 */
2182 	if (curlevel == level) {
2183 		ASSERT3U(curblkid, ==, blkid);
2184 		dbuf_issue_final_prefetch(dpa, &bp);
2185 		kmem_free(dpa, sizeof (*dpa));
2186 	} else {
2187 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2188 		zbookmark_phys_t zb;
2189 
2190 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2191 		    dn->dn_object, curlevel, curblkid);
2192 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2193 		    &bp, dbuf_prefetch_indirect_done, dpa, prio,
2194 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2195 		    &iter_aflags, &zb);
2196 	}
2197 	/*
2198 	 * We use pio here instead of dpa_zio since it's possible that
2199 	 * dpa may have already been freed.
2200 	 */
2201 	zio_nowait(pio);
2202 }
2203 
2204 /*
2205  * Returns with db_holds incremented, and db_mtx not held.
2206  * Note: dn_struct_rwlock must be held.
2207  */
2208 int
2209 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
2210     boolean_t fail_sparse, boolean_t fail_uncached,
2211     void *tag, dmu_buf_impl_t **dbp)
2212 {
2213 	dmu_buf_impl_t *db, *parent = NULL;
2214 
2215 	ASSERT(blkid != DMU_BONUS_BLKID);
2216 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2217 	ASSERT3U(dn->dn_nlevels, >, level);
2218 
2219 	*dbp = NULL;
2220 top:
2221 	/* dbuf_find() returns with db_mtx held */
2222 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2223 
2224 	if (db == NULL) {
2225 		blkptr_t *bp = NULL;
2226 		int err;
2227 
2228 		if (fail_uncached)
2229 			return (SET_ERROR(ENOENT));
2230 
2231 		ASSERT3P(parent, ==, NULL);
2232 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2233 		if (fail_sparse) {
2234 			if (err == 0 && bp && BP_IS_HOLE(bp))
2235 				err = SET_ERROR(ENOENT);
2236 			if (err) {
2237 				if (parent)
2238 					dbuf_rele(parent, NULL);
2239 				return (err);
2240 			}
2241 		}
2242 		if (err && err != ENOENT)
2243 			return (err);
2244 		db = dbuf_create(dn, level, blkid, parent, bp);
2245 	}
2246 
2247 	if (fail_uncached && db->db_state != DB_CACHED) {
2248 		mutex_exit(&db->db_mtx);
2249 		return (SET_ERROR(ENOENT));
2250 	}
2251 
2252 	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
2253 		arc_buf_add_ref(db->db_buf, db);
2254 		if (db->db_buf->b_data == NULL) {
2255 			dbuf_clear(db);
2256 			if (parent) {
2257 				dbuf_rele(parent, NULL);
2258 				parent = NULL;
2259 			}
2260 			goto top;
2261 		}
2262 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
2263 	}
2264 
2265 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2266 
2267 	/*
2268 	 * If this buffer is currently syncing out, and we are
2269 	 * still referencing it from db_data, we need to make a copy
2270 	 * of it in case we decide we want to dirty it again in this txg.
2271 	 */
2272 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2273 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2274 	    db->db_state == DB_CACHED && db->db_data_pending) {
2275 		dbuf_dirty_record_t *dr = db->db_data_pending;
2276 
2277 		if (dr->dt.dl.dr_data == db->db_buf) {
2278 			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2279 
2280 			dbuf_set_data(db,
2281 			    arc_buf_alloc(dn->dn_objset->os_spa,
2282 			    db->db.db_size, db, type));
2283 			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
2284 			    db->db.db_size);
2285 		}
2286 	}
2287 
2288 	(void) refcount_add(&db->db_holds, tag);
2289 	DBUF_VERIFY(db);
2290 	mutex_exit(&db->db_mtx);
2291 
2292 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
2293 	if (parent)
2294 		dbuf_rele(parent, NULL);
2295 
2296 	ASSERT3P(DB_DNODE(db), ==, dn);
2297 	ASSERT3U(db->db_blkid, ==, blkid);
2298 	ASSERT3U(db->db_level, ==, level);
2299 	*dbp = db;
2300 
2301 	return (0);
2302 }
2303 
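/*
 * Convenience wrappers around dbuf_hold_impl().  dbuf_hold() takes a hold
 * on a level-0 block; dbuf_hold_level() lets the caller specify the
 * level.  Both return NULL on failure rather than an error code.  As a
 * sketch only, a caller (holding dn_struct_rwlock) pairs the hold with a
 * release:
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		... use the dbuf, e.g. after dbuf_read() ...
 *		dbuf_rele(db, FTAG);
 *	}
 */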
2304 dmu_buf_impl_t *
2305 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2306 {
2307 	return (dbuf_hold_level(dn, 0, blkid, tag));
2308 }
2309 
2310 dmu_buf_impl_t *
2311 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2312 {
2313 	dmu_buf_impl_t *db;
2314 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
2315 	return (err ? NULL : db);
2316 }
2317 
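/*
 * Create the bonus dbuf for this dnode.  The caller must hold
 * dn_struct_rwlock as writer, and the dnode must not already have a
 * bonus dbuf.
 */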
2318 void
2319 dbuf_create_bonus(dnode_t *dn)
2320 {
2321 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2322 
2323 	ASSERT(dn->dn_bonus == NULL);
2324 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2325 }
2326 
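/*
 * Resize a spill block to blksz, rounded up to a multiple of
 * SPA_MINBLOCKSIZE (blksz == 0 means the minimum block size).
 * Returns ENOTSUP if this dbuf is not a spill block.
 */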
2327 int
2328 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2329 {
2330 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2331 	dnode_t *dn;
2332 
2333 	if (db->db_blkid != DMU_SPILL_BLKID)
2334 		return (SET_ERROR(ENOTSUP));
2335 	if (blksz == 0)
2336 		blksz = SPA_MINBLOCKSIZE;
2337 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
2338 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2339 
2340 	DB_DNODE_ENTER(db);
2341 	dn = DB_DNODE(db);
2342 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2343 	dbuf_new_size(db, blksz, tx);
2344 	rw_exit(&dn->dn_struct_rwlock);
2345 	DB_DNODE_EXIT(db);
2346 
2347 	return (0);
2348 }
2349 
2350 void
2351 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2352 {
2353 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2354 }
2355 
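/*
 * Add a hold to an already-held dbuf.  To take the first hold, use
 * dbuf_hold_impl() or one of its wrappers.
 */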
2356 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2357 void
2358 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2359 {
2360 	int64_t holds = refcount_add(&db->db_holds, tag);
2361 	ASSERT(holds > 1);
2362 }
2363 
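/*
 * Attempt to add a hold to the given dbuf, but only if it is still the
 * dbuf cached for (os, obj, blkid) and it has holds beyond those of its
 * dirty records.  Returns B_TRUE if the hold was added.
 */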
2364 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
2365 boolean_t
2366 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
2367     void *tag)
2368 {
2369 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2370 	dmu_buf_impl_t *found_db;
2371 	boolean_t result = B_FALSE;
2372 
2373 	if (db->db_blkid == DMU_BONUS_BLKID)
2374 		found_db = dbuf_find_bonus(os, obj);
2375 	else
2376 		found_db = dbuf_find(os, obj, 0, blkid);
2377 
2378 	if (found_db != NULL) {
2379 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
2380 			(void) refcount_add(&db->db_holds, tag);
2381 			result = B_TRUE;
2382 		}
2383 		mutex_exit(&db->db_mtx);
2384 	}
2385 	return (result);
2386 }
2387 
2388 /*
2389  * If you call dbuf_rele() you had better not be referencing the dnode handle
2390  * unless you have some other direct or indirect hold on the dnode. (An indirect
2391  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2392  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2393  * dnode's parent dbuf evicting its dnode handles.
2394  */
2395 void
2396 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2397 {
2398 	mutex_enter(&db->db_mtx);
2399 	dbuf_rele_and_unlock(db, tag);
2400 }
2401 
2402 void
2403 dmu_buf_rele(dmu_buf_t *db, void *tag)
2404 {
2405 	dbuf_rele((dmu_buf_impl_t *)db, tag);
2406 }
2407 
2408 /*
2409  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2410  * db_dirtycnt and db_holds to be updated atomically.
2411  */
2412 void
2413 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2414 {
2415 	int64_t holds;
2416 
2417 	ASSERT(MUTEX_HELD(&db->db_mtx));
2418 	DBUF_VERIFY(db);
2419 
2420 	/*
2421 	 * Remove the reference to the dbuf before removing its hold on the
2422 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
2423 	 * buffer has a corresponding dnode hold.
2424 	 */
2425 	holds = refcount_remove(&db->db_holds, tag);
2426 	ASSERT(holds >= 0);
2427 
2428 	/*
2429 	 * We can't freeze indirects if there is a possibility that they
2430 	 * may be modified in the current syncing context.
2431 	 */
2432 	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2433 		arc_buf_freeze(db->db_buf);
2434 
2435 	if (holds == db->db_dirtycnt &&
2436 	    db->db_level == 0 && db->db_user_immediate_evict)
2437 		dbuf_evict_user(db);
2438 
2439 	if (holds == 0) {
2440 		if (db->db_blkid == DMU_BONUS_BLKID) {
2441 			dnode_t *dn;
2442 			boolean_t evict_dbuf = db->db_pending_evict;
2443 
2444 			/*
2445 			 * If the dnode moves here, we cannot cross this
2446 			 * barrier until the move completes.
2447 			 */
2448 			DB_DNODE_ENTER(db);
2449 
2450 			dn = DB_DNODE(db);
2451 			atomic_dec_32(&dn->dn_dbufs_count);
2452 
2453 			/*
2454 			 * Decrementing the dbuf count means that the bonus
2455 			 * buffer's dnode hold is no longer discounted in
2456 			 * dnode_move(). The dnode cannot move until after
2457 			 * the dnode_rele() below.
2458 			 */
2459 			DB_DNODE_EXIT(db);
2460 
2461 			/*
2462 			 * Do not reference db after its lock is dropped.
2463 			 * Another thread may evict it.
2464 			 */
2465 			mutex_exit(&db->db_mtx);
2466 
2467 			if (evict_dbuf)
2468 				dnode_evict_bonus(dn);
2469 
2470 			dnode_rele(dn, db);
2471 		} else if (db->db_buf == NULL) {
2472 			/*
2473 			 * This is a special case: we never associated this
2474 			 * dbuf with any data allocated from the ARC.
2475 			 */
2476 			ASSERT(db->db_state == DB_UNCACHED ||
2477 			    db->db_state == DB_NOFILL);
2478 			dbuf_evict(db);
2479 		} else if (arc_released(db->db_buf)) {
2480 			arc_buf_t *buf = db->db_buf;
2481 			/*
2482 			 * This dbuf has anonymous data associated with it.
2483 			 */
2484 			dbuf_clear_data(db);
2485 			VERIFY(arc_buf_remove_ref(buf, db));
2486 			dbuf_evict(db);
2487 		} else {
2488 			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2489 
2490 			/*
2491 			 * A dbuf will be eligible for eviction if either the
2492 			 * 'primarycache' property is set or a duplicate
2493 			 * copy of this buffer is already cached in the arc.
2494 			 *
2495 			 * In the case of the 'primarycache' property, a
2496 			 * buffer is considered for eviction if it matches
2497 			 * the criteria set by the property.
2498 			 *
2499 			 * To decide if our buffer is considered a
2500 			 * duplicate, we must call into the arc to determine
2501 			 * if multiple buffers are referencing the same
2502 			 * block on-disk. If so, then we simply evict
2503 			 * ourselves.
2504 			 */
2505 			if (!DBUF_IS_CACHEABLE(db)) {
2506 				if (db->db_blkptr != NULL &&
2507 				    !BP_IS_HOLE(db->db_blkptr) &&
2508 				    !BP_IS_EMBEDDED(db->db_blkptr)) {
2509 					spa_t *spa =
2510 					    dmu_objset_spa(db->db_objset);
2511 					blkptr_t bp = *db->db_blkptr;
2512 					dbuf_clear(db);
2513 					arc_freed(spa, &bp);
2514 				} else {
2515 					dbuf_clear(db);
2516 				}
2517 			} else if (db->db_pending_evict ||
2518 			    arc_buf_eviction_needed(db->db_buf)) {
2519 				dbuf_clear(db);
2520 			} else {
2521 				mutex_exit(&db->db_mtx);
2522 			}
2523 		}
2524 	} else {
2525 		mutex_exit(&db->db_mtx);
2526 	}
2527 }
2528 
2529 #pragma weak dmu_buf_refcount = dbuf_refcount
2530 uint64_t
2531 dbuf_refcount(dmu_buf_impl_t *db)
2532 {
2533 	return (refcount_count(&db->db_holds));
2534 }
2535 
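/*
 * Atomically replace the dbuf's user: if the current user is old_user,
 * install new_user and return old_user; otherwise leave the user in
 * place and return it, so the caller can tell that it lost the race.
 */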
2536 void *
2537 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
2538     dmu_buf_user_t *new_user)
2539 {
2540 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2541 
2542 	mutex_enter(&db->db_mtx);
2543 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
2544 	if (db->db_user == old_user)
2545 		db->db_user = new_user;
2546 	else
2547 		old_user = db->db_user;
2548 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
2549 	mutex_exit(&db->db_mtx);
2550 
2551 	return (old_user);
2552 }
2553 
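/*
 * Set the dbuf's user if it does not already have one.  Returns NULL on
 * success, or the existing user if someone else set one first.  As a
 * sketch (my_obj and my_dbu are illustrative names, not part of this
 * API), a consumer embeds a dmu_buf_user_t in its own structure,
 * initializes it with dmu_buf_init_user(), and then:
 *
 *	if (dmu_buf_set_user(db, &my_obj->my_dbu) != NULL)
 *		... lost the race; use the winner's user instead ...
 */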
2554 void *
2555 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2556 {
2557 	return (dmu_buf_replace_user(db_fake, NULL, user));
2558 }
2559 
2560 void *
2561 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2562 {
2563 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2564 
2565 	db->db_user_immediate_evict = TRUE;
2566 	return (dmu_buf_set_user(db_fake, user));
2567 }
2568 
2569 void *
2570 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2571 {
2572 	return (dmu_buf_replace_user(db_fake, user, NULL));
2573 }
2574 
2575 void *
2576 dmu_buf_get_user(dmu_buf_t *db_fake)
2577 {
2578 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2579 
2580 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
2581 	return (db->db_user);
2582 }
2583 
2584 void
2585 dmu_buf_user_evict_wait()
2586 {
2587 	taskq_wait(dbu_evict_taskq);
2588 }
2589 
2590 boolean_t
2591 dmu_buf_freeable(dmu_buf_t *dbuf)
2592 {
2593 	boolean_t res = B_FALSE;
2594 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2595 
2596 	if (db->db_blkptr)
2597 		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2598 		    db->db_blkptr, db->db_blkptr->blk_birth);
2599 
2600 	return (res);
2601 }
2602 
2603 blkptr_t *
2604 dmu_buf_get_blkptr(dmu_buf_t *db)
2605 {
2606 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2607 	return (dbi->db_blkptr);
2608 }
2609 
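/*
 * Make sure db_blkptr points somewhere valid: the spill slot or a blkptr
 * slot in the dnode itself, or the matching slot in the parent indirect
 * block (taking a hold on, and reading in, the parent if needed).
 */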
2610 static void
2611 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2612 {
2613 	/* ASSERT(dmu_tx_is_syncing(tx)) */
2614 	ASSERT(MUTEX_HELD(&db->db_mtx));
2615 
2616 	if (db->db_blkptr != NULL)
2617 		return;
2618 
2619 	if (db->db_blkid == DMU_SPILL_BLKID) {
2620 		db->db_blkptr = &dn->dn_phys->dn_spill;
2621 		BP_ZERO(db->db_blkptr);
2622 		return;
2623 	}
2624 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2625 		/*
2626 		 * This buffer was allocated at a time when there were
2627 		 * no available blkptrs from the dnode, or it was
2628 		 * inappropriate to hook it in (i.e., nlevels mismatch).
2629 		 */
2630 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2631 		ASSERT(db->db_parent == NULL);
2632 		db->db_parent = dn->dn_dbuf;
2633 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2634 		DBUF_VERIFY(db);
2635 	} else {
2636 		dmu_buf_impl_t *parent = db->db_parent;
2637 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2638 
2639 		ASSERT(dn->dn_phys->dn_nlevels > 1);
2640 		if (parent == NULL) {
2641 			mutex_exit(&db->db_mtx);
2642 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
2643 			parent = dbuf_hold_level(dn, db->db_level + 1,
2644 			    db->db_blkid >> epbs, db);
2645 			rw_exit(&dn->dn_struct_rwlock);
2646 			mutex_enter(&db->db_mtx);
2647 			db->db_parent = parent;
2648 		}
2649 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
2650 		    (db->db_blkid & ((1ULL << epbs) - 1));
2651 		DBUF_VERIFY(db);
2652 	}
2653 }
2654 
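/*
 * Sync an indirect block's dirty record: make sure the block is read in
 * and its blkptr is hooked up, issue the write for this level, and then
 * sync the dirty children one level down beneath that write.
 */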
2655 static void
2656 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2657 {
2658 	dmu_buf_impl_t *db = dr->dr_dbuf;
2659 	dnode_t *dn;
2660 	zio_t *zio;
2661 
2662 	ASSERT(dmu_tx_is_syncing(tx));
2663 
2664 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2665 
2666 	mutex_enter(&db->db_mtx);
2667 
2668 	ASSERT(db->db_level > 0);
2669 	DBUF_VERIFY(db);
2670 
2671 	/* Read the block if it hasn't been read yet. */
2672 	if (db->db_buf == NULL) {
2673 		mutex_exit(&db->db_mtx);
2674 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2675 		mutex_enter(&db->db_mtx);
2676 	}
2677 	ASSERT3U(db->db_state, ==, DB_CACHED);
2678 	ASSERT(db->db_buf != NULL);
2679 
2680 	DB_DNODE_ENTER(db);
2681 	dn = DB_DNODE(db);
2682 	/* Indirect block size must match what the dnode thinks it is. */
2683 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2684 	dbuf_check_blkptr(dn, db);
2685 	DB_DNODE_EXIT(db);
2686 
2687 	/* Provide the pending dirty record to child dbufs */
2688 	db->db_data_pending = dr;
2689 
2690 	mutex_exit(&db->db_mtx);
2691 	dbuf_write(dr, db->db_buf, tx);
2692 
2693 	zio = dr->dr_zio;
2694 	mutex_enter(&dr->dt.di.dr_mtx);
2695 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
2696 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2697 	mutex_exit(&dr->dt.di.dr_mtx);
2698 	zio_nowait(zio);
2699 }
2700 
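/*
 * Sync a level-0 dirty record.  Bonus buffers are simply copied into the
 * dnode and their dirty records retired here; for anything else we wait
 * out any in-flight dmu_sync(), snapshot the data if the open txg still
 * holds it, and issue the write.
 */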
2701 static void
2702 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2703 {
2704 	arc_buf_t **datap = &dr->dt.dl.dr_data;
2705 	dmu_buf_impl_t *db = dr->dr_dbuf;
2706 	dnode_t *dn;
2707 	objset_t *os;
2708 	uint64_t txg = tx->tx_txg;
2709 
2710 	ASSERT(dmu_tx_is_syncing(tx));
2711 
2712 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2713 
2714 	mutex_enter(&db->db_mtx);
2715 	/*
2716 	 * To be synced, we must be dirtied.  But we
2717 	 * might have been freed after we were dirtied.
2718 	 */
2719 	if (db->db_state == DB_UNCACHED) {
2720 		/* This buffer has been freed since it was dirtied */
2721 		ASSERT(db->db.db_data == NULL);
2722 	} else if (db->db_state == DB_FILL) {
2723 		/* This buffer was freed and is now being re-filled */
2724 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2725 	} else {
2726 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2727 	}
2728 	DBUF_VERIFY(db);
2729 
2730 	DB_DNODE_ENTER(db);
2731 	dn = DB_DNODE(db);
2732 
2733 	if (db->db_blkid == DMU_SPILL_BLKID) {
2734 		mutex_enter(&dn->dn_mtx);
2735 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2736 		mutex_exit(&dn->dn_mtx);
2737 	}
2738 
2739 	/*
2740 	 * If this is a bonus buffer, simply copy the bonus data into the
2741 	 * dnode.  It will be written out when the dnode is synced (and it
2742 	 * will be synced, since it must have been dirty for dbuf_sync to
2743 	 * be called).
2744 	 */
2745 	if (db->db_blkid == DMU_BONUS_BLKID) {
2746 		dbuf_dirty_record_t **drp;
2747 
2748 		ASSERT(*datap != NULL);
2749 		ASSERT0(db->db_level);
2750 		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2751 		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2752 		DB_DNODE_EXIT(db);
2753 
2754 		if (*datap != db->db.db_data) {
2755 			zio_buf_free(*datap, DN_MAX_BONUSLEN);
2756 			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2757 		}
2758 		db->db_data_pending = NULL;
2759 		drp = &db->db_last_dirty;
2760 		while (*drp != dr)
2761 			drp = &(*drp)->dr_next;
2762 		ASSERT(dr->dr_next == NULL);
2763 		ASSERT(dr->dr_dbuf == db);
2764 		*drp = dr->dr_next;
2765 		kmem_free(dr, sizeof (dbuf_dirty_record_t));
2766 		ASSERT(db->db_dirtycnt > 0);
2767 		db->db_dirtycnt -= 1;
2768 		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2769 		return;
2770 	}
2771 
2772 	os = dn->dn_objset;
2773 
2774 	/*
2775 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
2776 	 * operation to sneak in. As a result, we need to ensure that we
2777 	 * don't check the dr_override_state until we have returned from
2778 	 * dbuf_check_blkptr.
2779 	 */
2780 	dbuf_check_blkptr(dn, db);
2781 
2782 	/*
2783 	 * If this buffer is in the middle of an immediate write,
2784 	 * wait for the synchronous IO to complete.
2785 	 */
2786 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2787 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2788 		cv_wait(&db->db_changed, &db->db_mtx);
2789 		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2790 	}
2791 
2792 	if (db->db_state != DB_NOFILL &&
2793 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2794 	    refcount_count(&db->db_holds) > 1 &&
2795 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2796 	    *datap == db->db_buf) {
2797 		/*
2798 		 * If this buffer is currently "in use" (i.e., there
2799 		 * are active holds and db_data still references it),
2800 		 * then make a copy before we start the write so that
2801 		 * any modifications from the open txg will not leak
2802 		 * into this write.
2803 		 *
2804 		 * NOTE: this copy does not need to be made for
2805 		 * objects only modified in the syncing context (e.g.
2806 		 * DMU_OT_DNODE blocks).
2807 		 */
2808 		int blksz = arc_buf_size(*datap);
2809 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2810 		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2811 		bcopy(db->db.db_data, (*datap)->b_data, blksz);
2812 	}
2813 	db->db_data_pending = dr;
2814 
2815 	mutex_exit(&db->db_mtx);
2816 
2817 	dbuf_write(dr, *datap, tx);
2818 
2819 	ASSERT(!list_link_active(&dr->dr_dirty_node));
2820 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2821 		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2822 		DB_DNODE_EXIT(db);
2823 	} else {
2824 		/*
2825 		 * Although zio_nowait() does not "wait for an IO", it does
2826 		 * initiate the IO.  If this is an empty write, it seems plausible
2827 		 * that the IO could actually be completed before the nowait
2828 		 * returns. We need to DB_DNODE_EXIT() first in case
2829 		 * zio_nowait() invalidates the dbuf.
2830 		 */
2831 		DB_DNODE_EXIT(db);
2832 		zio_nowait(dr->dr_zio);
2833 	}
2834 }
2835 
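/*
 * Sync every dirty record on the given list at the given level,
 * dispatching to dbuf_sync_indirect() or dbuf_sync_leaf() as
 * appropriate.  Stop at any record that already has a zio, which can
 * only happen for the meta-dnode (see the comment below).
 */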
2836 void
2837 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
2838 {
2839 	dbuf_dirty_record_t *dr;
2840 
2841 	while ((dr = list_head(list)) != NULL) {
2842 		if (dr->dr_zio != NULL) {
2843 			/*
2844 			 * If we find an already initialized zio then we
2845 			 * are processing the meta-dnode, and we have finished.
2846 			 * The dbufs for all dnodes are put back on the list
2847 			 * during processing, so that we can zio_wait()
2848 			 * these IOs after initiating all child IOs.
2849 			 */
2850 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2851 			    DMU_META_DNODE_OBJECT);
2852 			break;
2853 		}
2854 		if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
2855 		    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
2856 			VERIFY3U(dr->dr_dbuf->db_level, ==, level);
2857 		}
2858 		list_remove(list, dr);
2859 		if (dr->dr_dbuf->db_level > 0)
2860 			dbuf_sync_indirect(dr, tx);
2861 		else
2862 			dbuf_sync_leaf(dr, tx);
2863 	}
2864 }
2865 
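/*
 * The "ready" callback for a dbuf write.  Called once the block contents
 * are final: update the dnode's space accounting for the new block and
 * derive the block pointer's fill count from its contents.
 */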
2866 /* ARGSUSED */
2867 static void
2868 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2869 {
2870 	dmu_buf_impl_t *db = vdb;
2871 	dnode_t *dn;
2872 	blkptr_t *bp = zio->io_bp;
2873 	blkptr_t *bp_orig = &zio->io_bp_orig;
2874 	spa_t *spa = zio->io_spa;
2875 	int64_t delta;
2876 	uint64_t fill = 0;
2877 	int i;
2878 
2879 	ASSERT3P(db->db_blkptr, ==, bp);
2880 
2881 	DB_DNODE_ENTER(db);
2882 	dn = DB_DNODE(db);
2883 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2884 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2885 	zio->io_prev_space_delta = delta;
2886 
2887 	if (bp->blk_birth != 0) {
2888 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2889 		    BP_GET_TYPE(bp) == dn->dn_type) ||
2890 		    (db->db_blkid == DMU_SPILL_BLKID &&
2891 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2892 		    BP_IS_EMBEDDED(bp));
2893 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2894 	}
2895 
2896 	mutex_enter(&db->db_mtx);
2897 
2898 #ifdef ZFS_DEBUG
2899 	if (db->db_blkid == DMU_SPILL_BLKID) {
2900 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2901 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2902 		    db->db_blkptr == &dn->dn_phys->dn_spill);
2903 	}
2904 #endif
2905 
2906 	if (db->db_level == 0) {
2907 		mutex_enter(&dn->dn_mtx);
2908 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2909 		    db->db_blkid != DMU_SPILL_BLKID)
2910 			dn->dn_phys->dn_maxblkid = db->db_blkid;
2911 		mutex_exit(&dn->dn_mtx);
2912 
2913 		if (dn->dn_type == DMU_OT_DNODE) {
2914 			dnode_phys_t *dnp = db->db.db_data;
2915 			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2916 			    i--, dnp++) {
2917 				if (dnp->dn_type != DMU_OT_NONE)
2918 					fill++;
2919 			}
2920 		} else {
2921 			if (BP_IS_HOLE(bp)) {
2922 				fill = 0;
2923 			} else {
2924 				fill = 1;
2925 			}
2926 		}
2927 	} else {
2928 		blkptr_t *ibp = db->db.db_data;
2929 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2930 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2931 			if (BP_IS_HOLE(ibp))
2932 				continue;
2933 			fill += BP_GET_FILL(ibp);
2934 		}
2935 	}
2936 	DB_DNODE_EXIT(db);
2937 
2938 	if (!BP_IS_EMBEDDED(bp))
2939 		bp->blk_fill = fill;
2940 
2941 	mutex_exit(&db->db_mtx);
2942 }
2943 
2944 /*
2945  * The SPA will call this callback several times for each zio - once
2946  * for every physical child i/o (zio->io_phys_children times).  This
2947  * allows the DMU to monitor the progress of each logical i/o.  For example,
2948  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
2949  * block.  There may be a long delay before all copies/fragments are completed,
2950  * so this callback allows us to retire dirty space gradually, as the physical
2951  * i/os complete.
2952  */
2953 /* ARGSUSED */
2954 static void
2955 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
2956 {
2957 	dmu_buf_impl_t *db = arg;
2958 	objset_t *os = db->db_objset;
2959 	dsl_pool_t *dp = dmu_objset_pool(os);
2960 	dbuf_dirty_record_t *dr;
2961 	int delta = 0;
2962 
2963 	dr = db->db_data_pending;
2964 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
2965 
2966 	/*
2967 	 * The callback will be called io_phys_children times.  Retire one
2968 	 * portion of our dirty space each time we are called.  Any rounding
2969 	 * error will be cleaned up by dsl_pool_sync()'s call to
2970 	 * dsl_pool_undirty_space().
2971 	 */
2972 	delta = dr->dr_accounted / zio->io_phys_children;
2973 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
2974 }
2975 
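/*
 * The "done" callback for a dbuf write.  Called once the block is on
 * disk: record the birth of the new block (and the death of the one it
 * replaced) in the dataset, retire the dirty record, and drop the
 * dirty hold.
 */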
2976 /* ARGSUSED */
2977 static void
2978 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2979 {
2980 	dmu_buf_impl_t *db = vdb;
2981 	blkptr_t *bp_orig = &zio->io_bp_orig;
2982 	blkptr_t *bp = db->db_blkptr;
2983 	objset_t *os = db->db_objset;
2984 	dmu_tx_t *tx = os->os_synctx;
2985 	dbuf_dirty_record_t **drp, *dr;
2986 
2987 	ASSERT0(zio->io_error);
2988 	ASSERT(db->db_blkptr == bp);
2989 
2990 	/*
2991 	 * For nopwrites and rewrites we ensure that the bp matches our
2992 	 * original and bypass all the accounting.
2993 	 */
2994 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2995 		ASSERT(BP_EQUAL(bp, bp_orig));
2996 	} else {
2997 		dsl_dataset_t *ds = os->os_dsl_dataset;
2998 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2999 		dsl_dataset_block_born(ds, bp, tx);
3000 	}
3001 
3002 	mutex_enter(&db->db_mtx);
3003 
3004 	DBUF_VERIFY(db);
3005 
3006 	drp = &db->db_last_dirty;
3007 	while ((dr = *drp) != db->db_data_pending)
3008 		drp = &dr->dr_next;
3009 	ASSERT(!list_link_active(&dr->dr_dirty_node));
3010 	ASSERT(dr->dr_dbuf == db);
3011 	ASSERT(dr->dr_next == NULL);
3012 	*drp = dr->dr_next;
3013 
3014 #ifdef ZFS_DEBUG
3015 	if (db->db_blkid == DMU_SPILL_BLKID) {
3016 		dnode_t *dn;
3017 
3018 		DB_DNODE_ENTER(db);
3019 		dn = DB_DNODE(db);
3020 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
3021 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
3022 		    db->db_blkptr == &dn->dn_phys->dn_spill);
3023 		DB_DNODE_EXIT(db);
3024 	}
3025 #endif
3026 
3027 	if (db->db_level == 0) {
3028 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3029 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
3030 		if (db->db_state != DB_NOFILL) {
3031 			if (dr->dt.dl.dr_data != db->db_buf)
3032 				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
3033 				    db));
3034 			else if (!arc_released(db->db_buf))
3035 				arc_set_callback(db->db_buf, dbuf_do_evict, db);
3036 		}
3037 	} else {
3038 		dnode_t *dn;
3039 
3040 		DB_DNODE_ENTER(db);
3041 		dn = DB_DNODE(db);
3042 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3043 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
3044 		if (!BP_IS_HOLE(db->db_blkptr)) {
3045 			int epbs =
3046 			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3047 			ASSERT3U(db->db_blkid, <=,
3048 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
3049 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
3050 			    db->db.db_size);
3051 			if (!arc_released(db->db_buf))
3052 				arc_set_callback(db->db_buf, dbuf_do_evict, db);
3053 		}
3054 		DB_DNODE_EXIT(db);
3055 		mutex_destroy(&dr->dt.di.dr_mtx);
3056 		list_destroy(&dr->dt.di.dr_children);
3057 	}
3058 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
3059 
3060 	cv_broadcast(&db->db_changed);
3061 	ASSERT(db->db_dirtycnt > 0);
3062 	db->db_dirtycnt -= 1;
3063 	db->db_data_pending = NULL;
3064 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
3065 }
3066 
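/*
 * Adapters that let dbuf_write_ready() and dbuf_write_done(), which take
 * a (zio, arc_buf, arg) triple, serve as plain zio callbacks for writes
 * with no associated arc buf (NOFILL and override writes).
 */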
3067 static void
3068 dbuf_write_nofill_ready(zio_t *zio)
3069 {
3070 	dbuf_write_ready(zio, NULL, zio->io_private);
3071 }
3072 
3073 static void
3074 dbuf_write_nofill_done(zio_t *zio)
3075 {
3076 	dbuf_write_done(zio, NULL, zio->io_private);
3077 }
3078 
3079 static void
3080 dbuf_write_override_ready(zio_t *zio)
3081 {
3082 	dbuf_dirty_record_t *dr = zio->io_private;
3083 	dmu_buf_impl_t *db = dr->dr_dbuf;
3084 
3085 	dbuf_write_ready(zio, NULL, db);
3086 }
3087 
3088 static void
3089 dbuf_write_override_done(zio_t *zio)
3090 {
3091 	dbuf_dirty_record_t *dr = zio->io_private;
3092 	dmu_buf_impl_t *db = dr->dr_dbuf;
3093 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
3094 
3095 	mutex_enter(&db->db_mtx);
3096 	if (!BP_EQUAL(zio->io_bp, obp)) {
3097 		if (!BP_IS_HOLE(obp))
3098 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
3099 		arc_release(dr->dt.dl.dr_data, db);
3100 	}
3101 	mutex_exit(&db->db_mtx);
3102 
3103 	dbuf_write_done(zio, NULL, db);
3104 }
3105 
3106 /* Issue I/O to commit a dirty buffer to disk. */
3107 static void
3108 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
3109 {
3110 	dmu_buf_impl_t *db = dr->dr_dbuf;
3111 	dnode_t *dn;
3112 	objset_t *os;
3113 	dmu_buf_impl_t *parent = db->db_parent;
3114 	uint64_t txg = tx->tx_txg;
3115 	zbookmark_phys_t zb;
3116 	zio_prop_t zp;
3117 	zio_t *zio;
3118 	int wp_flag = 0;
3119 
3120 	DB_DNODE_ENTER(db);
3121 	dn = DB_DNODE(db);
3122 	os = dn->dn_objset;
3123 
3124 	if (db->db_state != DB_NOFILL) {
3125 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
3126 			/*
3127 			 * Private object buffers are released here rather
3128 			 * than in dbuf_dirty() since they are only modified
3129 			 * in the syncing context and we don't want the
3130 			 * overhead of making multiple copies of the data.
3131 			 */
3132 			if (BP_IS_HOLE(db->db_blkptr)) {
3133 				arc_buf_thaw(data);
3134 			} else {
3135 				dbuf_release_bp(db);
3136 			}
3137 		}
3138 	}
3139 
3140 	if (parent != dn->dn_dbuf) {
3141 		/* Our parent is an indirect block. */
3142 		/* We have a dirty parent that has been scheduled for write. */
3143 		ASSERT(parent && parent->db_data_pending);
3144 		/* Our parent's buffer is one level closer to the dnode. */
3145 		ASSERT(db->db_level == parent->db_level-1);
3146 		/*
3147 		 * We're about to modify our parent's db_data by modifying
3148 		 * our block pointer, so the parent must be released.
3149 		 */
3150 		ASSERT(arc_released(parent->db_buf));
3151 		zio = parent->db_data_pending->dr_zio;
3152 	} else {
3153 		/* Our parent is the dnode itself. */
3154 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
3155 		    db->db_blkid != DMU_SPILL_BLKID) ||
3156 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
3157 		if (db->db_blkid != DMU_SPILL_BLKID)
3158 			ASSERT3P(db->db_blkptr, ==,
3159 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
3160 		zio = dn->dn_zio;
3161 	}
3162 
3163 	ASSERT(db->db_level == 0 || data == db->db_buf);
3164 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
3165 	ASSERT(zio);
3166 
3167 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
3168 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
3169 	    db->db.db_object, db->db_level, db->db_blkid);
3170 
3171 	if (db->db_blkid == DMU_SPILL_BLKID)
3172 		wp_flag = WP_SPILL;
3173 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
3174 
3175 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
3176 	DB_DNODE_EXIT(db);
3177 
3178 	if (db->db_level == 0 &&
3179 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
3180 		/*
3181 		 * The BP for this block has been provided by open context
3182 		 * (by dmu_sync() or dmu_buf_write_embedded()).
3183 		 */
3184 		void *contents = (data != NULL) ? data->b_data : NULL;
3185 
3186 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
3187 		    db->db_blkptr, contents, db->db.db_size, &zp,
3188 		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
3189 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3190 		mutex_enter(&db->db_mtx);
3191 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
3192 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
3193 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
3194 		mutex_exit(&db->db_mtx);
3195 	} else if (db->db_state == DB_NOFILL) {
3196 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
3197 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
3198 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
3199 		    db->db_blkptr, NULL, db->db.db_size, &zp,
3200 		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
3201 		    ZIO_PRIORITY_ASYNC_WRITE,
3202 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
3203 	} else {
3204 		ASSERT(arc_released(data));
3205 		dr->dr_zio = arc_write(zio, os->os_spa, txg,
3206 		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
3207 		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
3208 		    dbuf_write_physdone, dbuf_write_done, db,
3209 		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3210 	}
3211 }
3212