xref: /freebsd/sys/contrib/openzfs/module/zfs/dmu_tx.c (revision 2b833162)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
25  */
26 
27 #include <sys/dmu.h>
28 #include <sys/dmu_impl.h>
29 #include <sys/dbuf.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/dmu_objset.h>
32 #include <sys/dsl_dataset.h>
33 #include <sys/dsl_dir.h>
34 #include <sys/dsl_pool.h>
35 #include <sys/zap_impl.h>
36 #include <sys/spa.h>
37 #include <sys/sa.h>
38 #include <sys/sa_impl.h>
39 #include <sys/zfs_context.h>
40 #include <sys/trace_zfs.h>
41 
42 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
43     uint64_t arg1, uint64_t arg2);
44 
45 dmu_tx_stats_t dmu_tx_stats = {
46 	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
47 	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
48 	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
49 	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
50 	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
51 	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
52 	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
53 	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
54 	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
55 	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
56 	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
57 	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
58 	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
59 };
60 
61 static kstat_t *dmu_tx_ksp;
62 
63 dmu_tx_t *
64 dmu_tx_create_dd(dsl_dir_t *dd)
65 {
66 	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
67 	tx->tx_dir = dd;
68 	if (dd != NULL)
69 		tx->tx_pool = dd->dd_pool;
70 	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
71 	    offsetof(dmu_tx_hold_t, txh_node));
72 	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
73 	    offsetof(dmu_tx_callback_t, dcb_node));
74 	tx->tx_start = gethrtime();
75 	return (tx);
76 }
77 
78 dmu_tx_t *
79 dmu_tx_create(objset_t *os)
80 {
81 	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
82 	tx->tx_objset = os;
83 	return (tx);
84 }
85 
86 dmu_tx_t *
87 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
88 {
89 	dmu_tx_t *tx = dmu_tx_create_dd(NULL);
90 
91 	TXG_VERIFY(dp->dp_spa, txg);
92 	tx->tx_pool = dp;
93 	tx->tx_txg = txg;
94 	tx->tx_anyobj = TRUE;
95 
96 	return (tx);
97 }
98 
99 int
100 dmu_tx_is_syncing(dmu_tx_t *tx)
101 {
102 	return (tx->tx_anyobj);
103 }
104 
105 int
106 dmu_tx_private_ok(dmu_tx_t *tx)
107 {
108 	return (tx->tx_anyobj);
109 }
110 
111 static dmu_tx_hold_t *
112 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
113     uint64_t arg1, uint64_t arg2)
114 {
115 	dmu_tx_hold_t *txh;
116 
117 	if (dn != NULL) {
118 		(void) zfs_refcount_add(&dn->dn_holds, tx);
119 		if (tx->tx_txg != 0) {
120 			mutex_enter(&dn->dn_mtx);
121 			/*
122 			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
123 			 * problem, but there's no way for it to happen (for
124 			 * now, at least).
125 			 */
126 			ASSERT(dn->dn_assigned_txg == 0);
127 			dn->dn_assigned_txg = tx->tx_txg;
128 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
129 			mutex_exit(&dn->dn_mtx);
130 		}
131 	}
132 
133 	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
134 	txh->txh_tx = tx;
135 	txh->txh_dnode = dn;
136 	zfs_refcount_create(&txh->txh_space_towrite);
137 	zfs_refcount_create(&txh->txh_memory_tohold);
138 	txh->txh_type = type;
139 	txh->txh_arg1 = arg1;
140 	txh->txh_arg2 = arg2;
141 	list_insert_tail(&tx->tx_holds, txh);
142 
143 	return (txh);
144 }
145 
146 static dmu_tx_hold_t *
147 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
148     enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
149 {
150 	dnode_t *dn = NULL;
151 	dmu_tx_hold_t *txh;
152 	int err;
153 
154 	if (object != DMU_NEW_OBJECT) {
155 		err = dnode_hold(os, object, FTAG, &dn);
156 		if (err != 0) {
157 			tx->tx_err = err;
158 			return (NULL);
159 		}
160 	}
161 	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
162 	if (dn != NULL)
163 		dnode_rele(dn, FTAG);
164 	return (txh);
165 }
166 
167 void
168 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
169 {
170 	/*
171 	 * If we're syncing, the caller can manipulate any object anyhow, and
172 	 * the hold on the dnode_t can cause problems.
173 	 */
174 	if (!dmu_tx_is_syncing(tx))
175 		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
176 }
177 
178 /*
179  * This function reads specified data from disk.  The specified data will
180  * be needed to perform the transaction -- i.e., it will be read after
181  * we do dmu_tx_assign().  There are two reasons that we read the data now
182  * (before dmu_tx_assign()):
183  *
184  * 1. Reading it now has potentially better performance.  The transaction
185  * has not yet been assigned, so the TXG is not held open, and also the
186  * caller typically has less locks held when calling dmu_tx_hold_*() than
187  * after the transaction has been assigned.  This reduces the lock (and txg)
188  * hold times, thus reducing lock contention.
189  *
190  * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
191  * that are detected before they start making changes to the DMU state
192  * (i.e. now).  Once the transaction has been assigned, and some DMU
193  * state has been changed, it can be difficult to recover from an i/o
194  * error (e.g. to undo the changes already made in memory at the DMU
195  * layer).  Typically code to do so does not exist in the caller -- it
196  * assumes that the data has already been cached and thus i/o errors are
197  * not possible.
198  *
199  * It has been observed that the i/o initiated here can be a performance
200  * problem, and it appears to be optional, because we don't look at the
201  * data which is read.  However, removing this read would only serve to
202  * move the work elsewhere (after the dmu_tx_assign()), where it may
203  * have a greater impact on performance (in addition to the impact on
204  * fault tolerance noted above).
205  */
206 static int
207 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
208 {
209 	int err;
210 	dmu_buf_impl_t *db;
211 
212 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
213 	db = dbuf_hold_level(dn, level, blkid, FTAG);
214 	rw_exit(&dn->dn_struct_rwlock);
215 	if (db == NULL)
216 		return (SET_ERROR(EIO));
217 	/*
218 	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
219 	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
220 	 */
221 	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
222 	    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
223 	dbuf_rele(db, FTAG);
224 	return (err);
225 }
226 
227 static void
228 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
229 {
230 	dnode_t *dn = txh->txh_dnode;
231 	int err = 0;
232 
233 	if (len == 0)
234 		return;
235 
236 	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
237 
238 	if (dn == NULL)
239 		return;
240 
241 	/*
242 	 * For i/o error checking, read the blocks that will be needed
243 	 * to perform the write: the first and last level-0 blocks (if
244 	 * they are not aligned, i.e. if they are partial-block writes),
245 	 * and all the level-1 blocks.
246 	 */
247 	if (dn->dn_maxblkid == 0) {
248 		if (off < dn->dn_datablksz &&
249 		    (off > 0 || len < dn->dn_datablksz)) {
250 			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
251 			if (err != 0) {
252 				txh->txh_tx->tx_err = err;
253 			}
254 		}
255 	} else {
256 		zio_t *zio = zio_root(dn->dn_objset->os_spa,
257 		    NULL, NULL, ZIO_FLAG_CANFAIL);
258 
259 		/* first level-0 block */
260 		uint64_t start = off >> dn->dn_datablkshift;
261 		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
262 			err = dmu_tx_check_ioerr(zio, dn, 0, start);
263 			if (err != 0) {
264 				txh->txh_tx->tx_err = err;
265 			}
266 		}
267 
268 		/* last level-0 block */
269 		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
270 		if (end != start && end <= dn->dn_maxblkid &&
271 		    P2PHASE(off + len, dn->dn_datablksz)) {
272 			err = dmu_tx_check_ioerr(zio, dn, 0, end);
273 			if (err != 0) {
274 				txh->txh_tx->tx_err = err;
275 			}
276 		}
277 
278 		/* level-1 blocks */
279 		if (dn->dn_nlevels > 1) {
280 			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
281 			for (uint64_t i = (start >> shft) + 1;
282 			    i < end >> shft; i++) {
283 				err = dmu_tx_check_ioerr(zio, dn, 1, i);
284 				if (err != 0) {
285 					txh->txh_tx->tx_err = err;
286 				}
287 			}
288 		}
289 
290 		err = zio_wait(zio);
291 		if (err != 0) {
292 			txh->txh_tx->tx_err = err;
293 		}
294 	}
295 }
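
/*
 * As an illustration of the rules above (block numbers and sizes are
 * made up): with a 128K data block size and a hold covering off=100K,
 * len=200K, the range spans level-0 blocks 0 through 2.  Block 0 is a
 * partial overwrite (off is not block-aligned) and block 2 is a partial
 * overwrite (off+len is not block-aligned), so both are read for i/o
 * error checking, while block 1 is completely overwritten and is not
 * read.
 */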
296 
297 static void
298 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
299 {
300 	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
301 	    DNODE_MIN_SIZE, FTAG);
302 }
303 
304 void
305 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
306 {
307 	dmu_tx_hold_t *txh;
308 
309 	ASSERT0(tx->tx_txg);
310 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
311 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
312 
313 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
314 	    object, THT_WRITE, off, len);
315 	if (txh != NULL) {
316 		dmu_tx_count_write(txh, off, len);
317 		dmu_tx_count_dnode(txh);
318 	}
319 }
320 
321 void
322 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
323 {
324 	dmu_tx_hold_t *txh;
325 
326 	ASSERT0(tx->tx_txg);
327 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
328 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
329 
330 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
331 	if (txh != NULL) {
332 		dmu_tx_count_write(txh, off, len);
333 		dmu_tx_count_dnode(txh);
334 	}
335 }
336 
337 /*
338  * This function marks the transaction as being a "net free".  The end
339  * result is that refquotas will be disabled for this transaction, and
340  * this transaction will be able to use half of the pool space overhead
341  * (see dsl_pool_adjustedsize()).  Therefore this function should only
342  * be called for transactions that we expect will not cause a net increase
343  * in the amount of space used (but it's OK if that is occasionally not true).
344  */
345 void
346 dmu_tx_mark_netfree(dmu_tx_t *tx)
347 {
348 	tx->tx_netfree = B_TRUE;
349 }
350 
351 static void
352 dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
353 {
354 	dmu_tx_t *tx = txh->txh_tx;
355 	dnode_t *dn = txh->txh_dnode;
356 	int err;
357 
358 	ASSERT(tx->tx_txg == 0);
359 
360 	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
361 		return;
362 	if (len == DMU_OBJECT_END)
363 		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;
364 
365 	/*
366 	 * For i/o error checking, we read the first and last level-0
367 	 * blocks if they are not aligned, and all the level-1 blocks.
368 	 *
369 	 * Note:  dbuf_free_range() assumes that we have not instantiated
370 	 * any level-0 dbufs that will be completely freed.  Therefore we must
371 	 * exercise care to not read or count the first and last blocks
372 	 * if they are blocksize-aligned.
373 	 */
374 	if (dn->dn_datablkshift == 0) {
375 		if (off != 0 || len < dn->dn_datablksz)
376 			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
377 	} else {
378 		/* first block will be modified if it is not aligned */
379 		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
380 			dmu_tx_count_write(txh, off, 1);
381 		/* last block will be modified if it is not aligned */
382 		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
383 			dmu_tx_count_write(txh, off + len, 1);
384 	}
385 
386 	/*
387 	 * Check level-1 blocks.
388 	 */
389 	if (dn->dn_nlevels > 1) {
390 		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
391 		    SPA_BLKPTRSHIFT;
392 		uint64_t start = off >> shift;
393 		uint64_t end = (off + len) >> shift;
394 
395 		ASSERT(dn->dn_indblkshift != 0);
396 
397 		/*
398 		 * dnode_reallocate() can result in an object with indirect
399 		 * blocks having an odd data block size.  In this case,
400 		 * just check the single block.
401 		 */
402 		if (dn->dn_datablkshift == 0)
403 			start = end = 0;
404 
405 		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
406 		    NULL, NULL, ZIO_FLAG_CANFAIL);
407 		for (uint64_t i = start; i <= end; i++) {
408 			uint64_t ibyte = i << shift;
409 			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
410 			i = ibyte >> shift;
411 			if (err == ESRCH || i > end)
412 				break;
413 			if (err != 0) {
414 				tx->tx_err = err;
415 				(void) zio_wait(zio);
416 				return;
417 			}
418 
419 			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
420 			    1 << dn->dn_indblkshift, FTAG);
421 
422 			err = dmu_tx_check_ioerr(zio, dn, 1, i);
423 			if (err != 0) {
424 				tx->tx_err = err;
425 				(void) zio_wait(zio);
426 				return;
427 			}
428 		}
429 		err = zio_wait(zio);
430 		if (err != 0) {
431 			tx->tx_err = err;
432 			return;
433 		}
434 	}
435 }
436 
437 void
438 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
439 {
440 	dmu_tx_hold_t *txh;
441 
442 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
443 	    object, THT_FREE, off, len);
444 	if (txh != NULL) {
445 		dmu_tx_count_dnode(txh);
446 		dmu_tx_count_free(txh, off, len);
447 	}
448 }
449 
450 void
451 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
452 {
453 	dmu_tx_hold_t *txh;
454 
455 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
456 	if (txh != NULL) {
457 		dmu_tx_count_dnode(txh);
458 		dmu_tx_count_free(txh, off, len);
459 	}
460 }
461 
462 static void
463 dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
464 {
465 
466 	/*
467 	 * Reuse dmu_tx_count_free(), it does exactly what we need for clone.
468 	 */
469 	dmu_tx_count_free(txh, off, len);
470 }
471 
472 void
473 dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
474 {
475 	dmu_tx_hold_t *txh;
476 
477 	ASSERT0(tx->tx_txg);
478 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
479 
480 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
481 	if (txh != NULL) {
482 		dmu_tx_count_dnode(txh);
483 		dmu_tx_count_clone(txh, off, len);
484 	}
485 }
486 
487 static void
488 dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
489 {
490 	dmu_tx_t *tx = txh->txh_tx;
491 	dnode_t *dn = txh->txh_dnode;
492 	int err;
493 	extern int zap_micro_max_size;
494 
495 	ASSERT(tx->tx_txg == 0);
496 
497 	dmu_tx_count_dnode(txh);
498 
499 	/*
500 	 * Modifying an almost-full microzap is around the worst case (128KB).
501 	 *
502 	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
503 	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
504 	 * - 4 new blocks written if adding:
505 	 *    - 2 blocks for possibly split leaves,
506 	 *    - 2 grown ptrtbl blocks
507 	 */
508 	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
509 	    zap_micro_max_size, FTAG);
510 
511 	if (dn == NULL)
512 		return;
513 
514 	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
515 
516 	if (dn->dn_maxblkid == 0 || name == NULL) {
517 		/*
518 		 * This is a microzap (only one block), or we don't know
519 		 * the name.  Check the first block for i/o errors.
520 		 */
521 		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
522 		if (err != 0) {
523 			tx->tx_err = err;
524 		}
525 	} else {
526 		/*
527 		 * Access the name so that we'll check for i/o errors to
528 		 * the leaf blocks, etc.  We ignore ENOENT, as this name
529 		 * may not yet exist.
530 		 */
531 		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
532 		if (err == EIO || err == ECKSUM || err == ENXIO) {
533 			tx->tx_err = err;
534 		}
535 	}
536 }
537 
538 void
539 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
540 {
541 	dmu_tx_hold_t *txh;
542 
543 	ASSERT0(tx->tx_txg);
544 
545 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
546 	    object, THT_ZAP, add, (uintptr_t)name);
547 	if (txh != NULL)
548 		dmu_tx_hold_zap_impl(txh, name);
549 }
550 
551 void
552 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
553 {
554 	dmu_tx_hold_t *txh;
555 
556 	ASSERT0(tx->tx_txg);
557 	ASSERT(dn != NULL);
558 
559 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
560 	if (txh != NULL)
561 		dmu_tx_hold_zap_impl(txh, name);
562 }
563 
564 void
565 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
566 {
567 	dmu_tx_hold_t *txh;
568 
569 	ASSERT(tx->tx_txg == 0);
570 
571 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
572 	    object, THT_BONUS, 0, 0);
573 	if (txh)
574 		dmu_tx_count_dnode(txh);
575 }
576 
577 void
578 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
579 {
580 	dmu_tx_hold_t *txh;
581 
582 	ASSERT0(tx->tx_txg);
583 
584 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
585 	if (txh)
586 		dmu_tx_count_dnode(txh);
587 }
588 
589 void
590 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
591 {
592 	dmu_tx_hold_t *txh;
593 
594 	ASSERT(tx->tx_txg == 0);
595 
596 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
597 	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
598 	if (txh) {
599 		(void) zfs_refcount_add_many(
600 		    &txh->txh_space_towrite, space, FTAG);
601 	}
602 }
603 
604 #ifdef ZFS_DEBUG
605 void
606 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
607 {
608 	boolean_t match_object = B_FALSE;
609 	boolean_t match_offset = B_FALSE;
610 
611 	DB_DNODE_ENTER(db);
612 	dnode_t *dn = DB_DNODE(db);
613 	ASSERT(tx->tx_txg != 0);
614 	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
615 	ASSERT3U(dn->dn_object, ==, db->db.db_object);
616 
617 	if (tx->tx_anyobj) {
618 		DB_DNODE_EXIT(db);
619 		return;
620 	}
621 
622 	/* XXX No checking on the meta dnode for now */
623 	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
624 		DB_DNODE_EXIT(db);
625 		return;
626 	}
627 
628 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
629 	    txh = list_next(&tx->tx_holds, txh)) {
630 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
631 		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
632 			match_object = TRUE;
633 		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
634 			int datablkshift = dn->dn_datablkshift ?
635 			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
636 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
637 			int shift = datablkshift + epbs * db->db_level;
638 			uint64_t beginblk = shift >= 64 ? 0 :
639 			    (txh->txh_arg1 >> shift);
640 			uint64_t endblk = shift >= 64 ? 0 :
641 			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
642 			uint64_t blkid = db->db_blkid;
643 
644 			/* XXX txh_arg2 better not be zero... */
645 
646 			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
647 			    txh->txh_type, (u_longlong_t)beginblk,
648 			    (u_longlong_t)endblk);
649 
650 			switch (txh->txh_type) {
651 			case THT_WRITE:
652 				if (blkid >= beginblk && blkid <= endblk)
653 					match_offset = TRUE;
654 				/*
655 				 * We will let this hold work for the bonus
656 				 * or spill buffer so that we don't need to
657 				 * hold it when creating a new object.
658 				 */
659 				if (blkid == DMU_BONUS_BLKID ||
660 				    blkid == DMU_SPILL_BLKID)
661 					match_offset = TRUE;
662 				/*
663 				 * They might have to increase nlevels,
664 				 * thus dirtying the new TLIBs.  Or they
665 				 * might have to change the block size,
666 				 * thus dirtying the new lvl=0 blk=0.
667 				 */
668 				if (blkid == 0)
669 					match_offset = TRUE;
670 				break;
671 			case THT_FREE:
672 				/*
673 				 * We will dirty all the level 1 blocks in
674 				 * the free range and perhaps the first and
675 				 * last level 0 block.
676 				 */
677 				if (blkid >= beginblk && (blkid <= endblk ||
678 				    txh->txh_arg2 == DMU_OBJECT_END))
679 					match_offset = TRUE;
680 				break;
681 			case THT_SPILL:
682 				if (blkid == DMU_SPILL_BLKID)
683 					match_offset = TRUE;
684 				break;
685 			case THT_BONUS:
686 				if (blkid == DMU_BONUS_BLKID)
687 					match_offset = TRUE;
688 				break;
689 			case THT_ZAP:
690 				match_offset = TRUE;
691 				break;
692 			case THT_NEWOBJECT:
693 				match_object = TRUE;
694 				break;
695 			case THT_CLONE:
696 				if (blkid >= beginblk && blkid <= endblk)
697 					match_offset = TRUE;
698 				break;
699 			default:
700 				cmn_err(CE_PANIC, "bad txh_type %d",
701 				    txh->txh_type);
702 			}
703 		}
704 		if (match_object && match_offset) {
705 			DB_DNODE_EXIT(db);
706 			return;
707 		}
708 	}
709 	DB_DNODE_EXIT(db);
710 	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
711 	    (u_longlong_t)db->db.db_object, db->db_level,
712 	    (u_longlong_t)db->db_blkid);
713 }
714 #endif
715 
716 /*
717  * If we can't do 10 iops, something is wrong.  Let us go ahead
718  * and hit zfs_dirty_data_max.
719  */
720 static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */
721 
722 /*
723  * We delay transactions when we've determined that the backend storage
724  * isn't able to accommodate the rate of incoming writes.
725  *
726  * If there is already a transaction waiting, we delay relative to when
727  * that transaction finishes waiting.  This way the calculated min_time
728  * is independent of the number of threads concurrently executing
729  * transactions.
730  *
731  * If we are the only waiter, wait relative to when the transaction
732  * started, rather than the current time.  This credits the transaction for
733  * "time already served", e.g. reading indirect blocks.
734  *
735  * The minimum time for a transaction to take is calculated as:
736  *     min_time = scale * (dirty - min) / (max - dirty)
737  *     min_time is then capped at zfs_delay_max_ns.
738  *
739  * The delay has two degrees of freedom that can be adjusted via tunables.
740  * The percentage of dirty data at which we start to delay is defined by
741  * zfs_delay_min_dirty_percent. This should typically be at or above
742  * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
743  * delay after writing at full speed has failed to keep up with the incoming
744  * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
745  * speaking, this variable determines the amount of delay at the midpoint of
746  * the curve.
747  *
748  * delay
749  *  10ms +-------------------------------------------------------------*+
750  *       |                                                             *|
751  *   9ms +                                                             *+
752  *       |                                                             *|
753  *   8ms +                                                             *+
754  *       |                                                            * |
755  *   7ms +                                                            * +
756  *       |                                                            * |
757  *   6ms +                                                            * +
758  *       |                                                            * |
759  *   5ms +                                                           *  +
760  *       |                                                           *  |
761  *   4ms +                                                           *  +
762  *       |                                                           *  |
763  *   3ms +                                                          *   +
764  *       |                                                          *   |
765  *   2ms +                                              (midpoint) *    +
766  *       |                                                  |    **     |
767  *   1ms +                                                  v ***       +
768  *       |             zfs_delay_scale ---------->     ********         |
769  *     0 +-------------------------------------*********----------------+
770  *       0%                    <- zfs_dirty_data_max ->               100%
771  *
772  * Note that since the delay is added to the outstanding time remaining on the
773  * most recent transaction, the delay is effectively the inverse of IOPS.
774  * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
775  * was chosen such that small changes in the amount of accumulated dirty data
776  * in the first 3/4 of the curve yield relatively small differences in the
777  * amount of delay.
778  *
779  * The effects can be easier to understand when the amount of delay is
780  * represented on a log scale:
781  *
782  * delay
783  * 100ms +-------------------------------------------------------------++
784  *       +                                                              +
785  *       |                                                              |
786  *       +                                                             *+
787  *  10ms +                                                             *+
788  *       +                                                           ** +
789  *       |                                              (midpoint)  **  |
790  *       +                                                  |     **    +
791  *   1ms +                                                  v ****      +
792  *       +             zfs_delay_scale ---------->        *****         +
793  *       |                                             ****             |
794  *       +                                          ****                +
795  * 100us +                                        **                    +
796  *       +                                       *                      +
797  *       |                                      *                       |
798  *       +                                     *                        +
799  *  10us +                                     *                        +
800  *       +                                                              +
801  *       |                                                              |
802  *       +                                                              +
803  *       +--------------------------------------------------------------+
804  *       0%                    <- zfs_dirty_data_max ->               100%
805  *
806  * Note here that only as the amount of dirty data approaches its limit does
807  * the delay start to increase rapidly. The goal of a properly tuned system
808  * should be to keep the amount of dirty data out of that range by first
809  * ensuring that the appropriate limits are set for the I/O scheduler to reach
810  * optimal throughput on the backend storage, and then by changing the value
811  * of zfs_delay_scale to increase the steepness of the curve.
812  */
813 static void
814 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
815 {
816 	dsl_pool_t *dp = tx->tx_pool;
817 	uint64_t delay_min_bytes, wrlog;
818 	hrtime_t wakeup, tx_time = 0, now;
819 
820 	/* Calculate minimum transaction time for the dirty data amount. */
821 	delay_min_bytes =
822 	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
823 	if (dirty > delay_min_bytes) {
824 		/*
825 		 * The caller has already waited until we are under the max.
826 		 * We make them pass us the amount of dirty data so we don't
827 		 * have to handle the case of it being >= the max, which
828 		 * could cause a divide-by-zero if it's == the max.
829 		 */
830 		ASSERT3U(dirty, <, zfs_dirty_data_max);
831 
832 		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
833 		    (zfs_dirty_data_max - dirty);
834 	}
835 
836 	/* Calculate minimum transaction time for the TX_WRITE log size. */
837 	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
838 	delay_min_bytes =
839 	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
840 	if (wrlog >= zfs_wrlog_data_max) {
841 		tx_time = zfs_delay_max_ns;
842 	} else if (wrlog > delay_min_bytes) {
843 		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
844 		    (zfs_wrlog_data_max - wrlog), tx_time);
845 	}
846 
847 	if (tx_time == 0)
848 		return;
849 
850 	tx_time = MIN(tx_time, zfs_delay_max_ns);
851 	now = gethrtime();
852 	if (now > tx->tx_start + tx_time)
853 		return;
854 
855 	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
856 	    uint64_t, tx_time);
857 
858 	mutex_enter(&dp->dp_lock);
859 	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
860 	dp->dp_last_wakeup = wakeup;
861 	mutex_exit(&dp->dp_lock);
862 
863 	zfs_sleep_until(wakeup);
864 }
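
/*
 * To make the min_time calculation above concrete (the tunable values
 * used here are purely illustrative): with zfs_dirty_data_max = 4GB,
 * zfs_delay_min_dirty_percent = 60 (so delay_min_bytes = 2.4GB) and
 * zfs_delay_scale = 500000, a transaction seeing dirty = 3.6GB gets
 *
 *	tx_time = 500000 * (3.6GB - 2.4GB) / (4GB - 3.6GB) = 1500000ns
 *
 * i.e. a minimum transaction time of 1.5ms, which is then capped at
 * zfs_delay_max_ns.
 */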
865 
866 /*
867  * This routine attempts to assign the transaction to a transaction group.
868  * To do so, we must determine if there is sufficient free space on disk.
869  *
870  * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
871  * on it), then it is assumed that there is sufficient free space,
872  * unless there's insufficient slop space in the pool (see the comment
873  * above spa_slop_shift in spa_misc.c).
874  *
875  * If it is not a "netfree" transaction, then if the data already on disk
876  * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
877  * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
878  * plus the rough estimate of this transaction's changes, may exceed the
879  * allowed usage, then this will fail with ERESTART, which will cause the
880  * caller to wait for the pending changes to be written to disk (by waiting
881  * for the next TXG to open), and then check the space usage again.
882  *
883  * The rough estimate of pending changes is comprised of the sum of:
884  * The rough estimate of pending changes is the sum of:
885  *  - this transaction's holds' txh_space_towrite
886  *
887  *  - dd_tempreserved[], which is the sum of in-flight transactions'
888  *    holds' txh_space_towrite (i.e. those transactions that have called
889  *    dmu_tx_assign() but not yet called dmu_tx_commit()).
890  *
891  *  - dd_space_towrite[], which is the amount of dirtied dbufs.
892  *
893  * Note that all of these values are inflated by spa_get_worst_case_asize(),
894  * which means that we may get ERESTART well before we are actually in danger
895  * of running out of space, but this also mitigates any small inaccuracies
896  * in the rough estimate (e.g. txh_space_towrite doesn't take into account
897  * indirect blocks, and dd_space_towrite[] doesn't take into account changes
898  * to the MOS).
899  *
900  * Note that due to this algorithm, it is possible to exceed the allowed
901  * usage by one transaction.  Also, as we approach the allowed usage,
902  * we will allow a very limited amount of changes into each TXG, thus
903  * decreasing performance.
904  */
905 static int
906 dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
907 {
908 	spa_t *spa = tx->tx_pool->dp_spa;
909 
910 	ASSERT0(tx->tx_txg);
911 
912 	if (tx->tx_err) {
913 		DMU_TX_STAT_BUMP(dmu_tx_error);
914 		return (tx->tx_err);
915 	}
916 
917 	if (spa_suspended(spa)) {
918 		DMU_TX_STAT_BUMP(dmu_tx_suspended);
919 
920 		/*
921 		 * If the user has indicated a blocking failure mode
922 		 * then return ERESTART which will block in dmu_tx_wait().
923 		 * Otherwise, return EIO so that an error can get
924 		 * propagated back to the VOP calls.
925 		 *
926 		 * Note that we always honor the txg_how flag regardless
927 		 * of the failuremode setting.
928 		 */
929 		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
930 		    !(txg_how & TXG_WAIT))
931 			return (SET_ERROR(EIO));
932 
933 		return (SET_ERROR(ERESTART));
934 	}
935 
936 	if (!tx->tx_dirty_delayed &&
937 	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
938 		tx->tx_wait_dirty = B_TRUE;
939 		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
940 		return (SET_ERROR(ERESTART));
941 	}
942 
943 	if (!tx->tx_dirty_delayed &&
944 	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
945 		tx->tx_wait_dirty = B_TRUE;
946 		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
947 		return (SET_ERROR(ERESTART));
948 	}
949 
950 	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
951 	tx->tx_needassign_txh = NULL;
952 
953 	/*
954 	 * NB: No error returns are allowed after txg_hold_open, but
955 	 * before processing the dnode holds, due to the
956 	 * dmu_tx_unassign() logic.
957 	 */
958 
959 	uint64_t towrite = 0;
960 	uint64_t tohold = 0;
961 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
962 	    txh = list_next(&tx->tx_holds, txh)) {
963 		dnode_t *dn = txh->txh_dnode;
964 		if (dn != NULL) {
965 			/*
966 			 * This thread can't hold the dn_struct_rwlock
967 			 * while assigning the tx, because this can lead to
968 			 * deadlock. Specifically, if this dnode is already
969 			 * assigned to an earlier txg, this thread may need
970 			 * to wait for that txg to sync (the ERESTART case
971 			 * below).  The other thread that has assigned this
972 			 * dnode to an earlier txg prevents this txg from
973 			 * syncing until its tx can complete (calling
974 			 * dmu_tx_commit()), but it may need to acquire the
975 			 * dn_struct_rwlock to do so (e.g. via
976 			 * dmu_buf_hold*()).
977 			 *
978 			 * Note that this thread can't hold the lock for
979 			 * read either, but the rwlock doesn't record
980 			 * enough information to make that assertion.
981 			 */
982 			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));
983 
984 			mutex_enter(&dn->dn_mtx);
985 			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
986 				mutex_exit(&dn->dn_mtx);
987 				tx->tx_needassign_txh = txh;
988 				DMU_TX_STAT_BUMP(dmu_tx_group);
989 				return (SET_ERROR(ERESTART));
990 			}
991 			if (dn->dn_assigned_txg == 0)
992 				dn->dn_assigned_txg = tx->tx_txg;
993 			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
994 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
995 			mutex_exit(&dn->dn_mtx);
996 		}
997 		towrite += zfs_refcount_count(&txh->txh_space_towrite);
998 		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
999 	}
1000 
1001 	/* needed allocation: worst-case estimate of write space */
1002 	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
1003 	/* calculate memory footprint estimate */
1004 	uint64_t memory = towrite + tohold;
1005 
1006 	if (tx->tx_dir != NULL && asize != 0) {
1007 		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
1008 		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
1009 		if (err != 0)
1010 			return (err);
1011 	}
1012 
1013 	DMU_TX_STAT_BUMP(dmu_tx_assigned);
1014 
1015 	return (0);
1016 }
1017 
1018 static void
1019 dmu_tx_unassign(dmu_tx_t *tx)
1020 {
1021 	if (tx->tx_txg == 0)
1022 		return;
1023 
1024 	txg_rele_to_quiesce(&tx->tx_txgh);
1025 
1026 	/*
1027 	 * Walk the transaction's hold list, removing the hold on the
1028 	 * associated dnode, and notifying waiters if the refcount drops to 0.
1029 	 */
1030 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
1031 	    txh && txh != tx->tx_needassign_txh;
1032 	    txh = list_next(&tx->tx_holds, txh)) {
1033 		dnode_t *dn = txh->txh_dnode;
1034 
1035 		if (dn == NULL)
1036 			continue;
1037 		mutex_enter(&dn->dn_mtx);
1038 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1039 
1040 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1041 			dn->dn_assigned_txg = 0;
1042 			cv_broadcast(&dn->dn_notxholds);
1043 		}
1044 		mutex_exit(&dn->dn_mtx);
1045 	}
1046 
1047 	txg_rele_to_sync(&tx->tx_txgh);
1048 
1049 	tx->tx_lasttried_txg = tx->tx_txg;
1050 	tx->tx_txg = 0;
1051 }
1052 
1053 /*
1054  * Assign tx to a transaction group; txg_how is a bitmask:
1055  *
1056  * If TXG_WAIT is set and the currently open txg is full, this function
1057  * will wait until there's a new txg. This should be used when no locks
1058  * are being held. With this bit set, this function will only fail if
1059  * we're truly out of space (or over quota).
1060  *
1061  * If TXG_WAIT is *not* set and we can't assign into the currently open
1062  * txg without blocking, this function will return immediately with
1063  * ERESTART. This should be used whenever locks are being held.  On an
1064  * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
1065  * and try again.
1066  *
1067  * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
1068  * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
1069  * details on the throttle). This is used by the VFS operations, after
1070  * they have already called dmu_tx_wait() (though most likely on a
1071  * different tx).
1072  *
1073  * It is guaranteed that subsequent successful calls to dmu_tx_assign()
1074  * will assign the tx to monotonically increasing txgs. Of course this is
1075  * not strong monotonicity, because the same txg can be returned multiple
1076  * times in a row. This guarantee holds both for subsequent calls from
1077  * one thread and for multiple threads. For example, it is impossible to
1078  * observe the following sequence of events:
1079  *
1080  *          Thread 1                            Thread 2
1081  *
1082  *     dmu_tx_assign(T1, ...)
1083  *     1 <- dmu_tx_get_txg(T1)
1084  *                                       dmu_tx_assign(T2, ...)
1085  *                                       2 <- dmu_tx_get_txg(T2)
1086  *     dmu_tx_assign(T3, ...)
1087  *     1 <- dmu_tx_get_txg(T3)
1088  */
1089 int
1090 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
1091 {
1092 	int err;
1093 
1094 	ASSERT(tx->tx_txg == 0);
1095 	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
1096 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1097 
1098 	/* If we might wait, we must not hold the config lock. */
1099 	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
1100 
1101 	if ((txg_how & TXG_NOTHROTTLE))
1102 		tx->tx_dirty_delayed = B_TRUE;
1103 
1104 	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
1105 		dmu_tx_unassign(tx);
1106 
1107 		if (err != ERESTART || !(txg_how & TXG_WAIT))
1108 			return (err);
1109 
1110 		dmu_tx_wait(tx);
1111 	}
1112 
1113 	txg_rele_to_quiesce(&tx->tx_txgh);
1114 
1115 	return (0);
1116 }
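
/*
 * For reference, a typical consumer of this interface looks roughly
 * like the sketch below.  The names "os", "object", "off", "len" and
 * "buf" stand in for the caller's own state, and error handling is
 * abbreviated; this is an illustration, not a copy of any particular
 * caller:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * Callers that hold locks instead assign without TXG_WAIT; on ERESTART
 * they drop their locks, call dmu_tx_wait() and dmu_tx_abort(), and
 * retry with a freshly created transaction, as described above.
 */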
1117 
1118 void
1119 dmu_tx_wait(dmu_tx_t *tx)
1120 {
1121 	spa_t *spa = tx->tx_pool->dp_spa;
1122 	dsl_pool_t *dp = tx->tx_pool;
1123 	hrtime_t before;
1124 
1125 	ASSERT(tx->tx_txg == 0);
1126 	ASSERT(!dsl_pool_config_held(tx->tx_pool));
1127 
1128 	before = gethrtime();
1129 
1130 	if (tx->tx_wait_dirty) {
1131 		uint64_t dirty;
1132 
1133 		/*
1134 		 * dmu_tx_try_assign() has determined that we need to wait
1135 		 * because we've consumed much or all of the dirty buffer
1136 		 * space.
1137 		 */
1138 		mutex_enter(&dp->dp_lock);
1139 		if (dp->dp_dirty_total >= zfs_dirty_data_max)
1140 			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
1141 		while (dp->dp_dirty_total >= zfs_dirty_data_max)
1142 			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1143 		dirty = dp->dp_dirty_total;
1144 		mutex_exit(&dp->dp_lock);
1145 
1146 		dmu_tx_delay(tx, dirty);
1147 
1148 		tx->tx_wait_dirty = B_FALSE;
1149 
1150 		/*
1151 		 * Note: setting tx_dirty_delayed only has effect if the
1152 		 * caller used TXG_WAIT.  Otherwise they are going to
1153 		 * destroy this tx and try again.  The common case,
1154 		 * zfs_write(), uses TXG_WAIT.
1155 		 */
1156 		tx->tx_dirty_delayed = B_TRUE;
1157 	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1158 		/*
1159 		 * If the pool is suspended we need to wait until it
1160 		 * is resumed.  Note that it's possible that the pool
1161 		 * has become active after this thread has tried to
1162 		 * obtain a tx.  If that's the case then tx_lasttried_txg
1163 		 * would not have been set.
1164 		 */
1165 		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1166 	} else if (tx->tx_needassign_txh) {
1167 		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1168 
1169 		mutex_enter(&dn->dn_mtx);
1170 		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1171 			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1172 		mutex_exit(&dn->dn_mtx);
1173 		tx->tx_needassign_txh = NULL;
1174 	} else {
1175 		/*
1176 		 * If we have a lot of dirty data just wait until we sync
1177 		 * out a TXG at which point we'll hopefully have synced
1178 		 * a portion of the changes.
1179 		 */
1180 		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1181 	}
1182 
1183 	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
1184 }
1185 
1186 static void
1187 dmu_tx_destroy(dmu_tx_t *tx)
1188 {
1189 	dmu_tx_hold_t *txh;
1190 
1191 	while ((txh = list_head(&tx->tx_holds)) != NULL) {
1192 		dnode_t *dn = txh->txh_dnode;
1193 
1194 		list_remove(&tx->tx_holds, txh);
1195 		zfs_refcount_destroy_many(&txh->txh_space_towrite,
1196 		    zfs_refcount_count(&txh->txh_space_towrite));
1197 		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
1198 		    zfs_refcount_count(&txh->txh_memory_tohold));
1199 		kmem_free(txh, sizeof (dmu_tx_hold_t));
1200 		if (dn != NULL)
1201 			dnode_rele(dn, tx);
1202 	}
1203 
1204 	list_destroy(&tx->tx_callbacks);
1205 	list_destroy(&tx->tx_holds);
1206 	kmem_free(tx, sizeof (dmu_tx_t));
1207 }
1208 
1209 void
1210 dmu_tx_commit(dmu_tx_t *tx)
1211 {
1212 	ASSERT(tx->tx_txg != 0);
1213 
1214 	/*
1215 	 * Go through the transaction's hold list and remove holds on
1216 	 * associated dnodes, notifying waiters if no holds remain.
1217 	 */
1218 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1219 	    txh = list_next(&tx->tx_holds, txh)) {
1220 		dnode_t *dn = txh->txh_dnode;
1221 
1222 		if (dn == NULL)
1223 			continue;
1224 
1225 		mutex_enter(&dn->dn_mtx);
1226 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1227 
1228 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1229 			dn->dn_assigned_txg = 0;
1230 			cv_broadcast(&dn->dn_notxholds);
1231 		}
1232 		mutex_exit(&dn->dn_mtx);
1233 	}
1234 
1235 	if (tx->tx_tempreserve_cookie)
1236 		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1237 
1238 	if (!list_is_empty(&tx->tx_callbacks))
1239 		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1240 
1241 	if (tx->tx_anyobj == FALSE)
1242 		txg_rele_to_sync(&tx->tx_txgh);
1243 
1244 	dmu_tx_destroy(tx);
1245 }
1246 
1247 void
1248 dmu_tx_abort(dmu_tx_t *tx)
1249 {
1250 	ASSERT(tx->tx_txg == 0);
1251 
1252 	/*
1253 	 * Call any registered callbacks with an error code.
1254 	 */
1255 	if (!list_is_empty(&tx->tx_callbacks))
1256 		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));
1257 
1258 	dmu_tx_destroy(tx);
1259 }
1260 
1261 uint64_t
1262 dmu_tx_get_txg(dmu_tx_t *tx)
1263 {
1264 	ASSERT(tx->tx_txg != 0);
1265 	return (tx->tx_txg);
1266 }
1267 
1268 dsl_pool_t *
1269 dmu_tx_pool(dmu_tx_t *tx)
1270 {
1271 	ASSERT(tx->tx_pool != NULL);
1272 	return (tx->tx_pool);
1273 }
1274 
1275 void
1276 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1277 {
1278 	dmu_tx_callback_t *dcb;
1279 
1280 	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1281 
1282 	dcb->dcb_func = func;
1283 	dcb->dcb_data = data;
1284 
1285 	list_insert_tail(&tx->tx_callbacks, dcb);
1286 }
1287 
1288 /*
1289  * Call all the commit callbacks on a list, with a given error code.
1290  */
1291 void
1292 dmu_tx_do_callbacks(list_t *cb_list, int error)
1293 {
1294 	dmu_tx_callback_t *dcb;
1295 
1296 	while ((dcb = list_tail(cb_list)) != NULL) {
1297 		list_remove(cb_list, dcb);
1298 		dcb->dcb_func(dcb->dcb_data, error);
1299 		kmem_free(dcb, sizeof (dmu_tx_callback_t));
1300 	}
1301 }
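
/*
 * A commit callback must match dmu_tx_callback_func_t: it is passed the
 * registered data pointer and an error code (0 when the assigned txg has
 * been synced, or an error such as ECANCELED if the transaction is
 * aborted).  A minimal sketch, where my_state_t and my_commit_cb are
 * illustrative names rather than part of this interface:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		my_state_t *ms = arg;
 *
 *		ms->ms_error = error;
 *		ms->ms_done = B_TRUE;
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, ms);
 *
 * Registration typically happens after dmu_tx_assign() succeeds and
 * before dmu_tx_commit().
 */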
1302 
1303 /*
1304  * Interface to hold a bunch of attributes,
1305  * used for creating new files.
1306  * attrsize is the total size of all attributes
1307  * to be added during object creation.
1308  *
1309  * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
1310  */
1311 
1312 /*
1313  * Hold the necessary attribute names for attribute registration.
1314  * It should be a very rare case where this is needed.  If it does
1315  * happen, it would only happen on the first write to the file system.
1316  */
1317 static void
1318 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1319 {
1320 	if (!sa->sa_need_attr_registration)
1321 		return;
1322 
1323 	for (int i = 0; i != sa->sa_num_attrs; i++) {
1324 		if (!sa->sa_attr_table[i].sa_registered) {
1325 			if (sa->sa_reg_attr_obj)
1326 				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1327 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1328 			else
1329 				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1330 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1331 		}
1332 	}
1333 }
1334 
1335 void
1336 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1337 {
1338 	dmu_tx_hold_t *txh;
1339 
1340 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1341 	    THT_SPILL, 0, 0);
1342 	if (txh != NULL)
1343 		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
1344 		    SPA_OLD_MAXBLOCKSIZE, FTAG);
1345 }
1346 
1347 void
1348 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1349 {
1350 	sa_os_t *sa = tx->tx_objset->os_sa;
1351 
1352 	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1353 
1354 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1355 		return;
1356 
1357 	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
1358 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1359 	} else {
1360 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1361 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1362 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1363 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1364 	}
1365 
1366 	dmu_tx_sa_registration_hold(sa, tx);
1367 
1368 	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
1369 		return;
1370 
1371 	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1372 	    THT_SPILL, 0, 0);
1373 }
1374 
1375 /*
1376  * Hold SA attribute
1377  *
1378  * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1379  *
1380  * may_grow should be set when the update may cause the attribute data
1381  * to grow, in which case the SA layout ZAP and the object's spill
1382  * block are also held.
1383  */
1383  */
1384 void
1385 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1386 {
1387 	uint64_t object;
1388 	sa_os_t *sa = tx->tx_objset->os_sa;
1389 
1390 	ASSERT(hdl != NULL);
1391 
1392 	object = sa_handle_object(hdl);
1393 
1394 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1395 	DB_DNODE_ENTER(db);
1396 	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
1397 	DB_DNODE_EXIT(db);
1398 
1399 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1400 		return;
1401 
1402 	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1403 	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1404 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1405 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1406 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1407 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1408 	}
1409 
1410 	dmu_tx_sa_registration_hold(sa, tx);
1411 
1412 	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1413 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1414 
1415 	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1416 		ASSERT(tx->tx_txg == 0);
1417 		dmu_tx_hold_spill(tx, object);
1418 	} else {
1419 		dnode_t *dn;
1420 
1421 		DB_DNODE_ENTER(db);
1422 		dn = DB_DNODE(db);
1423 		if (dn->dn_have_spill) {
1424 			ASSERT(tx->tx_txg == 0);
1425 			dmu_tx_hold_spill(tx, object);
1426 		}
1427 		DB_DNODE_EXIT(db);
1428 	}
1429 }
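
/*
 * For example, a caller updating an existing SA attribute typically does
 * something like the following (a sketch: "zp", "attr", "value" and
 * "size" stand in for the caller's own znode handle and attribute
 * state):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error == 0) {
 *		error = sa_update(zp->z_sa_hdl, attr, &value, size, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */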
1430 
1431 void
1432 dmu_tx_init(void)
1433 {
1434 	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
1435 	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
1436 	    KSTAT_FLAG_VIRTUAL);
1437 
1438 	if (dmu_tx_ksp != NULL) {
1439 		dmu_tx_ksp->ks_data = &dmu_tx_stats;
1440 		kstat_install(dmu_tx_ksp);
1441 	}
1442 }
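
/*
 * The counters installed above are exported through the kstat framework
 * and can typically be inspected from user space, e.g. via
 * /proc/spl/kstat/zfs/dmu_tx on Linux or the kstat.zfs.misc.dmu_tx
 * sysctl on FreeBSD.
 */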
1443 
1444 void
1445 dmu_tx_fini(void)
1446 {
1447 	if (dmu_tx_ksp != NULL) {
1448 		kstat_delete(dmu_tx_ksp);
1449 		dmu_tx_ksp = NULL;
1450 	}
1451 }
1452 
1453 #if defined(_KERNEL)
1454 EXPORT_SYMBOL(dmu_tx_create);
1455 EXPORT_SYMBOL(dmu_tx_hold_write);
1456 EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
1457 EXPORT_SYMBOL(dmu_tx_hold_free);
1458 EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
1459 EXPORT_SYMBOL(dmu_tx_hold_zap);
1460 EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
1461 EXPORT_SYMBOL(dmu_tx_hold_bonus);
1462 EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
1463 EXPORT_SYMBOL(dmu_tx_abort);
1464 EXPORT_SYMBOL(dmu_tx_assign);
1465 EXPORT_SYMBOL(dmu_tx_wait);
1466 EXPORT_SYMBOL(dmu_tx_commit);
1467 EXPORT_SYMBOL(dmu_tx_mark_netfree);
1468 EXPORT_SYMBOL(dmu_tx_get_txg);
1469 EXPORT_SYMBOL(dmu_tx_callback_register);
1470 EXPORT_SYMBOL(dmu_tx_do_callbacks);
1471 EXPORT_SYMBOL(dmu_tx_hold_spill);
1472 EXPORT_SYMBOL(dmu_tx_hold_sa_create);
1473 EXPORT_SYMBOL(dmu_tx_hold_sa);
1474 #endif
1475