1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 */
27
28 #include <sys/dmu.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dbuf.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
34 #include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
35 #include <sys/dsl_pool.h>
36 #include <sys/zap_impl.h> /* for fzap_default_block_shift */
37 #include <sys/spa.h>
38 #include <sys/sa.h>
39 #include <sys/sa_impl.h>
40 #include <sys/zfs_context.h>
41 #include <sys/varargs.h>
42
43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
44 uint64_t arg1, uint64_t arg2);
45
46
47 dmu_tx_t *
48 dmu_tx_create_dd(dsl_dir_t *dd)
49 {
50 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
51 tx->tx_dir = dd;
52 if (dd != NULL)
53 tx->tx_pool = dd->dd_pool;
54 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
55 offsetof(dmu_tx_hold_t, txh_node));
56 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
57 offsetof(dmu_tx_callback_t, dcb_node));
58 tx->tx_start = gethrtime();
59 #ifdef ZFS_DEBUG
60 refcount_create(&tx->tx_space_written);
61 refcount_create(&tx->tx_space_freed);
62 #endif
63 return (tx);
64 }
65
66 dmu_tx_t *
67 dmu_tx_create(objset_t *os)
68 {
69 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
70 tx->tx_objset = os;
71 tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
72 return (tx);
73 }
74
75 dmu_tx_t *
76 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
77 {
78 dmu_tx_t *tx = dmu_tx_create_dd(NULL);
79
80 ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
81 tx->tx_pool = dp;
82 tx->tx_txg = txg;
83 tx->tx_anyobj = TRUE;
84
85 return (tx);
86 }
87
88 int
89 dmu_tx_is_syncing(dmu_tx_t *tx)
90 {
91 return (tx->tx_anyobj);
92 }
93
94 int
95 dmu_tx_private_ok(dmu_tx_t *tx)
96 {
97 return (tx->tx_anyobj);
98 }
99
100 static dmu_tx_hold_t *
101 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
102 enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
103 {
104 dmu_tx_hold_t *txh;
105 dnode_t *dn = NULL;
106 int err;
107
108 if (object != DMU_NEW_OBJECT) {
109 err = dnode_hold(os, object, tx, &dn);
110 if (err) {
111 tx->tx_err = err;
112 return (NULL);
113 }
114
115 if (err == 0 && tx->tx_txg != 0) {
116 mutex_enter(&dn->dn_mtx);
117 /*
118 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
119 * problem, but there's no way for it to happen (for
120 * now, at least).
121 */
122 ASSERT(dn->dn_assigned_txg == 0);
123 dn->dn_assigned_txg = tx->tx_txg;
124 (void) refcount_add(&dn->dn_tx_holds, tx);
125 mutex_exit(&dn->dn_mtx);
126 }
127 }
128
129 txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
130 txh->txh_tx = tx;
131 txh->txh_dnode = dn;
132 refcount_create(&txh->txh_space_towrite);
133 refcount_create(&txh->txh_space_tofree);
134 refcount_create(&txh->txh_space_tooverwrite);
135 refcount_create(&txh->txh_space_tounref);
136 refcount_create(&txh->txh_memory_tohold);
137 refcount_create(&txh->txh_fudge);
138 #ifdef ZFS_DEBUG
139 txh->txh_type = type;
140 txh->txh_arg1 = arg1;
141 txh->txh_arg2 = arg2;
142 #endif
143 list_insert_tail(&tx->tx_holds, txh);
144
145 return (txh);
146 }
147
148 void
149 dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
150 {
151 /*
152 * If we're syncing, they can manipulate any object anyhow, and
153 * the hold on the dnode_t can cause problems.
154 */
155 if (!dmu_tx_is_syncing(tx)) {
156 (void) dmu_tx_hold_object_impl(tx, os,
157 object, THT_NEWOBJECT, 0, 0);
158 }
159 }
160
161 static int
162 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
163 {
164 int err;
165 dmu_buf_impl_t *db;
166
167 rw_enter(&dn->dn_struct_rwlock, RW_READER);
168 db = dbuf_hold_level(dn, level, blkid, FTAG);
169 rw_exit(&dn->dn_struct_rwlock);
170 if (db == NULL)
171 return (SET_ERROR(EIO));
172 err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
173 dbuf_rele(db, FTAG);
174 return (err);
175 }
176
177 static void
178 dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
179 int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
180 {
181 objset_t *os = dn->dn_objset;
182 dsl_dataset_t *ds = os->os_dsl_dataset;
183 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
184 dmu_buf_impl_t *parent = NULL;
185 blkptr_t *bp = NULL;
186 uint64_t space;
187
188 if (level >= dn->dn_nlevels || history[level] == blkid)
189 return;
190
191 history[level] = blkid;
192
193 space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);
194
195 if (db == NULL || db == dn->dn_dbuf) {
196 ASSERT(level != 0);
197 db = NULL;
198 } else {
199 ASSERT(DB_DNODE(db) == dn);
200 ASSERT(db->db_level == level);
201 ASSERT(db->db.db_size == space);
202 ASSERT(db->db_blkid == blkid);
203 bp = db->db_blkptr;
204 parent = db->db_parent;
205 }
206
207 freeable = (bp && (freeable ||
208 dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));
209
210 if (freeable) {
211 (void) refcount_add_many(&txh->txh_space_tooverwrite,
212 space, FTAG);
213 } else {
214 (void) refcount_add_many(&txh->txh_space_towrite,
215 space, FTAG);
216 }
217
218 if (bp) {
219 (void) refcount_add_many(&txh->txh_space_tounref,
220 bp_get_dsize(os->os_spa, bp), FTAG);
221 }
222
223 dmu_tx_count_twig(txh, dn, parent, level + 1,
224 blkid >> epbs, freeable, history);
225 }
226
227 /* ARGSUSED */
228 static void
229 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
230 {
231 dnode_t *dn = txh->txh_dnode;
232 uint64_t start, end, i;
233 int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
234 int err = 0;
235
236 if (len == 0)
237 return;
238
239 min_bs = SPA_MINBLOCKSHIFT;
240 max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
241 min_ibs = DN_MIN_INDBLKSHIFT;
242 max_ibs = DN_MAX_INDBLKSHIFT;
243
244 if (dn) {
245 uint64_t history[DN_MAX_LEVELS];
246 int nlvls = dn->dn_nlevels;
247 int delta;
248
249 /*
250 * For i/o error checking, read the first and last level-0
251 * blocks (if they are not aligned), and all the level-1 blocks.
252 */
253 if (dn->dn_maxblkid == 0) {
254 delta = dn->dn_datablksz;
255 start = (off < dn->dn_datablksz) ? 0 : 1;
256 end = (off+len <= dn->dn_datablksz) ? 0 : 1;
257 if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
258 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
259 if (err)
260 goto out;
261 delta -= off;
262 }
263 } else {
264 zio_t *zio = zio_root(dn->dn_objset->os_spa,
265 NULL, NULL, ZIO_FLAG_CANFAIL);
266
267 /* first level-0 block */
268 start = off >> dn->dn_datablkshift;
269 if (P2PHASE(off, dn->dn_datablksz) ||
270 len < dn->dn_datablksz) {
271 err = dmu_tx_check_ioerr(zio, dn, 0, start);
272 if (err)
273 goto out;
274 }
275
276 /* last level-0 block */
277 end = (off+len-1) >> dn->dn_datablkshift;
278 if (end != start && end <= dn->dn_maxblkid &&
279 P2PHASE(off+len, dn->dn_datablksz)) {
280 err = dmu_tx_check_ioerr(zio, dn, 0, end);
281 if (err)
282 goto out;
283 }
284
285 /* level-1 blocks */
286 if (nlvls > 1) {
287 int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
288 for (i = (start>>shft)+1; i < end>>shft; i++) {
289 err = dmu_tx_check_ioerr(zio, dn, 1, i);
290 if (err)
291 goto out;
292 }
293 }
294
295 err = zio_wait(zio);
296 if (err)
297 goto out;
298 delta = P2NPHASE(off, dn->dn_datablksz);
299 }
300
301 min_ibs = max_ibs = dn->dn_indblkshift;
302 if (dn->dn_maxblkid > 0) {
303 /*
304 * The blocksize can't change,
305 * so we can make a more precise estimate.
306 */
307 ASSERT(dn->dn_datablkshift != 0);
308 min_bs = max_bs = dn->dn_datablkshift;
309 } else {
310 /*
311 * The blocksize can increase up to the recordsize,
312 * or if it is already more than the recordsize,
313 * up to the next power of 2.
314 */
315 min_bs = highbit64(dn->dn_datablksz - 1);
316 max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
317 }
318
319 /*
320 * If this write is not off the end of the file
321 * we need to account for overwrites/unref.
322 */
323 if (start <= dn->dn_maxblkid) {
324 for (int l = 0; l < DN_MAX_LEVELS; l++)
325 history[l] = -1ULL;
326 }
327 while (start <= dn->dn_maxblkid) {
328 dmu_buf_impl_t *db;
329
330 rw_enter(&dn->dn_struct_rwlock, RW_READER);
331 err = dbuf_hold_impl(dn, 0, start,
332 FALSE, FALSE, FTAG, &db);
333 rw_exit(&dn->dn_struct_rwlock);
334
335 if (err) {
336 txh->txh_tx->tx_err = err;
337 return;
338 }
339
340 dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
341 history);
342 dbuf_rele(db, FTAG);
343 if (++start > end) {
344 /*
345 * Account for new indirects appearing
346 * before this IO gets assigned into a txg.
347 */
348 bits = 64 - min_bs;
349 epbs = min_ibs - SPA_BLKPTRSHIFT;
350 for (bits -= epbs * (nlvls - 1);
351 bits >= 0; bits -= epbs) {
352 (void) refcount_add_many(
353 &txh->txh_fudge,
354 1ULL << max_ibs, FTAG);
355 }
356 goto out;
357 }
358 off += delta;
359 if (len >= delta)
360 len -= delta;
361 delta = dn->dn_datablksz;
362 }
363 }
364
365 /*
366 * 'end' is the last thing we will access, not one past.
367 * This way we won't overflow when accessing the last byte.
368 */
369 start = P2ALIGN(off, 1ULL << max_bs);
370 end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
371 (void) refcount_add_many(&txh->txh_space_towrite,
372 end - start + 1, FTAG);
373
374 start >>= min_bs;
375 end >>= min_bs;
376
377 epbs = min_ibs - SPA_BLKPTRSHIFT;
378
379 /*
380 * The object contains at most 2^(64 - min_bs) blocks,
381 * and each indirect level maps 2^epbs.
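 *
 * Illustrative arithmetic (values assumed for the example): with
 * min_bs = 9 (512-byte blocks) and min_ibs = 14 (epbs = 14 - 7 = 7),
 * the loop below runs for bits = 55, 48, ..., 6, i.e. eight passes,
 * charging (end - start + 1) << max_ibs bytes of indirect-block
 * space on each pass.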
382 */
383 for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
384 start >>= epbs;
385 end >>= epbs;
386 ASSERT3U(end, >=, start);
387 (void) refcount_add_many(&txh->txh_space_towrite,
388 (end - start + 1) << max_ibs, FTAG);
389 if (start != 0) {
390 /*
391 * We also need a new blkid=0 indirect block
392 * to reference any existing file data.
393 */
394 (void) refcount_add_many(&txh->txh_space_towrite,
395 1ULL << max_ibs, FTAG);
396 }
397 }
398
399 out:
400 if (refcount_count(&txh->txh_space_towrite) +
401 refcount_count(&txh->txh_space_tooverwrite) >
402 2 * DMU_MAX_ACCESS)
403 err = SET_ERROR(EFBIG);
404
405 if (err)
406 txh->txh_tx->tx_err = err;
407 }
408
409 static void
410 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
411 {
412 dnode_t *dn = txh->txh_dnode;
413 dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
414 uint64_t space = mdn->dn_datablksz +
415 ((uint64_t)(mdn->dn_nlevels-1) << mdn->dn_indblkshift);
416
417 if (dn && dn->dn_dbuf->db_blkptr &&
418 dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
419 dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
420 (void) refcount_add_many(&txh->txh_space_tooverwrite,
421 space, FTAG);
422 (void) refcount_add_many(&txh->txh_space_tounref, space, FTAG);
423 } else {
424 (void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
425 if (dn && dn->dn_dbuf->db_blkptr) {
426 (void) refcount_add_many(&txh->txh_space_tounref,
427 space, FTAG);
428 }
429 }
430 }
431
432 void
433 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
434 {
435 dmu_tx_hold_t *txh;
436
437 ASSERT(tx->tx_txg == 0);
438 ASSERT(len < DMU_MAX_ACCESS);
439 ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
440
441 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
442 object, THT_WRITE, off, len);
443 if (txh == NULL)
444 return;
445
446 dmu_tx_count_write(txh, off, len);
447 dmu_tx_count_dnode(txh);
448 }
449
450 static void
451 dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
452 {
453 uint64_t blkid, nblks, lastblk;
454 uint64_t space = 0, unref = 0, skipped = 0;
455 dnode_t *dn = txh->txh_dnode;
456 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
457 spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
458 int epbs;
459 uint64_t l0span = 0, nl1blks = 0;
460
461 if (dn->dn_nlevels == 0)
462 return;
463
464 /*
465 * The struct_rwlock protects us against dn_nlevels
466 * changing, in case (against all odds) we manage to dirty &
467 * sync out the changes after we check for being dirty.
468 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
469 */
470 rw_enter(&dn->dn_struct_rwlock, RW_READER);
471 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
472 if (dn->dn_maxblkid == 0) {
473 if (off == 0 && len >= dn->dn_datablksz) {
474 blkid = 0;
475 nblks = 1;
476 } else {
477 rw_exit(&dn->dn_struct_rwlock);
478 return;
479 }
480 } else {
481 blkid = off >> dn->dn_datablkshift;
482 nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;
483
484 if (blkid > dn->dn_maxblkid) {
485 rw_exit(&dn->dn_struct_rwlock);
486 return;
487 }
488 if (blkid + nblks > dn->dn_maxblkid)
489 nblks = dn->dn_maxblkid - blkid + 1;
490
491 }
492 l0span = nblks; /* save for later use to calc level > 1 overhead */
493 if (dn->dn_nlevels == 1) {
494 int i;
495 for (i = 0; i < nblks; i++) {
496 blkptr_t *bp = dn->dn_phys->dn_blkptr;
497 ASSERT3U(blkid + i, <, dn->dn_nblkptr);
498 bp += blkid + i;
499 if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
500 dprintf_bp(bp, "can free old%s", "");
501 space += bp_get_dsize(spa, bp);
502 }
503 unref += BP_GET_ASIZE(bp);
504 }
505 nl1blks = 1;
506 nblks = 0;
507 }
508
509 lastblk = blkid + nblks - 1;
510 while (nblks) {
511 dmu_buf_impl_t *dbuf;
512 uint64_t ibyte, new_blkid;
513 int epb = 1 << epbs;
514 int err, i, blkoff, tochk;
515 blkptr_t *bp;
516
517 ibyte = blkid << dn->dn_datablkshift;
518 err = dnode_next_offset(dn,
519 DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
520 new_blkid = ibyte >> dn->dn_datablkshift;
521 if (err == ESRCH) {
522 skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
523 break;
524 }
525 if (err) {
526 txh->txh_tx->tx_err = err;
527 break;
528 }
529 if (new_blkid > lastblk) {
530 skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
531 break;
532 }
533
534 if (new_blkid > blkid) {
535 ASSERT((new_blkid >> epbs) > (blkid >> epbs));
536 skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
537 nblks -= new_blkid - blkid;
538 blkid = new_blkid;
539 }
540 blkoff = P2PHASE(blkid, epb);
541 tochk = MIN(epb - blkoff, nblks);
542
543 err = dbuf_hold_impl(dn, 1, blkid >> epbs,
544 FALSE, FALSE, FTAG, &dbuf);
545 if (err) {
546 txh->txh_tx->tx_err = err;
547 break;
548 }
549
550 (void) refcount_add_many(&txh->txh_memory_tohold,
551 dbuf->db.db_size, FTAG);
552
553 /*
554 * We don't check memory_tohold against DMU_MAX_ACCESS because
555 * memory_tohold is an over-estimation (especially the >L1
556 * indirect blocks), so it could fail. Callers should have
557 * already verified that they will not be holding too much
558 * memory.
559 */
560
561 err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
562 if (err != 0) {
563 txh->txh_tx->tx_err = err;
564 dbuf_rele(dbuf, FTAG);
565 break;
566 }
567
568 bp = dbuf->db.db_data;
569 bp += blkoff;
570
571 for (i = 0; i < tochk; i++) {
572 if (dsl_dataset_block_freeable(ds, &bp[i],
573 bp[i].blk_birth)) {
574 dprintf_bp(&bp[i], "can free old%s", "");
575 space += bp_get_dsize(spa, &bp[i]);
576 }
577 unref += BP_GET_ASIZE(bp);
578 }
579 dbuf_rele(dbuf, FTAG);
580
581 ++nl1blks;
582 blkid += tochk;
583 nblks -= tochk;
584 }
585 rw_exit(&dn->dn_struct_rwlock);
586
587 /*
588 * Add in memory requirements of higher-level indirects.
589 * This assumes a worst-possible scenario for dn_nlevels and a
590 * worst-possible distribution of l1-blocks over the region to free.
591 */
592 {
593 uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
594 int level = 2;
595 /*
596 * Here we don't use DN_MAX_LEVEL, but calculate it with the
597 * given datablkshift and indblkshift. This makes the
598 * difference between 19 and 8 on large files.
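 *
 * A worked example (block sizes assumed for illustration): with 128K
 * data and indirect blocks (datablkshift = indblkshift = 17), epbs is
 * 17 - 7 = 10, so maxlevel = 2 + (64 - 17) / 10 = 6.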
599 */
600 int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
601 (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
602
603 while (level++ < maxlevel) {
604 (void) refcount_add_many(&txh->txh_memory_tohold,
605 MAX(MIN(blkcnt, nl1blks), 1) << dn->dn_indblkshift,
606 FTAG);
607 blkcnt = 1 + (blkcnt >> epbs);
608 }
609 }
610
611 /* account for new level 1 indirect blocks that might show up */
612 if (skipped > 0) {
613 (void) refcount_add_many(&txh->txh_fudge,
614 skipped << dn->dn_indblkshift, FTAG);
615 skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
616 (void) refcount_add_many(&txh->txh_memory_tohold,
617 skipped << dn->dn_indblkshift, FTAG);
618 }
619 (void) refcount_add_many(&txh->txh_space_tofree, space, FTAG);
620 (void) refcount_add_many(&txh->txh_space_tounref, unref, FTAG);
621 }
622
623 /*
624 * This function marks the transaction as being a "net free". The end
625 * result is that refquotas will be disabled for this transaction, and
626 * this transaction will be able to use half of the pool space overhead
627 * (see dsl_pool_adjustedsize()). Therefore this function should only
628 * be called for transactions that we expect will not cause a net increase
629 * in the amount of space used (but it's OK if that is occasionally not true).
630 */
631 void
632 dmu_tx_mark_netfree(dmu_tx_t *tx)
633 {
634 dmu_tx_hold_t *txh;
635
636 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
637 DMU_NEW_OBJECT, THT_FREE, 0, 0);
638
639 /*
640 * Pretend that this operation will free 1GB of space. This
641 * should be large enough to cancel out the largest write.
642 * We don't want to use something like UINT64_MAX, because that would
643 * cause overflows when doing math with these values (e.g. in
644 * dmu_tx_try_assign()).
645 */
646 (void) refcount_add_many(&txh->txh_space_tofree,
647 1024 * 1024 * 1024, FTAG);
648 (void) refcount_add_many(&txh->txh_space_tounref,
649 1024 * 1024 * 1024, FTAG);
650 }
651
652 void
653 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
654 {
655 dmu_tx_hold_t *txh;
656 dnode_t *dn;
657 int err;
658 zio_t *zio;
659
660 ASSERT(tx->tx_txg == 0);
661
662 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
663 object, THT_FREE, off, len);
664 if (txh == NULL)
665 return;
666 dn = txh->txh_dnode;
667 dmu_tx_count_dnode(txh);
668
669 if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
670 return;
671 if (len == DMU_OBJECT_END)
672 len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;
673
674
675 /*
676 * For i/o error checking, we read the first and last level-0
677 * blocks if they are not aligned, and all the level-1 blocks.
678 *
679 * Note: dbuf_free_range() assumes that we have not instantiated
680 * any level-0 dbufs that will be completely freed. Therefore we must
681 * exercise care to not read or count the first and last blocks
682 * if they are blocksize-aligned.
683 */
684 if (dn->dn_datablkshift == 0) {
685 if (off != 0 || len < dn->dn_datablksz)
686 dmu_tx_count_write(txh, 0, dn->dn_datablksz);
687 } else {
688 /* first block will be modified if it is not aligned */
689 if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
690 dmu_tx_count_write(txh, off, 1);
691 /* last block will be modified if it is not aligned */
692 if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
693 dmu_tx_count_write(txh, off+len, 1);
694 }
695
696 /*
697 * Check level-1 blocks.
698 */
699 if (dn->dn_nlevels > 1) {
700 int shift = dn->dn_datablkshift + dn->dn_indblkshift -
701 SPA_BLKPTRSHIFT;
702 uint64_t start = off >> shift;
703 uint64_t end = (off + len) >> shift;
704
705 ASSERT(dn->dn_indblkshift != 0);
706
707 /*
708 * dnode_reallocate() can result in an object with indirect
709 * blocks having an odd data block size. In this case,
710 * just check the single block.
711 */
712 if (dn->dn_datablkshift == 0)
713 start = end = 0;
714
715 zio = zio_root(tx->tx_pool->dp_spa,
716 NULL, NULL, ZIO_FLAG_CANFAIL);
717 for (uint64_t i = start; i <= end; i++) {
718 uint64_t ibyte = i << shift;
719 err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
720 i = ibyte >> shift;
721 if (err == ESRCH || i > end)
722 break;
723 if (err) {
724 tx->tx_err = err;
725 return;
726 }
727
728 err = dmu_tx_check_ioerr(zio, dn, 1, i);
729 if (err) {
730 tx->tx_err = err;
731 return;
732 }
733 }
734 err = zio_wait(zio);
735 if (err) {
736 tx->tx_err = err;
737 return;
738 }
739 }
740
741 dmu_tx_count_free(txh, off, len);
742 }
743
744 void
745 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
746 {
747 dmu_tx_hold_t *txh;
748 dnode_t *dn;
749 int err;
750
751 ASSERT(tx->tx_txg == 0);
752
753 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
754 object, THT_ZAP, add, (uintptr_t)name);
755 if (txh == NULL)
756 return;
757 dn = txh->txh_dnode;
758
759 dmu_tx_count_dnode(txh);
760
761 if (dn == NULL) {
762 /*
763 * We will be able to fit a new object's entries into one leaf
764 * block. So there will be at most 2 blocks total,
765 * including the header block.
766 */
767 dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
768 return;
769 }
770
771 ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
772
773 if (dn->dn_maxblkid == 0 && !add) {
774 blkptr_t *bp;
775
776 /*
777 * If there is only one block (i.e. this is a micro-zap)
778 * and we are not adding anything, the accounting is simple.
779 */
780 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
781 if (err) {
782 tx->tx_err = err;
783 return;
784 }
785
786 /*
787 * Use max block size here, since we don't know how much
788 * the size will change between now and the dbuf dirty call.
789 */
790 bp = &dn->dn_phys->dn_blkptr[0];
791 if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
792 bp, bp->blk_birth)) {
793 (void) refcount_add_many(&txh->txh_space_tooverwrite,
794 MZAP_MAX_BLKSZ, FTAG);
795 } else {
796 (void) refcount_add_many(&txh->txh_space_towrite,
797 MZAP_MAX_BLKSZ, FTAG);
798 }
799 if (!BP_IS_HOLE(bp)) {
800 (void) refcount_add_many(&txh->txh_space_tounref,
801 MZAP_MAX_BLKSZ, FTAG);
802 }
803 return;
804 }
805
806 if (dn->dn_maxblkid > 0 && name) {
807 /*
808 * access the name in this fat-zap so that we'll check
809 * for i/o errors to the leaf blocks, etc.
810 */
811 err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
812 if (err == EIO) {
813 tx->tx_err = err;
814 return;
815 }
816 }
817
818 err = zap_count_write_by_dnode(dn, name, add,
819 &txh->txh_space_towrite, &txh->txh_space_tooverwrite);
820
821 /*
822 * If the modified blocks are scattered to the four winds,
823 * we'll have to modify an indirect twig for each. We can make
824 * modifications at up to 3 locations:
825 * - header block at the beginning of the object
826 * - target leaf block
827 * - end of the object, where we might need to write:
828 * - a new leaf block if the target block needs to be split
829 * - the new pointer table, if it is growing
830 * - the new cookie table, if it is growing
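 *
 * With 128K indirect blocks (an assumed size), that caps the charge
 * below at 3 << 17 = 384K per indirect level.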
831 */
832 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
833 dsl_dataset_phys_t *ds_phys =
834 dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
835 for (int lvl = 1; lvl < dn->dn_nlevels; lvl++) {
836 uint64_t num_indirects = 1 + (dn->dn_maxblkid >> (epbs * lvl));
837 uint64_t spc = MIN(3, num_indirects) << dn->dn_indblkshift;
838 if (ds_phys->ds_prev_snap_obj != 0) {
839 (void) refcount_add_many(&txh->txh_space_towrite,
840 spc, FTAG);
841 } else {
842 (void) refcount_add_many(&txh->txh_space_tooverwrite,
843 spc, FTAG);
844 }
845 }
846 }
847
848 void
849 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
850 {
851 dmu_tx_hold_t *txh;
852
853 ASSERT(tx->tx_txg == 0);
854
855 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
856 object, THT_BONUS, 0, 0);
857 if (txh)
858 dmu_tx_count_dnode(txh);
859 }
860
861 void
862 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
863 {
864 dmu_tx_hold_t *txh;
865 ASSERT(tx->tx_txg == 0);
866
867 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
868 DMU_NEW_OBJECT, THT_SPACE, space, 0);
869
870 (void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
871 }
872
873 int
874 dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
875 {
876 dmu_tx_hold_t *txh;
877 int holds = 0;
878
879 /*
880 * By asserting that the tx is assigned, we're counting the
881 * number of dn_tx_holds, which is the same as the number of
882 * dn_holds. Otherwise, we'd be counting dn_holds, but
883 * dn_tx_holds could be 0.
884 */
885 ASSERT(tx->tx_txg != 0);
886
887 /* if (tx->tx_anyobj == TRUE) */
888 /* return (0); */
889
890 for (txh = list_head(&tx->tx_holds); txh;
891 txh = list_next(&tx->tx_holds, txh)) {
892 if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
893 holds++;
894 }
895
896 return (holds);
897 }
898
899 #ifdef ZFS_DEBUG
900 void
901 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
902 {
903 dmu_tx_hold_t *txh;
904 int match_object = FALSE, match_offset = FALSE;
905 dnode_t *dn;
906
907 DB_DNODE_ENTER(db);
908 dn = DB_DNODE(db);
909 ASSERT(tx->tx_txg != 0);
910 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
911 ASSERT3U(dn->dn_object, ==, db->db.db_object);
912
913 if (tx->tx_anyobj) {
914 DB_DNODE_EXIT(db);
915 return;
916 }
917
918 /* XXX No checking on the meta dnode for now */
919 if (db->db.db_object == DMU_META_DNODE_OBJECT) {
920 DB_DNODE_EXIT(db);
921 return;
922 }
923
924 for (txh = list_head(&tx->tx_holds); txh;
925 txh = list_next(&tx->tx_holds, txh)) {
926 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
927 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
928 match_object = TRUE;
929 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
930 int datablkshift = dn->dn_datablkshift ?
931 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
932 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
933 int shift = datablkshift + epbs * db->db_level;
934 uint64_t beginblk = shift >= 64 ? 0 :
935 (txh->txh_arg1 >> shift);
936 uint64_t endblk = shift >= 64 ? 0 :
937 ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
938 uint64_t blkid = db->db_blkid;
939
940 /* XXX txh_arg2 better not be zero... */
941
942 dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
943 txh->txh_type, beginblk, endblk);
944
945 switch (txh->txh_type) {
946 case THT_WRITE:
947 if (blkid >= beginblk && blkid <= endblk)
948 match_offset = TRUE;
949 /*
950 * We will let this hold work for the bonus
951 * or spill buffer so that we don't need to
952 * hold it when creating a new object.
953 */
954 if (blkid == DMU_BONUS_BLKID ||
955 blkid == DMU_SPILL_BLKID)
956 match_offset = TRUE;
957 /*
958 * They might have to increase nlevels,
959 * thus dirtying the new TLIBs. Or they
960 * might have to change the block size,
961 * thus dirtying the new lvl=0 blk=0.
962 */
963 if (blkid == 0)
964 match_offset = TRUE;
965 break;
966 case THT_FREE:
967 /*
968 * We will dirty all the level 1 blocks in
969 * the free range and perhaps the first and
970 * last level 0 block.
971 */
972 if (blkid >= beginblk && (blkid <= endblk ||
973 txh->txh_arg2 == DMU_OBJECT_END))
974 match_offset = TRUE;
975 break;
976 case THT_SPILL:
977 if (blkid == DMU_SPILL_BLKID)
978 match_offset = TRUE;
979 break;
980 case THT_BONUS:
981 if (blkid == DMU_BONUS_BLKID)
982 match_offset = TRUE;
983 break;
984 case THT_ZAP:
985 match_offset = TRUE;
986 break;
987 case THT_NEWOBJECT:
988 match_object = TRUE;
989 break;
990 default:
991 ASSERT(!"bad txh_type");
992 }
993 }
994 if (match_object && match_offset) {
995 DB_DNODE_EXIT(db);
996 return;
997 }
998 }
999 DB_DNODE_EXIT(db);
1000 panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
1001 (u_longlong_t)db->db.db_object, db->db_level,
1002 (u_longlong_t)db->db_blkid);
1003 }
1004 #endif
1005
1006 /*
1007 * If we can't do 10 iops, something is wrong. Let us go ahead
1008 * and hit zfs_dirty_data_max.
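 * (The 100ms cap below works out to 1s / 100ms = 10 delayed
 * transactions per second, hence "10 iops".)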
1009 */
1010 hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
1011 int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
1012
1013 /*
1014 * We delay transactions when we've determined that the backend storage
1015 * isn't able to accommodate the rate of incoming writes.
1016 *
1017 * If there is already a transaction waiting, we delay relative to when
1018 * that transaction finishes waiting. This way the calculated min_time
1019 * is independent of the number of threads concurrently executing
1020 * transactions.
1021 *
1022 * If we are the only waiter, wait relative to when the transaction
1023 * started, rather than the current time. This credits the transaction for
1024 * "time already served", e.g. reading indirect blocks.
1025 *
1026 * The minimum time for a transaction to take is calculated as:
1027 * min_time = scale * (dirty - min) / (max - dirty)
1028 * min_time is then capped at zfs_delay_max_ns.
1029 *
1030 * The delay has two degrees of freedom that can be adjusted via tunables.
1031 * The percentage of dirty data at which we start to delay is defined by
1032 * zfs_delay_min_dirty_percent. This should typically be at or above
1033 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
1034 * delay after writing at full speed has failed to keep up with the incoming
1035 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
1036 * speaking, this variable determines the amount of delay at the midpoint of
1037 * the curve.
1038 *
1039 * delay
1040 * 10ms +-------------------------------------------------------------*+
1041 * | *|
1042 * 9ms + *+
1043 * | *|
1044 * 8ms + *+
1045 * | * |
1046 * 7ms + * +
1047 * | * |
1048 * 6ms + * +
1049 * | * |
1050 * 5ms + * +
1051 * | * |
1052 * 4ms + * +
1053 * | * |
1054 * 3ms + * +
1055 * | * |
1056 * 2ms + (midpoint) * +
1057 * | | ** |
1058 * 1ms + v *** +
1059 * | zfs_delay_scale ----------> ******** |
1060 * 0 +-------------------------------------*********----------------+
1061 * 0% <- zfs_dirty_data_max -> 100%
1062 *
1063 * Note that since the delay is added to the outstanding time remaining on the
1064 * most recent transaction, the delay is effectively the inverse of IOPS.
1065 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
1066 * was chosen such that small changes in the amount of accumulated dirty data
1067 * in the first 3/4 of the curve yield relatively small differences in the
1068 * amount of delay.
1069 *
1070 * The effects can be easier to understand when the amount of delay is
1071 * represented on a log scale:
1072 *
1073 * delay
1074 * 100ms +-------------------------------------------------------------++
1075 * + +
1076 * | |
1077 * + *+
1078 * 10ms + *+
1079 * + ** +
1080 * | (midpoint) ** |
1081 * + | ** +
1082 * 1ms + v **** +
1083 * + zfs_delay_scale ----------> ***** +
1084 * | **** |
1085 * + **** +
1086 * 100us + ** +
1087 * + * +
1088 * | * |
1089 * + * +
1090 * 10us + * +
1091 * + +
1092 * | |
1093 * + +
1094 * +--------------------------------------------------------------+
1095 * 0% <- zfs_dirty_data_max -> 100%
1096 *
1097 * Note here that only as the amount of dirty data approaches its limit does
1098 * the delay start to increase rapidly. The goal of a properly tuned system
1099 * should be to keep the amount of dirty data out of that range by first
1100 * ensuring that the appropriate limits are set for the I/O scheduler to reach
1101 * optimal throughput on the backend storage, and then by changing the value
1102 * of zfs_delay_scale to increase the steepness of the curve.
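 *
 * A worked example, assuming the shipped defaults of
 * zfs_delay_min_dirty_percent = 60 and zfs_delay_scale = 500,000:
 * when dirty data sits at 80% of zfs_dirty_data_max,
 * min_time = 500,000 * (80 - 60) / (100 - 80) = 500,000ns = 500us
 * (the midpoint above); at 90% the same formula gives 1.5ms.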
1103 */
1104 static void
1105 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
1106 {
1107 dsl_pool_t *dp = tx->tx_pool;
1108 uint64_t delay_min_bytes =
1109 zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
1110 hrtime_t wakeup, min_tx_time, now;
1111
1112 if (dirty <= delay_min_bytes)
1113 return;
1114
1115 /*
1116 * The caller has already waited until we are under the max.
1117 * We make them pass us the amount of dirty data so we don't
1118 * have to handle the case of it being >= the max, which could
1119 * cause a divide-by-zero if it's == the max.
1120 */
1121 ASSERT3U(dirty, <, zfs_dirty_data_max);
1122
1123 now = gethrtime();
1124 min_tx_time = zfs_delay_scale *
1125 (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
1126 if (now > tx->tx_start + min_tx_time)
1127 return;
1128
1129 min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
1130
1131 DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
1132 uint64_t, min_tx_time);
1133
1134 mutex_enter(&dp->dp_lock);
1135 wakeup = MAX(tx->tx_start + min_tx_time,
1136 dp->dp_last_wakeup + min_tx_time);
1137 dp->dp_last_wakeup = wakeup;
1138 mutex_exit(&dp->dp_lock);
1139
1140 #ifdef _KERNEL
1141 #ifdef illumos
1142 mutex_enter(&curthread->t_delay_lock);
1143 while (cv_timedwait_hires(&curthread->t_delay_cv,
1144 &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
1145 CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
1146 continue;
1147 mutex_exit(&curthread->t_delay_lock);
1148 #endif
1149 #ifdef __FreeBSD__
1150 pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
1151 zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
1152 #endif
1153 #ifdef __NetBSD__
1154 int timo = (wakeup - now) * hz / 1000000000;
1155
1156 if (timo < 0)
1157 return;
1158
1159 if (timo == 0)
1160 timo = 1;
1161 kpause("dmu_tx_delay", false, timo, NULL);
1162 #endif
1163 #else
1164 hrtime_t delta = wakeup - gethrtime();
1165 struct timespec ts;
1166 ts.tv_sec = delta / NANOSEC;
1167 ts.tv_nsec = delta % NANOSEC;
1168 (void) nanosleep(&ts, NULL);
1169 #endif
1170 }
1171
1172 static int
1173 dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
1174 {
1175 dmu_tx_hold_t *txh;
1176 spa_t *spa = tx->tx_pool->dp_spa;
1177 uint64_t memory, asize, fsize, usize;
1178 uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;
1179
1180 ASSERT0(tx->tx_txg);
1181
1182 if (tx->tx_err)
1183 return (tx->tx_err);
1184
1185 if (spa_suspended(spa)) {
1186 /*
1187 * If the user has indicated a blocking failure mode
1188 * then return ERESTART which will block in dmu_tx_wait().
1189 * Otherwise, return EIO so that an error can get
1190 * propagated back to the VOP calls.
1191 *
1192 * Note that we always honor the txg_how flag regardless
1193 * of the failuremode setting.
1194 */
1195 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
1196 txg_how != TXG_WAIT)
1197 return (SET_ERROR(EIO));
1198
1199 return (SET_ERROR(ERESTART));
1200 }
1201
1202 if (!tx->tx_waited &&
1203 dsl_pool_need_dirty_delay(tx->tx_pool)) {
1204 tx->tx_wait_dirty = B_TRUE;
1205 return (SET_ERROR(ERESTART));
1206 }
1207
1208 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
1209 tx->tx_needassign_txh = NULL;
1210
1211 /*
1212 * NB: No error returns are allowed after txg_hold_open, but
1213 * before processing the dnode holds, due to the
1214 * dmu_tx_unassign() logic.
1215 */
1216
1217 towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
1218 for (txh = list_head(&tx->tx_holds); txh;
1219 txh = list_next(&tx->tx_holds, txh)) {
1220 dnode_t *dn = txh->txh_dnode;
1221 if (dn != NULL) {
1222 mutex_enter(&dn->dn_mtx);
1223 if (dn->dn_assigned_txg == tx->tx_txg - 1) {
1224 mutex_exit(&dn->dn_mtx);
1225 tx->tx_needassign_txh = txh;
1226 return (SET_ERROR(ERESTART));
1227 }
1228 if (dn->dn_assigned_txg == 0)
1229 dn->dn_assigned_txg = tx->tx_txg;
1230 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1231 (void) refcount_add(&dn->dn_tx_holds, tx);
1232 mutex_exit(&dn->dn_mtx);
1233 }
1234 towrite += refcount_count(&txh->txh_space_towrite);
1235 tofree += refcount_count(&txh->txh_space_tofree);
1236 tooverwrite += refcount_count(&txh->txh_space_tooverwrite);
1237 tounref += refcount_count(&txh->txh_space_tounref);
1238 tohold += refcount_count(&txh->txh_memory_tohold);
1239 fudge += refcount_count(&txh->txh_fudge);
1240 }
1241
1242 /*
1243 * If a snapshot has been taken since we made our estimates,
1244 * assume that we won't be able to free or overwrite anything.
1245 */
1246 if (tx->tx_objset &&
1247 dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
1248 tx->tx_lastsnap_txg) {
1249 towrite += tooverwrite;
1250 tooverwrite = tofree = 0;
1251 }
1252
1253 /* needed allocation: worst-case estimate of write space */
1254 asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
1255 /* freed space estimate: worst-case overwrite + free estimate */
1256 fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
1257 /* convert unrefd space to worst-case estimate */
1258 usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
1259 /* calculate memory footprint estimate */
1260 memory = towrite + tooverwrite + tohold;
1261
1262 #ifdef ZFS_DEBUG
1263 /*
1264 * Add in 'tohold' to account for our dirty holds on this memory
1265 * XXX - the "fudge" factor is to account for skipped blocks that
1266 * we missed because dnode_next_offset() misses in-core-only blocks.
1267 */
1268 tx->tx_space_towrite = asize +
1269 spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
1270 tx->tx_space_tofree = tofree;
1271 tx->tx_space_tooverwrite = tooverwrite;
1272 tx->tx_space_tounref = tounref;
1273 #endif
1274
1275 if (tx->tx_dir && asize != 0) {
1276 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
1277 asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
1278 if (err)
1279 return (err);
1280 }
1281
1282 return (0);
1283 }
1284
1285 static void
1286 dmu_tx_unassign(dmu_tx_t *tx)
1287 {
1288 dmu_tx_hold_t *txh;
1289
1290 if (tx->tx_txg == 0)
1291 return;
1292
1293 txg_rele_to_quiesce(&tx->tx_txgh);
1294
1295 /*
1296 * Walk the transaction's hold list, removing the hold on the
1297 * associated dnode, and notifying waiters if the refcount drops to 0.
1298 */
1299 for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
1300 txh = list_next(&tx->tx_holds, txh)) {
1301 dnode_t *dn = txh->txh_dnode;
1302
1303 if (dn == NULL)
1304 continue;
1305 mutex_enter(&dn->dn_mtx);
1306 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1307
1308 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1309 dn->dn_assigned_txg = 0;
1310 cv_broadcast(&dn->dn_notxholds);
1311 }
1312 mutex_exit(&dn->dn_mtx);
1313 }
1314
1315 txg_rele_to_sync(&tx->tx_txgh);
1316
1317 tx->tx_lasttried_txg = tx->tx_txg;
1318 tx->tx_txg = 0;
1319 }
1320
1321 /*
1322 * Assign tx to a transaction group. txg_how can be one of:
1323 *
1324 * (1) TXG_WAIT. If the current open txg is full, waits until there's
1325 * a new one. This should be used when you're not holding locks.
1326 * It will only fail if we're truly out of space (or over quota).
1327 *
1328 * (2) TXG_NOWAIT. If we can't assign into the current open txg without
1329 * blocking, returns immediately with ERESTART. This should be used
1330 * whenever you're holding locks. On an ERESTART error, the caller
1331 * should drop locks, do a dmu_tx_wait(tx), and try again.
1332 *
1333 * (3) TXG_WAITED. Like TXG_NOWAIT, but indicates that dmu_tx_wait()
1334 * has already been called on behalf of this operation (though
1335 * most likely on a different tx).
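 *
 * A typical non-syncing caller follows the pattern below (an
 * illustrative sketch only; "os", "object", "off" and "len" are
 * placeholders for the caller's own state):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... retry, passing TXG_WAITED on the new tx ...
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... make the changes covered by the holds ...
 *	dmu_tx_commit(tx);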
1336 */
1337 int
1338 dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
1339 {
1340 int err;
1341
1342 ASSERT(tx->tx_txg == 0);
1343 ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
1344 txg_how == TXG_WAITED);
1345 ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1346
1347 /* If we might wait, we must not hold the config lock. */
1348 ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));
1349
1350 if (txg_how == TXG_WAITED)
1351 tx->tx_waited = B_TRUE;
1352
1353 while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
1354 dmu_tx_unassign(tx);
1355
1356 if (err != ERESTART || txg_how != TXG_WAIT)
1357 return (err);
1358
1359 dmu_tx_wait(tx);
1360 }
1361
1362 txg_rele_to_quiesce(&tx->tx_txgh);
1363
1364 return (0);
1365 }
1366
1367 void
1368 dmu_tx_wait(dmu_tx_t *tx)
1369 {
1370 spa_t *spa = tx->tx_pool->dp_spa;
1371 dsl_pool_t *dp = tx->tx_pool;
1372
1373 ASSERT(tx->tx_txg == 0);
1374 ASSERT(!dsl_pool_config_held(tx->tx_pool));
1375
1376 if (tx->tx_wait_dirty) {
1377 /*
1378 * dmu_tx_try_assign() has determined that we need to wait
1379 * because we've consumed much or all of the dirty buffer
1380 * space.
1381 */
1382 mutex_enter(&dp->dp_lock);
1383 while (dp->dp_dirty_total >= zfs_dirty_data_max)
1384 cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1385 uint64_t dirty = dp->dp_dirty_total;
1386 mutex_exit(&dp->dp_lock);
1387
1388 dmu_tx_delay(tx, dirty);
1389
1390 tx->tx_wait_dirty = B_FALSE;
1391
1392 /*
1393 * Note: setting tx_waited only has effect if the caller
1394 * used TXG_WAIT. Otherwise they are going to destroy
1395 * this tx and try again. The common case, zfs_write(),
1396 * uses TXG_WAIT.
1397 */
1398 tx->tx_waited = B_TRUE;
1399 } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1400 /*
1401 * If the pool is suspended we need to wait until it
1402 * is resumed. Note that it's possible that the pool
1403 * has become active after this thread has tried to
1404 * obtain a tx. If that's the case then tx_lasttried_txg
1405 * would not have been set.
1406 */
1407 txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1408 } else if (tx->tx_needassign_txh) {
1409 /*
1410 * A dnode is assigned to the quiescing txg. Wait for its
1411 * transaction to complete.
1412 */
1413 dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1414
1415 mutex_enter(&dn->dn_mtx);
1416 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1417 cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1418 mutex_exit(&dn->dn_mtx);
1419 tx->tx_needassign_txh = NULL;
1420 } else {
1421 txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
1422 }
1423 }
1424
1425 void
1426 dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
1427 {
1428 #ifdef ZFS_DEBUG
1429 if (tx->tx_dir == NULL || delta == 0)
1430 return;
1431
1432 if (delta > 0) {
1433 /* FreeBSD r318821, illumos 7793 ztest fails assertion in dmu_tx_willuse_space
1434 ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
1435 tx->tx_space_towrite);
1436 */
1437 (void) refcount_add_many(&tx->tx_space_written, delta, NULL);
1438 } else {
1439 (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
1440 }
1441 #endif
1442 }
1443
1444 static void
1445 dmu_tx_destroy(dmu_tx_t *tx)
1446 {
1447 dmu_tx_hold_t *txh;
1448
1449 while ((txh = list_head(&tx->tx_holds)) != NULL) {
1450 dnode_t *dn = txh->txh_dnode;
1451
1452 list_remove(&tx->tx_holds, txh);
1453 refcount_destroy_many(&txh->txh_space_towrite,
1454 refcount_count(&txh->txh_space_towrite));
1455 refcount_destroy_many(&txh->txh_space_tofree,
1456 refcount_count(&txh->txh_space_tofree));
1457 refcount_destroy_many(&txh->txh_space_tooverwrite,
1458 refcount_count(&txh->txh_space_tooverwrite));
1459 refcount_destroy_many(&txh->txh_space_tounref,
1460 refcount_count(&txh->txh_space_tounref));
1461 refcount_destroy_many(&txh->txh_memory_tohold,
1462 refcount_count(&txh->txh_memory_tohold));
1463 refcount_destroy_many(&txh->txh_fudge,
1464 refcount_count(&txh->txh_fudge));
1465 kmem_free(txh, sizeof (dmu_tx_hold_t));
1466 if (dn != NULL)
1467 dnode_rele(dn, tx);
1468 }
1469
1470 list_destroy(&tx->tx_callbacks);
1471 list_destroy(&tx->tx_holds);
1472 #ifdef ZFS_DEBUG
1473 refcount_destroy_many(&tx->tx_space_written,
1474 refcount_count(&tx->tx_space_written));
1475 refcount_destroy_many(&tx->tx_space_freed,
1476 refcount_count(&tx->tx_space_freed));
1477 #endif
1478 kmem_free(tx, sizeof (dmu_tx_t));
1479 }
1480
1481 void
1482 dmu_tx_commit(dmu_tx_t *tx)
1483 {
1484 ASSERT(tx->tx_txg != 0);
1485
1486 /*
1487 * Go through the transaction's hold list and remove holds on
1488 * associated dnodes, notifying waiters if no holds remain.
1489 */
1490 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1491 txh = list_next(&tx->tx_holds, txh)) {
1492 dnode_t *dn = txh->txh_dnode;
1493
1494 if (dn == NULL)
1495 continue;
1496
1497 mutex_enter(&dn->dn_mtx);
1498 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1499
1500 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1501 dn->dn_assigned_txg = 0;
1502 cv_broadcast(&dn->dn_notxholds);
1503 }
1504 mutex_exit(&dn->dn_mtx);
1505 }
1506
1507 if (tx->tx_tempreserve_cookie)
1508 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1509
1510 if (!list_is_empty(&tx->tx_callbacks))
1511 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1512
1513 if (tx->tx_anyobj == FALSE)
1514 txg_rele_to_sync(&tx->tx_txgh);
1515
1516 #ifdef ZFS_DEBUG
1517 dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
1518 tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
1519 tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
1520 #endif
1521 dmu_tx_destroy(tx);
1522 }
1523
1524 void
1525 dmu_tx_abort(dmu_tx_t *tx)
1526 {
1527 ASSERT(tx->tx_txg == 0);
1528
1529 /*
1530 * Call any registered callbacks with an error code.
1531 */
1532 if (!list_is_empty(&tx->tx_callbacks))
1533 dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);
1534
1535 dmu_tx_destroy(tx);
1536 }
1537
1538 uint64_t
1539 dmu_tx_get_txg(dmu_tx_t *tx)
1540 {
1541 ASSERT(tx->tx_txg != 0);
1542 return (tx->tx_txg);
1543 }
1544
1545 dsl_pool_t *
1546 dmu_tx_pool(dmu_tx_t *tx)
1547 {
1548 ASSERT(tx->tx_pool != NULL);
1549 return (tx->tx_pool);
1550 }
1551
1552
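/*
 * Register a callback to be invoked at the end of this transaction's
 * lifetime.  An illustrative sketch follows (the callback type is
 * real; "my_done_cb" and "arg" are placeholders): the callback
 * typically receives 0 once the assigned txg has synced (the dispatch
 * lives in the txg code), or an error such as ECANCELED if the tx is
 * aborted instead (see dmu_tx_abort()).
 *
 *	static void
 *	my_done_cb(void *arg, int error)
 *	{
 *		... clean up caller state held in "arg" ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done_cb, arg);
 */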
1553 void
1554 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1555 {
1556 dmu_tx_callback_t *dcb;
1557
1558 dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1559
1560 dcb->dcb_func = func;
1561 dcb->dcb_data = data;
1562
1563 list_insert_tail(&tx->tx_callbacks, dcb);
1564 }
1565
1566 /*
1567 * Call all the commit callbacks on a list, with a given error code.
1568 */
1569 void
1570 dmu_tx_do_callbacks(list_t *cb_list, int error)
1571 {
1572 dmu_tx_callback_t *dcb;
1573
1574 while ((dcb = list_head(cb_list)) != NULL) {
1575 list_remove(cb_list, dcb);
1576 dcb->dcb_func(dcb->dcb_data, error);
1577 kmem_free(dcb, sizeof (dmu_tx_callback_t));
1578 }
1579 }
1580
1581 /*
1582 * Interface to hold a bunch of attributes,
1583 * used for creating new files.
1584 * attrsize is the total size of all attributes
1585 * to be added during object creation
1586 *
1587 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
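 *
 * Illustrative usage (a sketch; names are placeholders): a caller
 * creating a file with an initial bundle of attributes declares
 *
 *	dmu_tx_hold_sa_create(tx, total_attr_size);
 *
 * while a caller updating attributes through an existing handle does
 *
 *	dmu_tx_hold_sa(tx, hdl, may_grow);
 *
 * passing may_grow = B_TRUE when the update may enlarge the saved
 * attributes (which can require a layout change or a spill block).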
1588 */
1589
1590 /*
1591 * Hold the necessary attribute name for attribute registration.
1592 * This should be a very rare case where it is needed. If it does
1593 * happen it will only happen on the first write to the file system.
1594 */
1595 static void
1596 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1597 {
1598 int i;
1599
1600 if (!sa->sa_need_attr_registration)
1601 return;
1602
1603 for (i = 0; i != sa->sa_num_attrs; i++) {
1604 if (!sa->sa_attr_table[i].sa_registered) {
1605 if (sa->sa_reg_attr_obj)
1606 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1607 B_TRUE, sa->sa_attr_table[i].sa_name);
1608 else
1609 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1610 B_TRUE, sa->sa_attr_table[i].sa_name);
1611 }
1612 }
1613 }
1614
1615
1616 void
1617 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1618 {
1619 dnode_t *dn;
1620 dmu_tx_hold_t *txh;
1621
1622 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1623 THT_SPILL, 0, 0);
1624
1625 dn = txh->txh_dnode;
1626
1627 if (dn == NULL)
1628 return;
1629
1630 /* If blkptr doesn't exist then add space to towrite */
1631 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
1632 (void) refcount_add_many(&txh->txh_space_towrite,
1633 SPA_OLD_MAXBLOCKSIZE, FTAG);
1634 } else {
1635 blkptr_t *bp;
1636
1637 bp = &dn->dn_phys->dn_spill;
1638 if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
1639 bp, bp->blk_birth)) {
1640 (void) refcount_add_many(&txh->txh_space_tooverwrite,
1641 SPA_OLD_MAXBLOCKSIZE, FTAG);
1642 } else {
1643 (void) refcount_add_many(&txh->txh_space_towrite,
1644 SPA_OLD_MAXBLOCKSIZE, FTAG);
1645 }
1646 if (!BP_IS_HOLE(bp)) {
1647 (void) refcount_add_many(&txh->txh_space_tounref,
1648 SPA_OLD_MAXBLOCKSIZE, FTAG);
1649 }
1650 }
1651 }
1652
1653 void
1654 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1655 {
1656 sa_os_t *sa = tx->tx_objset->os_sa;
1657
1658 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1659
1660 if (tx->tx_objset->os_sa->sa_master_obj == 0)
1661 return;
1662
1663 if (tx->tx_objset->os_sa->sa_layout_attr_obj)
1664 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1665 else {
1666 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1667 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1668 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1669 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1670 }
1671
1672 dmu_tx_sa_registration_hold(sa, tx);
1673
1674 if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
1675 return;
1676
1677 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1678 THT_SPILL, 0, 0);
1679 }
1680
1681 /*
1682 * Hold SA attribute
1683 *
1684 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
1685 *
1686 * variable_size is the total size of all variable sized attributes
1687 * passed to this function. It is not the total size of all
1688 * variable size attributes that *may* exist on this object.
1689 */
1690 void
1691 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1692 {
1693 uint64_t object;
1694 sa_os_t *sa = tx->tx_objset->os_sa;
1695
1696 ASSERT(hdl != NULL);
1697
1698 object = sa_handle_object(hdl);
1699
1700 dmu_tx_hold_bonus(tx, object);
1701
1702 if (tx->tx_objset->os_sa->sa_master_obj == 0)
1703 return;
1704
1705 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1706 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1707 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1708 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1709 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1710 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1711 }
1712
1713 dmu_tx_sa_registration_hold(sa, tx);
1714
1715 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1716 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1717
1718 if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1719 ASSERT(tx->tx_txg == 0);
1720 dmu_tx_hold_spill(tx, object);
1721 } else {
1722 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1723 dnode_t *dn;
1724
1725 DB_DNODE_ENTER(db);
1726 dn = DB_DNODE(db);
1727 if (dn->dn_have_spill) {
1728 ASSERT(tx->tx_txg == 0);
1729 dmu_tx_hold_spill(tx, object);
1730 }
1731 DB_DNODE_EXIT(db);
1732 }
1733 }
1734