/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(db->db_dnode == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			db = dbuf_hold_level(dn, 0, start, FTAG);
			rw_exit(&dn->dn_struct_rwlock);
			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
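
/*
 * Example usage (an editor's sketch, not part of the original file): a
 * typical caller reserves the write range before assigning the tx, then
 * issues the write against the assigned tx.  The os, object, off, size,
 * and data below are placeholders:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, size, data, tx);
 *	dmu_tx_commit(tx);
 */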

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
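
/*
 * Example usage (an editor's sketch): freeing an object's entire data,
 * where len == DMU_OBJECT_END means "from off to the end of the object".
 * dmu_free_range() performs the actual free under the assigned tx:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err == 0) {
 *		VERIFY(0 == dmu_free_range(os, object, 0,
 *		    DMU_OBJECT_END, tx));
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */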

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		}
		if (dn->dn_phys->dn_blkptr[0].blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
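
/*
 * Example usage (an editor's sketch): a caller adding an entry to a ZAP
 * object pairs this hold with the zap_add() it is budgeting for.  The
 * zapobj, name, and value below are placeholders:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, zapobj, TRUE, name);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err == 0) {
 *		VERIFY(0 == zap_add(os, zapobj, name, 8, 1, &value, tx));
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */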

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
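/*
 * The TXG_NOWAIT retry convention described above, sketched by the
 * editor ("top" is a hypothetical retry label in the caller):
 *
 *	top:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_*(tx, ...);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err != 0) {
 *			if (err == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		... modify the held objects, then ...
 *		dmu_tx_commit(tx);
 */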
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
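
/*
 * Example usage (an editor's sketch): "my_commit_cb" is a hypothetical
 * callback.  It runs with error == 0 once the assigned txg commits, or
 * with ECANCELED if the tx is aborted (see dmu_tx_abort() above):
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		if (error == 0) {
 *			... the transaction is on stable storage ...
 *		}
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 */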

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}