/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/*
	 * Lock ordering requires that we hold the children's db_mutexes (by
	 * calling dbuf_find()) before holding the parent's db_rwlock. The lock
	 * order is imposed by dbuf_read's steps of "grab the lock to protect
	 * db_parent, get db_parent, hold db_parent's db_rwlock".
	 */
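	/*
	 * A sketch of the resulting nesting in this function (the
	 * children's mutexes are acquired by the dbuf_find() calls
	 * below, before the parent's rwlock is taken for the copy):
	 *
	 *	children[i] = dbuf_find(...);	acquires child's db_mtx
	 *	rw_enter(&db->db_rwlock, ...);	then the parent's rwlock
	 *	...
	 *	mutex_exit(&child->db_mtx);	children released first
	 *	rw_exit(&db->db_rwlock);	then the parent
	 */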
	dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
	ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
	for (i = 0; i < nblkptr; i++) {
		children[i] =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);
	}

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	if (dn->dn_dbuf != NULL)
		rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
	rw_enter(&db->db_rwlock, RW_WRITER);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = children[i];

		if (child == NULL)
			continue;
#ifdef ZFS_DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif /* ZFS_DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	rw_exit(&db->db_rwlock);
	if (dn->dn_dbuf != NULL)
		rw_exit(&dn->dn_dbuf->db_rwlock);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */
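		/*
		 * For example (a hypothetical sketch): punching out a
		 * 128K L0 data block in txg 1234 with hole_birth active
		 * leaves an otherwise-zero blkptr carrying LSIZE = 128K,
		 * the original TYPE, LEVEL = 0, and logical birth txg
		 * 1234, so a later incremental send can tell this hole
		 * apart from one that has existed since the object was
		 * created (birth 0).
		 */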
		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level - 1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = dbuf_find_dirty_eq(child, txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    list_is_empty(&child->db_dirty_records)) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

/* free_verify() is compiled out (and FREE_VERIFY is a no-op) on non-debug builds */
#ifdef ZFS_DEBUG
#define	FREE_VERIFY(db, start, end, tx) \
	free_verify(db, start, end, tx)
#else
#define	FREE_VERIFY(db, start, end, tx)
#endif

/*
 * We don't usually free the indirect blocks here. If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times. Therefore, we don't free any indirect
 * blocks in free_children(). If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready, which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed. Therefore, we free the indirect blocks immediately in that
 * case.
 */
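/*
 * A hypothetical illustration of the first case: suppose one txg both
 * frees a range of blocks under an L1 indirect and rewrites one block
 * under that same L1. If free_children() freed the L1 once it became
 * all holes, the rewrite would recreate it with every other slot a
 * birth-time-zero hole, and an incremental zfs send could no longer
 * distinguish holes punched in this txg from ones that never held data.
 */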
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write(). If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block). Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty. E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed. The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
	dmu_buf_unlock_parent(db, dblt, FTAG);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		rw_enter(&db->db_rwlock, RW_WRITER);
		free_blocks(dn, bp, end - start + 1, tx);
		rw_exit(&db->db_rwlock);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		rw_enter(&db->db_rwlock, RW_WRITER);
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
		rw_exit(&db->db_rwlock);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

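/*
 * A worked example of the clamping arithmetic above, with hypothetical
 * numbers: for 128K indirect blocks, epbs = 17 - 7 = 10, so an L1
 * (shift = 0) with db_blkid == 2 covers L0 blkids [2048, 3071].
 * Freeing blkid = 1000, nblks = 2000 covers L0 blkids [1000, 2999];
 * the code clamps that to start = 2048, end = 2999, so this L1 frees
 * the first 952 of its 1024 block pointers, and bp is not advanced
 * because the range begins before this L1's coverage does.
 */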
/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);
			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden. Usually, the last DRR_FREE record
	 * will be at the maxblkid, because the source system sets
	 * the maxblkid when truncating. However, if the last block
	 * was freed by overwriting with zeros and being compressed
	 * away to a hole, the source system will generate a DRR_FREE
	 * record while leaving the maxblkid after the end of that
	 * record. In this case we need to leave the maxblkid as
	 * indicated in the DRR_OBJECT record, so that it matches the
	 * source system, ensuring that the cryptographic hashes will
	 * match.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		uint64_t off __maybe_unused;
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

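/*
 * Bundle of arguments threaded through range_tree_vacate() in
 * dnode_sync() below: each segment of the txg's free-range tree is
 * handed to dnode_sync_free_range(), which temporarily drops dn_mtx
 * (held by the caller across the vacate) before doing the actual frees.
 */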
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef ZFS_DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif /* ZFS_DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs. The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}

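/*
 * The marker above keeps the AVL walk stable while dbuf_destroy() may
 * remove an arbitrary set of nodes: it is inserted immediately before
 * the victim so that, after dbuf_destroy() has removed the victim and
 * possibly its parents, AVL_NEXT(marker) resumes the walk at the first
 * surviving dbuf.
 */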
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}

static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(list_head(&db->db_dirty_records) == dr);
		list_remove_head(&db->db_dirty_records);
		ASSERT(list_is_empty(&db->db_dirty_records));
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}

static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
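/*
 * In outline: update user accounting, copy the open-context dn_next_*
 * values into the on-disk dnode_phys_t, release the spill block and any
 * freed ranges, then either tear the dnode down via dnode_sync_free()
 * if its free txg has arrived, or hand the dirty records to
 * dbuf_sync_list() to be written out.
 */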
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn __maybe_unused = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
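	/*
	 * For instance (a hypothetical scenario): if "zfs set
	 * checksum=sha512" lands while this txg is syncing, some blocks
	 * in this txg may already be written with sha512 even though the
	 * property change itself is recorded in a later txg. That is
	 * harmless, since every blkptr records the checksum and
	 * compression algorithms actually used for that block.
	 */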
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_LARGE_DNODE] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection(). See dnode_new_blkid()
	 * for an explanation of the high bit being set.
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid =
		    dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}