/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2023 Alexander Stetsenko <alex.stetsenko@gmail.com>
 * Copyright (c) 2023, Klara Inc.
 */

/*
 * This file contains the top half of the zfs directory structure
 * implementation. The bottom half is in zap_leaf.c.
 *
 * The zdir is an extendable hash data structure. There is a table of
 * pointers to buckets (zap_t->zd_data->zd_leafs). The buckets are
 * each a constant size and hold a variable number of directory entries.
 * The buckets (aka "leaf nodes") are implemented in zap_leaf.c.
 *
 * The pointer table holds a power of 2 number of pointers.
 * (1<<zap_t->zd_data->zd_prefix_len). The bucket pointed to
 * by the pointer at index i in the table holds entries whose hash value
 * has a zd_prefix_len - bit prefix
 */

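/*
 * For example, if the table currently uses a 10-bit prefix, it holds
 * 1<<10 == 1024 pointers, and the bucket for a given 64-bit hash is
 * reached through the pointer at index ZAP_HASH_IDX(hash, 10), i.e. the
 * top 10 bits of the hash. Several adjacent pointers may reference the
 * same bucket when that bucket's own prefix is shorter than the table's.
 */
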
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>

/*
 * If zap_iterate_prefetch is set, we will prefetch the entire ZAP object
 * (all leaf blocks) when we start iterating over it.
 *
 * For zap_cursor_init(), the callers all intend to iterate through all the
 * entries. There are a few cases where an error (typically i/o error) could
 * cause it to bail out early.
 *
 * For zap_cursor_init_serialized(), there are callers that do the iteration
 * outside of ZFS. Typically they would iterate over everything, but we
 * don't have control of that. E.g. zfs_ioc_snapshot_list_next(),
 * zcp_snapshots_iter(), and other iterators over things in the MOS - these
 * are called by /sbin/zfs and channel programs. The other example is
 * zfs_readdir() which iterates over directory entries for the getdents()
 * syscall. /sbin/ls iterates to the end (unless it receives a signal), but
 * userland doesn't have to.
 *
 * Given that the ZAP entries aren't returned in a specific order, the only
 * legitimate use cases for partial iteration would be:
 *
 * 1. Pagination: e.g. you only want to display 100 entries at a time, so you
 *    get the first 100 and then wait for the user to hit "next page", which
 *    they may never do.
 *
 * 2. You want to know if there are more than X entries, without relying on
 *    the zfs-specific implementation of the directory's st_size (which is
 *    the number of entries).
 */
static int zap_iterate_prefetch = B_TRUE;

/*
 * Enable ZAP shrinking. When enabled, empty sibling leaf blocks will be
 * collapsed into a single block.
 */
int zap_shrink_enabled = B_TRUE;

int fzap_default_block_shift = 14; /* 16k blocksize */

static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);
static int zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx);

void
fzap_byteswap(void *vbuf, size_t size)
{
	uint64_t block_type = *(uint64_t *)vbuf;

	if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
		zap_leaf_byteswap(vbuf, size);
	else {
		/* it's a ptrtbl block */
		byteswap_uint64_array(vbuf, size);
	}
}

void
fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
{
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	zap->zap_ismicro = FALSE;

	zap->zap_dbu.dbu_evict_func_sync = zap_evict_sync;
	zap->zap_dbu.dbu_evict_func_async = NULL;

	mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT, 0);
	zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;

	zap_phys_t *zp = zap_f_phys(zap);
	/*
	 * explicitly zero it since it might be coming from an
	 * initialized microzap
	 */
	memset(zap->zap_dbuf->db_data, 0, zap->zap_dbuf->db_size);
	zp->zap_block_type = ZBT_HEADER;
	zp->zap_magic = ZAP_MAGIC;

	zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);

	zp->zap_freeblk = 2;	/* block 1 will be the first leaf */
	zp->zap_num_leafs = 1;
	zp->zap_num_entries = 0;
	zp->zap_salt = zap->zap_salt;
	zp->zap_normflags = zap->zap_normflags;
	zp->zap_flags = flags;

	/* block 1 will be the first leaf */
	for (int i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
		ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;

	/*
	 * set up block 1 - the first leaf
	 */
	dmu_buf_t *db;
	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db, tx);

	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
	l->l_dbuf = db;

	zap_leaf_init(l, zp->zap_normflags != 0);

	kmem_free(l, sizeof (zap_leaf_t));
	dmu_buf_rele(db, FTAG);
}

static int
zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)
{
	if (RW_WRITE_HELD(&zap->zap_rwlock))
		return (1);
	if (rw_tryupgrade(&zap->zap_rwlock)) {
		dmu_buf_will_dirty(zap->zap_dbuf, tx);
		return (1);
	}
	return (0);
}

/*
 * Generic routines for dealing with the pointer & cookie tables.
 */

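/*
 * Once the pointer table no longer fits in the header block, it occupies
 * zt_numblks consecutive blocks starting at block zt_blk, each holding
 * 2^(block_shift - 3) 8-byte entries. While a grow is in progress,
 * zt_nextblk names the twice-as-large copy being built and zt_blks_copied
 * records how many old blocks have been migrated so far.
 */
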
static int
zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
    void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n),
    dmu_tx_t *tx)
{
	uint64_t newblk;
	int bs = FZAP_BLOCK_SHIFT(zap);
	int hepb = 1<<(bs-4);
	/* hepb = half the number of entries in a block */

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	ASSERT(tbl->zt_blk != 0);
	ASSERT(tbl->zt_numblks > 0);

	if (tbl->zt_nextblk != 0) {
		newblk = tbl->zt_nextblk;
	} else {
		newblk = zap_allocate_blocks(zap, tbl->zt_numblks * 2);
		tbl->zt_nextblk = newblk;
		ASSERT0(tbl->zt_blks_copied);
		dmu_prefetch_by_dnode(zap->zap_dnode, 0,
		    tbl->zt_blk << bs, tbl->zt_numblks << bs,
		    ZIO_PRIORITY_SYNC_READ);
	}

	/*
	 * Copy the ptrtbl from the old to new location.
	 */

	uint64_t b = tbl->zt_blks_copied;
	dmu_buf_t *db_old;
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);

	/* first half of entries in old[b] go to new[2*b+0] */
	dmu_buf_t *db_new;
	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db_new, tx);
	transfer_func(db_old->db_data, db_new->db_data, hepb);
	dmu_buf_rele(db_new, FTAG);

	/* second half of entries in old[b] go to new[2*b+1] */
	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db_new, tx);
	transfer_func((uint64_t *)db_old->db_data + hepb,
	    db_new->db_data, hepb);
	dmu_buf_rele(db_new, FTAG);

	dmu_buf_rele(db_old, FTAG);

	tbl->zt_blks_copied++;

	dprintf("copied block %llu of %llu\n",
	    (u_longlong_t)tbl->zt_blks_copied,
	    (u_longlong_t)tbl->zt_numblks);

	if (tbl->zt_blks_copied == tbl->zt_numblks) {
		(void) dmu_free_range(zap->zap_objset, zap->zap_object,
		    tbl->zt_blk << bs, tbl->zt_numblks << bs, tx);

		tbl->zt_blk = newblk;
		tbl->zt_numblks *= 2;
		tbl->zt_shift++;
		tbl->zt_nextblk = 0;
		tbl->zt_blks_copied = 0;

		dprintf("finished; numblocks now %llu (%uk entries)\n",
		    (u_longlong_t)tbl->zt_numblks, 1<<(tbl->zt_shift-10));
	}

	return (0);
}

static int
zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
    dmu_tx_t *tx)
{
	int bs = FZAP_BLOCK_SHIFT(zap);

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT(tbl->zt_blk != 0);

	dprintf("storing %llx at index %llx\n", (u_longlong_t)val,
	    (u_longlong_t)idx);

	uint64_t blk = idx >> (bs-3);
	uint64_t off = idx & ((1<<(bs-3))-1);

	dmu_buf_t *db;
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);
	dmu_buf_will_dirty(db, tx);

	if (tbl->zt_nextblk != 0) {
		uint64_t idx2 = idx * 2;
		uint64_t blk2 = idx2 >> (bs-3);
		uint64_t off2 = idx2 & ((1<<(bs-3))-1);
		dmu_buf_t *db2;

		err = dmu_buf_hold_by_dnode(zap->zap_dnode,
		    (tbl->zt_nextblk + blk2) << bs, FTAG, &db2,
		    DMU_READ_NO_PREFETCH);
		if (err != 0) {
			dmu_buf_rele(db, FTAG);
			return (err);
		}
		dmu_buf_will_dirty(db2, tx);
		((uint64_t *)db2->db_data)[off2] = val;
		((uint64_t *)db2->db_data)[off2+1] = val;
		dmu_buf_rele(db2, FTAG);
	}

	((uint64_t *)db->db_data)[off] = val;
	dmu_buf_rele(db, FTAG);

	return (0);
}

static int
zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
{
	int bs = FZAP_BLOCK_SHIFT(zap);

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	uint64_t blk = idx >> (bs-3);
	uint64_t off = idx & ((1<<(bs-3))-1);

	dmu_buf_t *db;
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);
	*valp = ((uint64_t *)db->db_data)[off];
	dmu_buf_rele(db, FTAG);

	if (tbl->zt_nextblk != 0) {
		/*
		 * read the nextblk for the sake of i/o error checking,
		 * so that zap_table_load() will catch errors for
		 * zap_table_store.
		 */
		blk = (idx*2) >> (bs-3);

		err = dmu_buf_hold_by_dnode(zap->zap_dnode,
		    (tbl->zt_nextblk + blk) << bs, FTAG, &db,
		    DMU_READ_NO_PREFETCH);
		if (err == 0)
			dmu_buf_rele(db, FTAG);
	}
	return (err);
}

/*
 * Routines for growing the ptrtbl.
 */

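/*
 * Doubling the table maps each old entry onto two adjacent new entries:
 * old index i becomes new indices 2i and 2i+1, both holding the same leaf
 * block number. zap_table_store() mirrors writes into the half-built copy
 * the same way, so the copy stays coherent while the grow is in flight.
 */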
static void
zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
{
	for (int i = 0; i < n; i++) {
		uint64_t lb = src[i];
		dst[2 * i + 0] = lb;
		dst[2 * i + 1] = lb;
	}
}

static int
zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
{
	/*
	 * The pointer table should never use more hash bits than we
	 * have (otherwise we'd be using useless zero bits to index it).
	 * If we are within 2 bits of running out, stop growing, since
	 * this is already an aberrant condition.
	 */
	if (zap_f_phys(zap)->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
		return (SET_ERROR(ENOSPC));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
		/*
		 * We are outgrowing the "embedded" ptrtbl (the one
		 * stored in the header block). Give it its own entire
		 * block, which will double the size of the ptrtbl.
		 */
		ASSERT3U(zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
		    ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		ASSERT0(zap_f_phys(zap)->zap_ptrtbl.zt_blk);

		uint64_t newblk = zap_allocate_blocks(zap, 1);
		dmu_buf_t *db_new;
		int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
		    newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
		    DMU_READ_NO_PREFETCH);
		if (err != 0)
			return (err);
		dmu_buf_will_dirty(db_new, tx);
		zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
		    db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		dmu_buf_rele(db_new, FTAG);

		zap_f_phys(zap)->zap_ptrtbl.zt_blk = newblk;
		zap_f_phys(zap)->zap_ptrtbl.zt_numblks = 1;
		zap_f_phys(zap)->zap_ptrtbl.zt_shift++;

		ASSERT3U(1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks <<
		    (FZAP_BLOCK_SHIFT(zap)-3));

		return (0);
	} else {
		return (zap_table_grow(zap, &zap_f_phys(zap)->zap_ptrtbl,
		    zap_ptrtbl_transfer, tx));
	}
}

static void
zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(zap->zap_dbuf, tx);
	mutex_enter(&zap->zap_f.zap_num_entries_mtx);
	ASSERT(delta > 0 || zap_f_phys(zap)->zap_num_entries >= -delta);
	zap_f_phys(zap)->zap_num_entries += delta;
	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
}

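/*
 * Block allocation is append-only: new blocks are carved off the end of
 * the object by bumping zap_freeblk. Freed leaf blocks are not recycled;
 * zap_trunc() can only pull zap_freeblk back in when the blocks at the
 * very end of the object have been freed by zap_shrink().
 */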
static uint64_t
zap_allocate_blocks(zap_t *zap, int nblocks)
{
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	uint64_t newblk = zap_f_phys(zap)->zap_freeblk;
	zap_f_phys(zap)->zap_freeblk += nblocks;
	return (newblk);
}

static void
zap_leaf_evict_sync(void *dbu)
{
	zap_leaf_t *l = dbu;

	rw_destroy(&l->l_rwlock);
	kmem_free(l, sizeof (zap_leaf_t));
}

static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	rw_init(&l->l_rwlock, NULL, RW_NOLOCKDEP, NULL);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = zap_allocate_blocks(zap, 1);
	l->l_dbuf = NULL;

	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
	    DMU_READ_NO_PREFETCH));
	dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
	VERIFY3P(NULL, ==, dmu_buf_set_user(l->l_dbuf, &l->l_dbu));
	dmu_buf_will_dirty(l->l_dbuf, tx);

	zap_leaf_init(l, zap->zap_normflags != 0);

	zap_f_phys(zap)->zap_num_leafs++;

	return (l);
}

int
fzap_count(zap_t *zap, uint64_t *count)
{
	ASSERT(!zap->zap_ismicro);
	mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
	*count = zap_f_phys(zap)->zap_num_entries;
	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
	return (0);
}

/*
 * Routines for obtaining zap_leaf_t's
 */

void
zap_put_leaf(zap_leaf_t *l)
{
	rw_exit(&l->l_rwlock);
	dmu_buf_rele(l->l_dbuf, NULL);
}

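/*
 * The in-memory zap_leaf_t is cached as the dbuf's user data, so later
 * lookups of the same leaf block reuse it. If two threads race to attach
 * one, dmu_buf_set_user() returns the existing winner and the loser's
 * copy is discarded via zap_leaf_evict_sync().
 */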
static zap_leaf_t *
zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
{
	ASSERT(blkid != 0);

	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
	rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = blkid;
	l->l_bs = highbit64(db->db_size) - 1;
	l->l_dbuf = db;

	dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
	zap_leaf_t *winner = dmu_buf_set_user(db, &l->l_dbu);

	rw_exit(&l->l_rwlock);
	if (winner != NULL) {
		/* someone else set it first */
		zap_leaf_evict_sync(&l->l_dbu);
		l = winner;
	}

	/*
	 * lhr_pad was previously used for the next leaf in the leaf
	 * chain. There should be no chained leaves (as we have removed
	 * support for them).
	 */
	ASSERT0(zap_leaf_phys(l)->l_hdr.lh_pad1);

	/*
	 * There should be more hash entries than there can be
	 * chunks to put in the hash table
	 */
	ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);

	/* The chunks should begin at the end of the hash table */
	ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==, (zap_leaf_chunk_t *)
	    &zap_leaf_phys(l)->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);

	/* The chunks should end at the end of the block */
	ASSERT3U((uintptr_t)&ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)) -
	    (uintptr_t)zap_leaf_phys(l), ==, l->l_dbuf->db_size);

	return (l);
}

static int
zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
    zap_leaf_t **lp)
{
	dmu_buf_t *db;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	/*
	 * If the system crashed just after dmu_free_long_range in zfs_rmnode,
	 * we would be left with an empty xattr dir in the delete queue.
	 * blkid=0 would be passed in when doing zfs_purgedir. If that's the
	 * case we should just return immediately. The underlying objects
	 * should already be freed, so this should be perfectly fine.
	 */
	if (blkid == 0)
		return (SET_ERROR(ENOENT));

	int bs = FZAP_BLOCK_SHIFT(zap);
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);

	ASSERT3U(db->db_object, ==, zap->zap_object);
	ASSERT3U(db->db_offset, ==, blkid << bs);
	ASSERT3U(db->db_size, ==, 1 << bs);
	ASSERT(blkid != 0);

	zap_leaf_t *l = dmu_buf_get_user(db);

	if (l == NULL)
		l = zap_open_leaf(blkid, db);

	rw_enter(&l->l_rwlock, lt);
	/*
	 * Must lock before dirtying, otherwise zap_leaf_phys(l) could change,
	 * causing ASSERT below to fail.
	 */
	if (lt == RW_WRITER)
		dmu_buf_will_dirty(db, tx);
	ASSERT3U(l->l_blkid, ==, blkid);
	ASSERT3P(l->l_dbuf, ==, db);
	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_block_type, ==, ZBT_LEAF);
	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

	*lp = l;
	return (0);
}

static int
zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp)
{
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
		ASSERT3U(idx, <,
		    (1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift));
		*valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
		return (0);
	} else {
		return (zap_table_load(zap, &zap_f_phys(zap)->zap_ptrtbl,
		    idx, valp));
	}
}

static int
zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
{
	ASSERT(tx != NULL);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_blk == 0) {
		ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk;
		return (0);
	} else {
		return (zap_table_store(zap, &zap_f_phys(zap)->zap_ptrtbl,
		    idx, blk, tx));
	}
}

static int
zap_set_idx_range_to_blk(zap_t *zap, uint64_t idx, uint64_t nptrs, uint64_t blk,
    dmu_tx_t *tx)
{
	int bs = FZAP_BLOCK_SHIFT(zap);
	int epb = bs >> 3; /* entries per block */
	int err = 0;

	ASSERT(tx != NULL);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	/*
	 * Check for i/o errors
	 */
	for (int i = 0; i < nptrs; i += epb) {
		uint64_t blk;
		err = zap_idx_to_blk(zap, idx + i, &blk);
		if (err != 0) {
			return (err);
		}
	}

	for (int i = 0; i < nptrs; i++) {
		err = zap_set_idx_to_blk(zap, idx + i, blk, tx);
		ASSERT0(err); /* we checked for i/o errors above */
		if (err != 0)
			break;
	}

	return (err);
}

#define	ZAP_PREFIX_HASH(pref, pref_len)	((pref) << (64 - (pref_len)))

/*
 * Each leaf has a single range of entries (block pointers) in the ZAP ptrtbl.
 * If two leaves are siblings, their ranges are adjacent and contain the same
 * number of entries. In order to find out if a leaf has a sibling, we need to
 * check the range corresponding to the sibling leaf. There is no need to check
 * all entries in the range, we only need to check the first and the last one.
 */
static uint64_t
check_sibling_ptrtbl_range(zap_t *zap, uint64_t prefix, uint64_t prefix_len)
{
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	uint64_t h = ZAP_PREFIX_HASH(prefix, prefix_len);
	uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	uint64_t pref_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift - prefix_len;
	uint64_t nptrs = (1 << pref_diff);
	uint64_t first;
	uint64_t last;

	ASSERT3U(idx+nptrs, <=, (1UL << zap_f_phys(zap)->zap_ptrtbl.zt_shift));

	if (zap_idx_to_blk(zap, idx, &first) != 0)
		return (0);

	if (zap_idx_to_blk(zap, idx + nptrs - 1, &last) != 0)
		return (0);

	if (first != last)
		return (0);
	return (first);
}

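/*
 * Find the leaf covering the given hash: index the pointer table with the
 * top zt_shift bits of the hash and read the leaf block it names.
 */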
static int
zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
{
	uint64_t blk;

	ASSERT(zap->zap_dbuf == NULL ||
	    zap_f_phys(zap) == zap->zap_dbuf->db_data);

	/* Reality check for corrupt zap objects (leaf or header). */
	if ((zap_f_phys(zap)->zap_block_type != ZBT_LEAF &&
	    zap_f_phys(zap)->zap_block_type != ZBT_HEADER) ||
	    zap_f_phys(zap)->zap_magic != ZAP_MAGIC) {
		return (SET_ERROR(EIO));
	}

	uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	int err = zap_idx_to_blk(zap, idx, &blk);
	if (err != 0)
		return (err);
	err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);

	ASSERT(err ||
	    ZAP_HASH_IDX(h, zap_leaf_phys(*lp)->l_hdr.lh_prefix_len) ==
	    zap_leaf_phys(*lp)->l_hdr.lh_prefix);
	return (err);
}

static int
zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
    const void *tag, dmu_tx_t *tx, zap_leaf_t **lp)
{
	zap_t *zap = zn->zn_zap;
	uint64_t hash = zn->zn_hash;
	int err;
	int old_prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;

	ASSERT3U(old_prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
	    zap_leaf_phys(l)->l_hdr.lh_prefix);

	if (zap_tryupgradedir(zap, tx) == 0 ||
	    old_prefix_len == zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
		/* We failed to upgrade, or need to grow the pointer table */
		objset_t *os = zap->zap_objset;
		uint64_t object = zap->zap_object;

		zap_put_leaf(l);
		zap_unlockdir(zap, tag);
		err = zap_lockdir(os, object, tx, RW_WRITER,
		    FALSE, FALSE, tag, &zn->zn_zap);
		zap = zn->zn_zap;
		if (err != 0)
			return (err);
		ASSERT(!zap->zap_ismicro);

		while (old_prefix_len ==
		    zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
			err = zap_grow_ptrtbl(zap, tx);
			if (err != 0)
				return (err);
		}

		err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
		if (err != 0)
			return (err);

		if (zap_leaf_phys(l)->l_hdr.lh_prefix_len != old_prefix_len) {
			/* it split while our locks were down */
			*lp = l;
			return (0);
		}
	}
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	ASSERT3U(old_prefix_len, <, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
	    zap_leaf_phys(l)->l_hdr.lh_prefix);

	int prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
	    (old_prefix_len + 1);
	uint64_t sibling =
	    (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;

	/* check for i/o errors before doing zap_leaf_split */
	for (int i = 0; i < (1ULL << prefix_diff); i++) {
		uint64_t blk;
		err = zap_idx_to_blk(zap, sibling + i, &blk);
		if (err != 0)
			return (err);
		ASSERT3U(blk, ==, l->l_blkid);
	}

	zap_leaf_t *nl = zap_create_leaf(zap, tx);
	zap_leaf_split(l, nl, zap->zap_normflags != 0);

	/* set sibling pointers */
	for (int i = 0; i < (1ULL << prefix_diff); i++) {
		err = zap_set_idx_to_blk(zap, sibling + i, nl->l_blkid, tx);
		ASSERT0(err); /* we checked for i/o errors above */
	}

	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_prefix_len, >, 0);

	if (hash & (1ULL << (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len))) {
		/* we want the sibling */
		zap_put_leaf(l);
		*lp = nl;
	} else {
		zap_put_leaf(nl);
		*lp = l;
	}

	return (0);
}

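/*
 * Release the leaf and, if its prefix already consumes every pointer
 * table bit while the leaf is nearly full (or a table grow is already in
 * flight), try to grow the pointer table now so the next split of this
 * leaf has spare table bits to distinguish the new sibling.
 */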
static void
zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l,
    const void *tag, dmu_tx_t *tx)
{
	zap_t *zap = zn->zn_zap;
	int shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
	int leaffull = (zap_leaf_phys(l)->l_hdr.lh_prefix_len == shift &&
	    zap_leaf_phys(l)->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER);

	zap_put_leaf(l);

	if (leaffull || zap_f_phys(zap)->zap_ptrtbl.zt_nextblk) {
		/*
		 * We are in the middle of growing the pointer table, or
		 * this leaf will soon make us grow it.
		 */
		if (zap_tryupgradedir(zap, tx) == 0) {
			objset_t *os = zap->zap_objset;
			uint64_t zapobj = zap->zap_object;

			zap_unlockdir(zap, tag);
			int err = zap_lockdir(os, zapobj, tx,
			    RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap);
			zap = zn->zn_zap;
			if (err != 0)
				return;
		}

		/* could have finished growing while our locks were down */
		if (zap_f_phys(zap)->zap_ptrtbl.zt_shift == shift)
			(void) zap_grow_ptrtbl(zap, tx);
	}
}

static int
fzap_checkname(zap_name_t *zn)
{
	if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	return (0);
}

static int
fzap_checksize(uint64_t integer_size, uint64_t num_integers)
{
	/* Only integer sizes supported by C */
	switch (integer_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	if (integer_size * num_integers > ZAP_MAXVALUELEN)
		return (SET_ERROR(E2BIG));

	return (0);
}

static int
fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers)
{
	int err = fzap_checkname(zn);
	if (err != 0)
		return (err);
	return (fzap_checksize(integer_size, num_integers));
}

/*
 * Routines for manipulating attributes.
 */
int
fzap_lookup(zap_name_t *zn,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    char *realname, int rn_len, boolean_t *ncp)
{
	zap_leaf_t *l;
	zap_entry_handle_t zeh;

	int err = fzap_checkname(zn);
	if (err != 0)
		return (err);

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		if ((err = fzap_checksize(integer_size, num_integers)) != 0) {
			zap_put_leaf(l);
			return (err);
		}

		err = zap_entry_read(&zeh, integer_size, num_integers, buf);
		(void) zap_entry_read_name(zn->zn_zap, &zeh, rn_len, realname);
		if (ncp) {
			*ncp = zap_entry_normalization_conflict(&zeh,
			    zn, NULL, zn->zn_zap);
		}
	}

	zap_put_leaf(l);
	return (err);
}

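/*
 * Add an entry with the given collision differentiator (cd). If
 * zap_entry_create() returns EAGAIN the target leaf is out of space, so
 * we split it with zap_expand_leaf() and retry the insert in whichever
 * half now covers the hash.
 */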
int
fzap_add_cd(zap_name_t *zn,
    uint64_t integer_size, uint64_t num_integers,
    const void *val, uint32_t cd, const void *tag, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;
	zap_t *zap = zn->zn_zap;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT(!zap->zap_ismicro);
	ASSERT(fzap_check(zn, integer_size, num_integers) == 0);

	err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
retry:
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		err = SET_ERROR(EEXIST);
		goto out;
	}
	if (err != ENOENT)
		goto out;

	err = zap_entry_create(l, zn, cd,
	    integer_size, num_integers, val, &zeh);

	if (err == 0) {
		zap_increment_num_entries(zap, 1, tx);
	} else if (err == EAGAIN) {
		err = zap_expand_leaf(zn, l, tag, tx, &l);
		zap = zn->zn_zap;	/* zap_expand_leaf() may change zap */
		if (err == 0) {
			goto retry;
		} else if (err == ENOSPC) {
			/*
			 * If we failed to expand the leaf, then bail out
			 * as there is no point trying
			 * zap_put_leaf_maybe_grow_ptrtbl().
			 */
			return (err);
		}
	}

out:
	if (zap != NULL)
		zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
	return (err);
}

int
fzap_add(zap_name_t *zn,
    uint64_t integer_size, uint64_t num_integers,
    const void *val, const void *tag, dmu_tx_t *tx)
{
	int err = fzap_check(zn, integer_size, num_integers);
	if (err != 0)
		return (err);

	return (fzap_add_cd(zn, integer_size, num_integers,
	    val, ZAP_NEED_CD, tag, tx));
}

int
fzap_update(zap_name_t *zn,
    int integer_size, uint64_t num_integers, const void *val,
    const void *tag, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	boolean_t create;
	zap_entry_handle_t zeh;
	zap_t *zap = zn->zn_zap;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	err = fzap_check(zn, integer_size, num_integers);
	if (err != 0)
		return (err);

	err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
retry:
	err = zap_leaf_lookup(l, zn, &zeh);
	create = (err == ENOENT);
	ASSERT(err == 0 || err == ENOENT);

	if (create) {
		err = zap_entry_create(l, zn, ZAP_NEED_CD,
		    integer_size, num_integers, val, &zeh);
		if (err == 0)
			zap_increment_num_entries(zap, 1, tx);
	} else {
		err = zap_entry_update(&zeh, integer_size, num_integers, val);
	}

	if (err == EAGAIN) {
		err = zap_expand_leaf(zn, l, tag, tx, &l);
		zap = zn->zn_zap;	/* zap_expand_leaf() may change zap */
		if (err == 0)
			goto retry;
	}

	if (zap != NULL)
		zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
	return (err);
}

int
fzap_length(zap_name_t *zn,
    uint64_t *integer_size, uint64_t *num_integers)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err != 0)
		goto out;

	if (integer_size != NULL)
		*integer_size = zeh.zeh_integer_size;
	if (num_integers != NULL)
		*num_integers = zeh.zeh_num_integers;
out:
	zap_put_leaf(l);
	return (err);
}

int
fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		zap_entry_remove(&zeh);
		zap_increment_num_entries(zn->zn_zap, -1, tx);

		if (zap_leaf_phys(l)->l_hdr.lh_nentries == 0 &&
		    zap_shrink_enabled)
			return (zap_shrink(zn, l, tx));
	}
	zap_put_leaf(l);
	return (err);
}

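/*
 * Prefetch the single leaf block that this name's hash maps to, so that a
 * subsequent lookup of the name does not have to wait for the read.
 */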
void
fzap_prefetch(zap_name_t *zn)
{
	uint64_t blk;
	zap_t *zap = zn->zn_zap;

	uint64_t idx = ZAP_HASH_IDX(zn->zn_hash,
	    zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	if (zap_idx_to_blk(zap, idx, &blk) != 0)
		return;
	int bs = FZAP_BLOCK_SHIFT(zap);
	dmu_prefetch_by_dnode(zap->zap_dnode, 0, blk << bs, 1 << bs,
	    ZIO_PRIORITY_SYNC_READ);
}

/*
 * Helper functions for consumers.
 */

uint64_t
zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
    const char *name, dmu_tx_t *tx)
{
	return (zap_create_link_dnsize(os, ot, parent_obj, name, 0, tx));
}

uint64_t
zap_create_link_dnsize(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
    const char *name, int dnodesize, dmu_tx_t *tx)
{
	uint64_t new_obj;

	new_obj = zap_create_dnsize(os, ot, DMU_OT_NONE, 0, dnodesize, tx);
	VERIFY(new_obj != 0);
	VERIFY0(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
	    tx));

	return (new_obj);
}

int
zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
    char *name)
{
	zap_cursor_t zc;
	int err;

	if (mask == 0)
		mask = -1ULL;

	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
	for (zap_cursor_init(&zc, os, zapobj);
	    (err = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((za->za_first_integer & mask) == (value & mask)) {
			(void) strlcpy(name, za->za_name, MAXNAMELEN);
			break;
		}
	}
	zap_cursor_fini(&zc);
	kmem_free(za, sizeof (*za));
	return (err);
}

int
zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	int err = 0;

	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;
		}
		err = zap_add(os, intoobj, za->za_name,
		    8, 1, &za->za_first_integer, tx);
		if (err != 0)
			break;
	}
	zap_cursor_fini(&zc);
	kmem_free(za, sizeof (*za));
	return (err);
}

int
zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
    uint64_t value, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	int err = 0;

	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;
		}
		err = zap_add(os, intoobj, za->za_name,
		    8, 1, &value, tx);
		if (err != 0)
			break;
	}
	zap_cursor_fini(&zc);
	kmem_free(za, sizeof (*za));
	return (err);
}

int
zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	int err = 0;

	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		uint64_t delta = 0;

		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;
		}

		err = zap_lookup(os, intoobj, za->za_name, 8, 1, &delta);
		if (err != 0 && err != ENOENT)
			break;
		delta += za->za_first_integer;
		err = zap_update(os, intoobj, za->za_name, 8, 1, &delta, tx);
		if (err != 0)
			break;
	}
	zap_cursor_fini(&zc);
	kmem_free(za, sizeof (*za));
	return (err);
}

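/*
 * The *_int and *_int_key helpers below key entries by a 64-bit integer
 * formatted as a "%llx" string name, so a plain ZAP object can be used as
 * a set or map of integers.
 */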
int
zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
	return (zap_add(os, obj, name, 8, 1, &value, tx));
}

int
zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
	return (zap_remove(os, obj, name, tx));
}

int
zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
	return (zap_lookup(os, obj, name, 8, 1, &value));
}

int
zap_add_int_key(objset_t *os, uint64_t obj,
    uint64_t key, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_add(os, obj, name, 8, 1, &value, tx));
}

int
zap_update_int_key(objset_t *os, uint64_t obj,
    uint64_t key, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_update(os, obj, name, 8, 1, &value, tx));
}

int
zap_lookup_int_key(objset_t *os, uint64_t obj, uint64_t key, uint64_t *valuep)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_lookup(os, obj, name, 8, 1, valuep));
}

int
zap_increment(objset_t *os, uint64_t obj, const char *name, int64_t delta,
    dmu_tx_t *tx)
{
	uint64_t value = 0;

	if (delta == 0)
		return (0);

	int err = zap_lookup(os, obj, name, 8, 1, &value);
	if (err != 0 && err != ENOENT)
		return (err);
	value += delta;
	if (value == 0)
		err = zap_remove(os, obj, name, tx);
	else
		err = zap_update(os, obj, name, 8, 1, &value, tx);
	return (err);
}

int
zap_increment_int(objset_t *os, uint64_t obj, uint64_t key, int64_t delta,
    dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_increment(os, obj, name, delta, tx));
}

/*
 * Routines for iterating over the attributes.
 */

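/*
 * A cursor is just the (zc_hash, zc_cd) position of the next entry to
 * return; entries come back in hash order. When a leaf is exhausted, the
 * cursor advances to the first hash value outside that leaf's prefix, so
 * iteration can resume from a serialized position even if leaves have
 * split or shrunk in the meantime.
 */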
int
fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
{
	int err = ENOENT;
	zap_entry_handle_t zeh;
	zap_leaf_t *l;

	/* retrieve the next entry at or after zc_hash/zc_cd */
	/* if no entry, return ENOENT */

	/*
	 * If we are reading from the beginning, we're almost certain to
	 * iterate over the entire ZAP object. If there are multiple leaf
	 * blocks (freeblk > 2), prefetch the whole object (up to
	 * dmu_prefetch_max bytes), so that we read the leaf blocks
	 * concurrently. (Unless noprefetch was requested via
	 * zap_cursor_init_noprefetch()).
	 */
	if (zc->zc_hash == 0 && zap_iterate_prefetch &&
	    zc->zc_prefetch && zap_f_phys(zap)->zap_freeblk > 2) {
		dmu_prefetch_by_dnode(zap->zap_dnode, 0, 0,
		    zap_f_phys(zap)->zap_freeblk << FZAP_BLOCK_SHIFT(zap),
		    ZIO_PRIORITY_ASYNC_READ);
	}

	if (zc->zc_leaf) {
		rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);

		/*
		 * The leaf was either shrunk or split.
		 */
		if ((zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_block_type == 0) ||
		    (ZAP_HASH_IDX(zc->zc_hash,
		    zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix_len) !=
		    zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix)) {
			zap_put_leaf(zc->zc_leaf);
			zc->zc_leaf = NULL;
		}
	}

again:
	if (zc->zc_leaf == NULL) {
		err = zap_deref_leaf(zap, zc->zc_hash, NULL, RW_READER,
		    &zc->zc_leaf);
		if (err != 0)
			return (err);
	}
	l = zc->zc_leaf;

	err = zap_leaf_lookup_closest(l, zc->zc_hash, zc->zc_cd, &zeh);

	if (err == ENOENT) {
		if (zap_leaf_phys(l)->l_hdr.lh_prefix_len == 0) {
			zc->zc_hash = -1ULL;
			zc->zc_cd = 0;
		} else {
			uint64_t nocare = (1ULL <<
			    (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len)) - 1;

			zc->zc_hash = (zc->zc_hash & ~nocare) + nocare + 1;
			zc->zc_cd = 0;

			if (zc->zc_hash == 0) {
				zc->zc_hash = -1ULL;
			} else {
				zap_put_leaf(zc->zc_leaf);
				zc->zc_leaf = NULL;
				goto again;
			}
		}
	}

	if (err == 0) {
		zc->zc_hash = zeh.zeh_hash;
		zc->zc_cd = zeh.zeh_cd;
		za->za_integer_length = zeh.zeh_integer_size;
		za->za_num_integers = zeh.zeh_num_integers;
		if (zeh.zeh_num_integers == 0) {
			za->za_first_integer = 0;
		} else {
			err = zap_entry_read(&zeh, 8, 1,
			    &za->za_first_integer);
			ASSERT(err == 0 || err == EOVERFLOW);
		}
		err = zap_entry_read_name(zap, &zeh,
		    sizeof (za->za_name), za->za_name);
		ASSERT(err == 0);

		za->za_normalization_conflict =
		    zap_entry_normalization_conflict(&zeh,
		    NULL, za->za_name, zap);
	}
	rw_exit(&zc->zc_leaf->l_rwlock);
	return (err);
}

static void
zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
{
	uint64_t lastblk = 0;

	/*
	 * NB: if a leaf has more pointers than an entire ptrtbl block
	 * can hold, then it'll be accounted for more than once, since
	 * we won't have lastblk.
	 */
	for (int i = 0; i < len; i++) {
		zap_leaf_t *l;

		if (tbl[i] == lastblk)
			continue;
		lastblk = tbl[i];

		int err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
		if (err == 0) {
			zap_leaf_stats(zap, l, zs);
			zap_put_leaf(l);
		}
	}
}

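/*
 * Fill in zap_stats_t: copy the header block fields, then walk every
 * pointer table entry and accumulate per-leaf statistics for each
 * distinct leaf block.
 */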
void
fzap_get_stats(zap_t *zap, zap_stats_t *zs)
{
	int bs = FZAP_BLOCK_SHIFT(zap);
	zs->zs_blocksize = 1ULL << bs;

	/*
	 * Set zap_phys_t fields
	 */
	zs->zs_num_leafs = zap_f_phys(zap)->zap_num_leafs;
	zs->zs_num_entries = zap_f_phys(zap)->zap_num_entries;
	zs->zs_num_blocks = zap_f_phys(zap)->zap_freeblk;
	zs->zs_block_type = zap_f_phys(zap)->zap_block_type;
	zs->zs_magic = zap_f_phys(zap)->zap_magic;
	zs->zs_salt = zap_f_phys(zap)->zap_salt;

	/*
	 * Set zap_ptrtbl fields
	 */
	zs->zs_ptrtbl_len = 1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift;
	zs->zs_ptrtbl_nextblk = zap_f_phys(zap)->zap_ptrtbl.zt_nextblk;
	zs->zs_ptrtbl_blks_copied =
	    zap_f_phys(zap)->zap_ptrtbl.zt_blks_copied;
	zs->zs_ptrtbl_zt_blk = zap_f_phys(zap)->zap_ptrtbl.zt_blk;
	zs->zs_ptrtbl_zt_numblks = zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
	zs->zs_ptrtbl_zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;

	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
		/* the ptrtbl is entirely in the header block. */
		zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
		    1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
	} else {
		dmu_prefetch_by_dnode(zap->zap_dnode, 0,
		    zap_f_phys(zap)->zap_ptrtbl.zt_blk << bs,
		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks << bs,
		    ZIO_PRIORITY_SYNC_READ);

		for (int b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
		    b++) {
			dmu_buf_t *db;
			int err;

			err = dmu_buf_hold_by_dnode(zap->zap_dnode,
			    (zap_f_phys(zap)->zap_ptrtbl.zt_blk + b) << bs,
			    FTAG, &db, DMU_READ_NO_PREFETCH);
			if (err == 0) {
				zap_stats_ptrtbl(zap, db->db_data,
				    1<<(bs-3), zs);
				dmu_buf_rele(db, FTAG);
			}
		}
	}
}

/*
 * Find the last allocated block and update freeblk.
 */
static void
zap_trunc(zap_t *zap)
{
	uint64_t nentries;
	uint64_t lastblk;

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_blk > 0) {
		/* External ptrtbl */
		nentries = (1 << zap_f_phys(zap)->zap_ptrtbl.zt_shift);
		lastblk = zap_f_phys(zap)->zap_ptrtbl.zt_blk +
		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks - 1;
	} else {
		/* Embedded ptrtbl */
		nentries = (1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		lastblk = 0;
	}

	for (uint64_t idx = 0; idx < nentries; idx++) {
		uint64_t blk;
		if (zap_idx_to_blk(zap, idx, &blk) != 0)
			return;
		if (blk > lastblk)
			lastblk = blk;
	}

	ASSERT3U(lastblk, <, zap_f_phys(zap)->zap_freeblk);

	zap_f_phys(zap)->zap_freeblk = lastblk + 1;
}

/*
 * ZAP shrinking algorithm.
 *
 * We shrink the ZAP recursively, removing empty leaves. We can remove an
 * empty leaf only if it has a sibling. Sibling leaves have the same prefix
 * length and their prefixes differ only by the least significant (sibling)
 * bit. We require both siblings to be empty. This eliminates the need to
 * rehash the remaining non-empty leaf. When we have removed one of two empty
 * siblings, we set the ptrtbl entries of the removed leaf to point to the
 * remaining leaf. The prefix length of the remaining leaf is decremented. As
 * a result, it has a new prefix and it might have a new sibling. So, we
 * repeat the process.
 *
 * Steps:
 * 1. Check if a sibling leaf (sl) exists and is empty.
 * 2. Release the leaf (l) if it has the sibling bit (slbit) equal to 1.
 * 3. Release the sibling (sl) so it can be dereferenced again with a WRITER
 *    lock.
 * 4. Upgrade the zapdir lock to WRITER (once).
 * 5. Dereference the released leaves again.
 * 6. If needed, recheck whether both leaves are still siblings and empty.
 * 7. Set the ptrtbl pointers of the removed leaf (slbit 1) to point to the
 *    blkid of the remaining leaf (slbit 0).
 * 8. Free the disk block of the removed leaf (dmu_free_range).
 * 9. Decrement prefix_len of the remaining leaf.
 * 10. Repeat the steps.
 */
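/*
 * For example, suppose zt_shift is 6 and the leaves with prefixes 101100
 * and 101101 (prefix_len 6) are both empty. The 101101 leaf is removed,
 * its single ptrtbl entry is pointed at the 101100 leaf, and the survivor
 * becomes the 10110 leaf (prefix_len 5, now covering two ptrtbl entries).
 * If the 10111 leaf is also empty, the collapse repeats one level up.
 */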
static int
zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
{
	zap_t *zap = zn->zn_zap;
	int64_t zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
	uint64_t hash = zn->zn_hash;
	uint64_t prefix = zap_leaf_phys(l)->l_hdr.lh_prefix;
	uint64_t prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
	boolean_t trunc = B_FALSE;
	int err = 0;

	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
	ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix);

	boolean_t writer = B_FALSE;

	/*
	 * To avoid deadlock always deref leaves in the same order -
	 * sibling 0 first, then sibling 1.
	 */
	while (prefix_len) {
		zap_leaf_t *sl;
		int64_t prefix_diff = zt_shift - prefix_len;
		uint64_t sl_prefix = prefix ^ 1;
		uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len);
		int slbit = prefix & 1;

		ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);

		/*
		 * Check if there is a sibling by reading ptrtbl ptrs.
		 */
		if (check_sibling_ptrtbl_range(zap, sl_prefix, prefix_len) == 0)
			break;

		/*
		 * sibling 1, unlock it - we haven't yet dereferenced sibling 0.
		 */
		if (slbit == 1) {
			zap_put_leaf(l);
			l = NULL;
		}

		/*
		 * Dereference the sibling leaf and check if it is empty.
		 */
		if ((err = zap_deref_leaf(zap, sl_hash, tx, RW_READER,
		    &sl)) != 0)
			break;

		ASSERT3U(ZAP_HASH_IDX(sl_hash, prefix_len), ==, sl_prefix);

		/*
		 * Check if we have a sibling and it is empty.
		 */
		if (zap_leaf_phys(sl)->l_hdr.lh_prefix_len != prefix_len ||
		    zap_leaf_phys(sl)->l_hdr.lh_nentries != 0) {
			zap_put_leaf(sl);
			break;
		}

		zap_put_leaf(sl);

		/*
		 * If there are two empty siblings, we have work to do, so
		 * we need to lock the ZAP ptrtbl as WRITER.
		 */
		if (!writer && (writer = zap_tryupgradedir(zap, tx)) == 0) {
			/* We failed to upgrade */
			if (l != NULL) {
				zap_put_leaf(l);
				l = NULL;
			}

			/*
			 * Usually, the right way to upgrade from a READER lock
			 * to a WRITER lock is to call zap_unlockdir() and
			 * zap_lockdir(), but we do not have a tag. Instead,
			 * we do it in a more sophisticated way.
			 */
			rw_exit(&zap->zap_rwlock);
			rw_enter(&zap->zap_rwlock, RW_WRITER);
			dmu_buf_will_dirty(zap->zap_dbuf, tx);

			zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
			writer = B_TRUE;
		}

		/*
		 * Here we have a WRITER lock for the ptrtbl.
		 * Now, we need a WRITER lock for both sibling leaves.
		 * Also, we have to recheck if the leaves are still siblings
		 * and still empty.
		 */
		if (l == NULL) {
			/* sibling 0 */
			if ((err = zap_deref_leaf(zap, (slbit ? sl_hash : hash),
			    tx, RW_WRITER, &l)) != 0)
				break;

			/*
			 * The leaf isn't empty anymore or
			 * it was shrunk/split while our locks were down.
			 */
			if (zap_leaf_phys(l)->l_hdr.lh_nentries != 0 ||
			    zap_leaf_phys(l)->l_hdr.lh_prefix_len != prefix_len)
				break;
		}

		/* sibling 1 */
		if ((err = zap_deref_leaf(zap, (slbit ? hash : sl_hash), tx,
		    RW_WRITER, &sl)) != 0)
			break;

		/*
		 * The leaf isn't empty anymore or
		 * it was shrunk/split while our locks were down.
		 */
		if (zap_leaf_phys(sl)->l_hdr.lh_nentries != 0 ||
		    zap_leaf_phys(sl)->l_hdr.lh_prefix_len != prefix_len) {
			zap_put_leaf(sl);
			break;
		}

		/* If we have gotten here, we have a leaf to collapse */
		uint64_t idx = (slbit ? prefix : sl_prefix) << prefix_diff;
		uint64_t nptrs = (1ULL << prefix_diff);
		uint64_t sl_blkid = sl->l_blkid;

		/*
		 * Set the ptrtbl entries to point to the sibling 0 blkid
		 */
		if ((err = zap_set_idx_range_to_blk(zap, idx, nptrs, l->l_blkid,
		    tx)) != 0) {
			zap_put_leaf(sl);
			break;
		}

		/*
		 * Free sibling 1 disk block.
		 */
		int bs = FZAP_BLOCK_SHIFT(zap);
		if (sl_blkid == zap_f_phys(zap)->zap_freeblk - 1)
			trunc = B_TRUE;

		(void) dmu_free_range(zap->zap_objset, zap->zap_object,
		    sl_blkid << bs, 1 << bs, tx);
		zap_put_leaf(sl);

		zap_f_phys(zap)->zap_num_leafs--;

		/*
		 * Update prefix and prefix_len.
		 */
		zap_leaf_phys(l)->l_hdr.lh_prefix >>= 1;
		zap_leaf_phys(l)->l_hdr.lh_prefix_len--;

		prefix = zap_leaf_phys(l)->l_hdr.lh_prefix;
		prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
	}

	if (trunc)
		zap_trunc(zap);

	if (l != NULL)
		zap_put_leaf(l);

	return (err);
}

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_iterate_prefetch, INT, ZMOD_RW,
	"When iterating ZAP object, prefetch it");

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_shrink_enabled, INT, ZMOD_RW,
	"Enable ZAP shrinking");