/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"

static int rebalance_node(struct hammer_ioc_rebalance *rebal,
			hammer_cursor_t cursor);
static void rebalance_closeout(hammer_node_lock_t base_item, int base_count,
			hammer_btree_elm_t elm);
static void rebalance_parent_ptrs(hammer_node_lock_t base_item, int index,
			hammer_node_lock_t item, hammer_node_lock_t chld_item);

/*
 * Iterate through the specified range of object ids and rebalance B-Tree
 * leaf and internal nodes we encounter.  A forwards iteration is used.
 *
 * All leafs are at the same depth.  We use the B-Tree scan code loosely
 * to position ourselves and create degenerate cases to skip indices
 * that we have rebalanced in bulk.
 */
int
hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_rebalance *rebal)
{
	struct hammer_cursor cursor;
	hammer_btree_leaf_elm_t elm;
	int error;
	int seq;

	if ((rebal->key_beg.localization | rebal->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (rebal->key_beg.localization > rebal->key_end.localization)
		return(EINVAL);
	if (rebal->key_beg.localization == rebal->key_end.localization) {
		if (rebal->key_beg.obj_id > rebal->key_end.obj_id)
			return(EINVAL);
		/* key-space limitations - no check needed */
	}
	if (rebal->saturation < HAMMER_BTREE_INT_ELMS / 2)
		rebal->saturation = HAMMER_BTREE_INT_ELMS / 2;
	if (rebal->saturation > HAMMER_BTREE_INT_ELMS)
		rebal->saturation = HAMMER_BTREE_INT_ELMS;

	rebal->key_cur = rebal->key_beg;
	rebal->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	rebal->key_cur.localization += ip->obj_localization;

	seq = trans->hmp->flusher.act;

	/*
	 * Scan forwards.  Retries typically occur if a deadlock is detected.
	 */
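	/*
	 * Illustrative sketch (not authoritative): the scan below is
	 * structurally equivalent to
	 *
	 *	do {
	 *		position a cursor at rebal->key_cur;
	 *		iterate forwards, rebalancing each internal node
	 *		    on the way back up;
	 *	} while (error == EDEADLK);
	 *
	 * rebal->key_cur is advanced as elements are visited, so a retry
	 * after a deadlock resumes where the failed pass left off rather
	 * than starting over.
	 */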
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = rebal->key_cur;
	cursor.key_end = rebal->key_end;
	cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
	cursor.key_end.localization += ip->obj_localization;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * Cause internal nodes to be returned on the way up.  Internal nodes
	 * are not returned on the way down so we can create a degenerate
	 * case to handle internal nodes as a trailing function of their
	 * sub-trees.
	 *
	 * Note that by not setting INSERTING or PRUNING no boundary
	 * corrections will be made and a sync lock is not needed for the
	 * B-Tree scan itself.
	 */
	cursor.flags |= HAMMER_CURSOR_REBLOCKING;

	error = hammer_btree_first(&cursor);

	while (error == 0) {
		/*
		 * We only care about internal nodes visited for the last
		 * time on the way up... that is, a trailing scan of the
		 * internal node after all of its children have been recursed
		 * through.
		 */
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
			/*
			 * Leave cursor.index alone, we want to recurse
			 * through all children of the internal node before
			 * visiting it.
			 *
			 * Process the internal node on the way up after
			 * the last child's sub-tree has been balanced.
			 */
			if (cursor.index == cursor.node->ondisk->count - 1) {
				hammer_sync_lock_sh(trans);
				error = rebalance_node(rebal, &cursor);
				hammer_sync_unlock(trans);
			}
		} else {
			/*
			 * We don't need to iterate through all the leaf
			 * elements, we only care about the parent (internal)
			 * node.
			 */
			cursor.index = cursor.node->ondisk->count - 1;
		}
		if (error)
			break;

		/*
		 * Update returned scan position and do a flush if
		 * necessary.
		 */
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		rebal->key_cur = elm->base;
		++rebal->stat_ncount;

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * Iterate, stop if a signal was received.
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;
		error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK) {
		++rebal->stat_collisions;
		goto retry;
	}
	if (error == EINTR) {
		rebal->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	rebal->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
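
/*
 * For context, a userland caller is expected to drive the entry point
 * above through the HAMMER ioctl interface, roughly as in this
 * hypothetical sketch (descriptor setup and error handling omitted):
 *
 *	struct hammer_ioc_rebalance rebal;
 *
 *	bzero(&rebal, sizeof(rebal));
 *	rebal.key_beg.obj_id = HAMMER_MIN_OBJID;
 *	rebal.key_end.obj_id = HAMMER_MAX_OBJID;
 *	rebal.saturation = HAMMER_BTREE_INT_ELMS * 75 / 100;
 *	ioctl(fd, HAMMERIOC_REBALANCE, &rebal);
 *
 * Whatever saturation is requested, the clamps at the top of
 * hammer_ioc_rebalance() force it into the range
 * [HAMMER_BTREE_INT_ELMS / 2, HAMMER_BTREE_INT_ELMS].
 */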

/*
 * Rebalance an internal node, called via a trailing upward recursion.
 * All the children have already been individually rebalanced.
 *
 * To rebalance we scan the elements in the children and pack them,
 * so we actually need to lock the children and the children's children.
 *
 *	INTERNAL_NODE
 *	/ / | | | \ \
 *	C C C C C C C	children (first level) (internal or leaf nodes)
 *			children's elements (second level)
 *
 *	<<<----------	pack children's elements, possibly removing excess
 *			children after packing.
 *
 * NOTE: The mirror_tids, parent pointers, and child pointers must be
 *	 updated.  Any live tracked B-Tree nodes must be updated (we
 *	 worm out of that by not allowing any).  And boundary elements
 *	 must be preserved.
 *
 * NOTE: If the children are leaf nodes we may have a degenerate case
 *	 where there are no elements in the leafs.
 *
 * XXX live-tracked
 */
static int
rebalance_node(struct hammer_ioc_rebalance *rebal, hammer_cursor_t cursor)
{
	struct hammer_node_lock lockroot;
	hammer_node_lock_t base_item;
	hammer_node_lock_t chld_item;
	hammer_node_lock_t item;
	hammer_btree_elm_t elm;
	hammer_node_t node;
	u_int8_t type1;
	int base_count;
	int root_count;
	int avg_elms;
	int count;
	int error;
	int i;
	int n;

	/*
	 * Lock the parent node via the cursor, then collect and lock our
	 * children and children's children.
	 *
	 * By the way, this is a LOT of locks.
	 */
	hammer_node_lock_init(&lockroot, cursor->node);
	error = hammer_cursor_upgrade(cursor);
	if (error)
		goto done;
	error = hammer_btree_lock_children(cursor, 2, &lockroot);
	if (error)
		goto done;

	/*
	 * Make a copy of all the locked on-disk data to simplify the element
	 * shifting we are going to have to do.  We will modify the copy
	 * first.
	 */
	hammer_btree_lock_copy(cursor, &lockroot);

	/*
	 * Look at the first child node.
	 */
	if (TAILQ_FIRST(&lockroot.list) == NULL)
		goto done;
	type1 = TAILQ_FIRST(&lockroot.list)->node->ondisk->type;

	/*
	 * Figure out the total number of children's children and
	 * calculate the average number of elements per child.
	 *
	 * The minimum avg_elms is 1 when count > 0.  avg_elms *
	 * lockroot.count is always greater than or equal to count.
	 *
	 * If count == 0 we hit a degenerate case which causes avg_elms
	 * to also calculate as 0.
	 */
	if (hammer_debug_general & 0x1000)
		kprintf("lockroot %p count %d\n", &lockroot, lockroot.count);
	count = 0;
	TAILQ_FOREACH(item, &lockroot.list, entry) {
		if (hammer_debug_general & 0x1000)
			kprintf("add count %d\n", item->count);
		count += item->count;
		KKASSERT(item->node->ondisk->type == type1);
	}
	avg_elms = (count + (lockroot.count - 1)) / lockroot.count;
	KKASSERT(avg_elms >= 0);

	/*
	 * If the average number of elements per child is too low then
	 * calculate the desired number of children (n) such that the
	 * average number of elements is reasonable.
	 *
	 * If the desired number of children is 1 then avg_elms will
	 * wind up being count, which may still be smaller than the
	 * saturation target, but that is ok.
	 */
	if (count && avg_elms < rebal->saturation) {
		n = (count + (rebal->saturation - 1)) / rebal->saturation;
		avg_elms = (count + (n - 1)) / n;
	}
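
	/*
	 * Worked example (illustrative numbers): with lockroot.count == 8
	 * children holding count == 100 elements in total, the first pass
	 * gives avg_elms = (100 + 7) / 8 = 13.  With a saturation target
	 * of 32 we have 13 < 32, so we recompute n = (100 + 31) / 32 = 4
	 * and avg_elms = (100 + 3) / 4 = 25.  The packing loop below then
	 * fills the first four children with 25 elements each and leaves
	 * the remaining four empty, to be destroyed further down.
	 */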

	/*
	 * Pack the elements in the children.  Elements for each item are
	 * packed into base_item until avg_elms is reached, then base_item
	 * iterates.
	 *
	 * hammer_cursor_moved_element() is called for each element moved
	 * to update tracked cursors, including the index beyond the last
	 * element (at count).
	 */
	base_item = TAILQ_FIRST(&lockroot.list);
	base_count = 0;
	root_count = 0;

	TAILQ_FOREACH(item, &lockroot.list, entry) {
		node = item->node;
		KKASSERT(item->count == node->ondisk->count);
		chld_item = TAILQ_FIRST(&item->list);
		for (i = 0; i < item->count; ++i) {
			/*
			 * Closeout.  If the next element is at index 0
			 * just use the existing separator in the parent.
			 */
			if (base_count == avg_elms) {
				if (i == 0) {
					elm = &lockroot.node->ondisk->elms[
						item->index];
				} else {
					elm = &node->ondisk->elms[i];
				}
				rebalance_closeout(base_item, base_count, elm);
				base_item = TAILQ_NEXT(base_item, entry);
				KKASSERT(base_item);
				base_count = 0;
				++root_count;
			}

			/*
			 * Check for the degenerate no-work case.  Otherwise
			 * pack the element.
			 *
			 * All changes are made to the copy.
			 */
			if (item == base_item && i == base_count) {
				++base_count;
				if (chld_item)
					chld_item = TAILQ_NEXT(chld_item,
							       entry);
				continue;
			}

			/*
			 * Pack element.
			 */
			elm = &base_item->copy->elms[base_count];
			*elm = node->ondisk->elms[i];
			base_item->flags |= HAMMER_NODE_LOCK_UPDATED;

			/*
			 * Adjust the mirror_tid of the target.  The parent
			 * node (lockroot.node) should already have an
			 * aggregate mirror_tid so we do not have to update
			 * that.
			 */
			if (base_item->copy->mirror_tid <
			    node->ondisk->mirror_tid) {
				base_item->copy->mirror_tid =
					node->ondisk->mirror_tid;
				KKASSERT(lockroot.node->ondisk->mirror_tid >=
					 node->ondisk->mirror_tid);
				base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
			}

			/*
			 * We moved elm.  The parent pointers for any
			 * children of elm must be repointed.
			 */
			if (item != base_item &&
			    node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
				KKASSERT(chld_item);
				rebalance_parent_ptrs(base_item, base_count,
						      item, chld_item);
			}
			hammer_cursor_moved_element(node, base_item->node,
						    i, base_count);
			++base_count;
			if (chld_item)
				chld_item = TAILQ_NEXT(chld_item, entry);
		}

		/*
		 * Always call at the end (i == number of elements) in
		 * case a cursor is sitting indexed there.
		 */
		hammer_cursor_moved_element(node, base_item->node,
					    i, base_count);
	}
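
	/*
	 * Continuing the worked example above: when the loop exits,
	 * base_item points at the last child that received elements and
	 * base_count holds how many landed in it (25 in the sketch).
	 * The closeout below hands that child the parent's original
	 * right-hand boundary; every child after it in the list is now
	 * empty.
	 */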

	/*
	 * Packing complete, close out base_item using the right-hand
	 * boundary of the original parent.
	 *
	 * If we will be deleting nodes from the root, shift the old
	 * right-hand boundary to the new ending index.
	 */
	elm = &lockroot.node->ondisk->elms[lockroot.node->ondisk->count];
	rebalance_closeout(base_item, base_count, elm);
	++root_count;
	if (lockroot.copy->count != root_count) {
		lockroot.copy->count = root_count;
		lockroot.copy->elms[root_count] = *elm;
		lockroot.flags |= HAMMER_NODE_LOCK_UPDATED;
	}

	/*
	 * Any extra items beyond base_item are now completely empty and
	 * can be destroyed.  Queue the destruction up in the copy.  Note
	 * that none of the destroyed nodes are part of our cursor.
	 *
	 * The cursor is locked so it isn't on the tracking list.  It
	 * should have been pointing at the boundary element (at root_count).
	 * When deleting elements from the root (which is cursor.node), we
	 * have to update cursor.index manually to keep it in bounds.
	 */
	while ((base_item = TAILQ_NEXT(base_item, entry)) != NULL) {
		hammer_cursor_removed_node(base_item->node, lockroot.node,
					   base_count);
		hammer_cursor_deleted_element(lockroot.node, base_count);
		base_item->copy->type = HAMMER_BTREE_TYPE_DELETED;
		base_item->copy->count = 0;
		base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
		if (cursor->index > lockroot.copy->count)
			--cursor->index;
		++rebal->stat_deletions;
	}

	/*
	 * All done, sync the locked child tree to disk.  This will also
	 * flush and delete the deleted nodes.
	 */
	rebal->stat_nrebal += hammer_btree_sync_copy(cursor, &lockroot);
done:
	hammer_btree_unlock_children(cursor, &lockroot);
	hammer_cursor_downgrade(cursor);
	return (error);
}

/*
 * Close out the child base_item.  This node contains base_count
 * elements.
 *
 * If the node is an internal node the right-hand boundary must be
 * set to elm.
 */
static
void
rebalance_closeout(hammer_node_lock_t base_item, int base_count,
		   hammer_btree_elm_t elm)
{
	hammer_node_lock_t parent;
	hammer_btree_elm_t base_elm;
	hammer_btree_elm_t rbound_elm;
	u_int8_t save;

	/*
	 * Update the count.  NOTE: base_count can be 0 for the
	 * degenerate leaf case.
	 */
	if (hammer_debug_general & 0x1000) {
		kprintf("rebalance_closeout %016llx:",
			(long long)base_item->node->node_offset);
	}
	if (base_item->copy->count != base_count) {
		base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
		base_item->copy->count = base_count;
		if (hammer_debug_general & 0x1000)
			kprintf(" (count update)");
	}

	/*
	 * If we are closing out an internal node we must assign
	 * a right-hand boundary.  Use the element contents as the
	 * right-hand boundary.
	 *
	 * Internal nodes are required to have at least one child,
	 * otherwise the left and right boundaries would end up being
	 * the same element.  Only leaf nodes can be empty.
	 *
	 * Rebalancing may cut off an internal node such that the
	 * new right-hand boundary is the next element anyway, but
	 * we still have to make sure that subtree_offset, btype,
	 * and mirror_tid are all 0.
	 */
	if (base_item->copy->type == HAMMER_BTREE_TYPE_INTERNAL) {
		KKASSERT(base_count != 0);
		base_elm = &base_item->copy->elms[base_count];

		if (bcmp(base_elm, elm, sizeof(*elm)) != 0 ||
		    elm->internal.subtree_offset ||
		    elm->internal.mirror_tid ||
		    elm->base.btype) {
			*base_elm = *elm;
			base_elm->internal.subtree_offset = 0;
			base_elm->internal.mirror_tid = 0;
			base_elm->base.btype = 0;
			base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
			if (hammer_debug_general & 0x1000)
				kprintf(" (rhs update)");
		} else {
			if (hammer_debug_general & 0x1000)
				kprintf(" (rhs same)");
		}
	}
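
	/*
	 * Informal illustration: an internal node with count children
	 * stores count + 1 boundary elements, e.g. for count == 2:
	 *
	 *	elms[0]     elms[1]     elms[2]  <- right-hand boundary
	 *	    \       /   \       /
	 *	     child[0]    child[1]
	 *
	 * elms[count] brackets the last child but references nothing
	 * itself, which is why the block above zeroes subtree_offset,
	 * mirror_tid and btype whenever it rewrites that boundary.
	 */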

	/*
	 * The parent's boundary must be updated.  Be careful to retain
	 * the btype and non-base internal fields as that information is
	 * unrelated.
	 */
	parent = base_item->parent;
	rbound_elm = &parent->copy->elms[base_item->index + 1];
	if (bcmp(&rbound_elm->base, &elm->base, sizeof(elm->base)) != 0) {
		save = rbound_elm->base.btype;
		rbound_elm->base = elm->base;
		rbound_elm->base.btype = save;
		parent->flags |= HAMMER_NODE_LOCK_UPDATED;
		if (hammer_debug_general & 0x1000) {
			kprintf(" (parent bound update %d)",
				base_item->index + 1);
		}
	}
	if (hammer_debug_general & 0x1000)
		kprintf("\n");
}

/*
 * An element in item has moved to base_item.  We must update the parent
 * pointer of the node the element points to (which is chld_item).
 */
static
void
rebalance_parent_ptrs(hammer_node_lock_t base_item, int index,
		      hammer_node_lock_t item, hammer_node_lock_t chld_item)
{
	KKASSERT(chld_item->node->ondisk->parent == item->node->node_offset);
	chld_item->copy->parent = base_item->node->node_offset;
	chld_item->flags |= HAMMER_NODE_LOCK_UPDATED;
	hammer_cursor_parent_changed(chld_item->node,
				     item->node, base_item->node, index);
}
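
/*
 * Sketch of the repointing above (illustrative): if element i of item's
 * node was copied into slot base_count of base_item's node, the child
 * node that the element references must now name base_item's node as
 * its parent:
 *
 *	item->node->elms[i]  --copied-->  base_item->node->elms[base_count]
 *	                                      |
 *	                                      v (subtree_offset)
 *	                chld_item: copy->parent = base_item->node->node_offset
 *
 * As with every other modification in this file, the change is staged
 * in the copy and written back later by hammer_btree_sync_copy().
 */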