1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_cursor.c,v 1.42 2008/08/06 15:38:58 dillon Exp $
 */

/*
 * HAMMER B-Tree index - cursor support routines
 */
#include "hammer.h"

static int hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive);

/*
 * Initialize a fresh cursor using the B-Tree node cache.  If the cache
 * is not available initialize a fresh cursor at the root of the filesystem.
 *
 * If ip is non-NULL the inode is locked for the duration of the cursor
 * (exclusively for flush transactions, shared otherwise) and its
 * cursor_ip_refs count is bumped; hammer_done_cursor() undoes both.
 *
 * On success the cursor holds a shared-locked, referenced node and (via
 * hammer_load_cursor_parent()) its parent.
 */
int
hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
		   hammer_node_cache_t cache, hammer_inode_t ip)
{
	hammer_volume_t volume;
	hammer_node_t node;
	int error;

	bzero(cursor, sizeof(*cursor));

	cursor->trans = trans;

	/*
	 * If the cursor operation is on behalf of an inode, lock
	 * the inode.
	 */
	if ((cursor->ip = ip) != NULL) {
		++ip->cursor_ip_refs;
		if (trans->type == HAMMER_TRANS_FLS)
			hammer_lock_ex(&ip->lock);
		else
			hammer_lock_sh(&ip->lock);
	}

	/*
	 * Step 1 - acquire a locked node from the cache if possible
	 */
	if (cache && cache->node) {
		node = hammer_ref_node_safe(trans->hmp, cache, &error);
		if (error == 0) {
			hammer_lock_sh(&node->lock);
			/* cached node may have been deleted before we locked */
			if (node->flags & HAMMER_NODE_DELETED) {
				hammer_unlock(&node->lock);
				hammer_rel_node(node);
				node = NULL;
			}
		}
	} else {
		node = NULL;
	}

	/*
	 * Step 2 - If we couldn't get a node from the cache, get
	 * the one from the root of the filesystem.
	 */
	while (node == NULL) {
		volume = hammer_get_root_volume(trans->hmp, &error);
		if (error)
			break;
		node = hammer_get_node(trans, volume->ondisk->vol0_btree_root,
				       0, &error);
		hammer_rel_volume(volume, 0);
		if (error)
			break;
		hammer_lock_sh(&node->lock);

		/*
		 * If someone got in before we could lock the node, retry.
		 */
		if (node->flags & HAMMER_NODE_DELETED) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
			node = NULL;
			continue;
		}
		/*
		 * Re-check that the node is still the B-Tree root.
		 *
		 * NOTE(review): volume->ondisk is read here after
		 * hammer_rel_volume() above; presumably the root volume
		 * remains mapped for the life of the mount -- confirm
		 * against hammer_volume lifetime rules.
		 */
		if (volume->ondisk->vol0_btree_root != node->node_offset) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
			node = NULL;
			continue;
		}
	}

	/*
	 * Step 3 - finish initializing the cursor by acquiring the parent
	 */
	cursor->node = node;
	if (error == 0)
		error = hammer_load_cursor_parent(cursor, 0);
	KKASSERT(error == 0);
	/* if (error) hammer_done_cursor(cursor); */
	return(error);
}

/*
 * Normalize a cursor.  Sometimes cursors can be left in a state
 * where node is NULL.  If the cursor is in this state, cursor up.
 */
void
hammer_normalize_cursor(hammer_cursor_t cursor)
{
	if (cursor->node == NULL) {
		KKASSERT(cursor->parent != NULL);
		hammer_cursor_up(cursor);
	}
}


/*
 * We are finished with a cursor.  We NULL out various fields as sanity
 * check, in case the structure is inappropriately used afterwards.
 *
 * Releases, in order: the parent node, the current node, the data buffer,
 * the inode lock/ref taken by hammer_init_cursor(), and any in-memory
 * record reference.  If a deadlock was recorded (deadlk_node/deadlk_rec)
 * we block until the deadlock condition clears before releasing it.
 */
void
hammer_done_cursor(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	/* a tracked cursor must be re-locked (untracked) before teardown */
	KKASSERT((cursor->flags & HAMMER_CURSOR_TRACKED) == 0);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
	}
	if (cursor->node) {
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = NULL;
	}
	if (cursor->data_buffer) {
		hammer_rel_buffer(cursor->data_buffer, 0);
		cursor->data_buffer = NULL;
	}
	if ((ip = cursor->ip) != NULL) {
		KKASSERT(ip->cursor_ip_refs > 0);
		--ip->cursor_ip_refs;
		hammer_unlock(&ip->lock);
		cursor->ip = NULL;
	}
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	/*
	 * If we deadlocked this node will be referenced.  Do a quick
	 * lock/unlock to wait for the deadlock condition to clear.
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		cursor->deadlk_node = NULL;
	}
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		cursor->deadlk_rec = NULL;
	}

	cursor->data = NULL;
	cursor->leaf = NULL;
	cursor->left_bound = NULL;
	cursor->right_bound = NULL;
	cursor->trans = NULL;
}

/*
 * Upgrade cursor->node and cursor->parent to exclusive locks.  This
 * function can return EDEADLK.
 *
 * The lock must already be either held shared or already held exclusively
 * by us.
 *
 * If we fail to upgrade the lock and cursor->deadlk_node is NULL,
 * we add another reference to the node that failed and set
 * cursor->deadlk_node so hammer_done_cursor() can block on it.
 */
int
hammer_cursor_upgrade(hammer_cursor_t cursor)
{
	int error;

	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	} else if (error == 0 && cursor->parent) {
		/* node upgraded ok; now try the parent */
		error = hammer_lock_upgrade(&cursor->parent->lock);
		if (error && cursor->deadlk_node == NULL) {
			cursor->deadlk_node = cursor->parent;
			hammer_ref_node(cursor->deadlk_node);
		}
	}
	return(error);
}

/*
 * Upgrade only cursor->node to an exclusive lock, leaving the parent
 * alone.  Same EDEADLK/deadlk_node semantics as hammer_cursor_upgrade().
 */
int
hammer_cursor_upgrade_node(hammer_cursor_t cursor)
{
	int error;

	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	}
	return(error);
}

/*
 * Downgrade cursor->node and cursor->parent to shared locks.
 *
 * (The locks are only downgraded if we actually own them exclusively;
 * this function cannot fail and returns nothing.)
 */
void
hammer_cursor_downgrade(hammer_cursor_t cursor)
{
	if (hammer_lock_excl_owned(&cursor->node->lock, curthread))
		hammer_lock_downgrade(&cursor->node->lock);
	if (cursor->parent &&
	    hammer_lock_excl_owned(&cursor->parent->lock, curthread)) {
		hammer_lock_downgrade(&cursor->parent->lock);
	}
}

/*
 * Seek the cursor to the specified node and index.
 *
 * The caller must ref the node prior to calling this routine and release
 * it after it returns.  If the seek succeeds the cursor will gain its own
 * ref on the node.
 */
int
hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, int index)
{
	int error;

	hammer_cursor_downgrade(cursor);
	error = 0;

	if (cursor->node != node) {
		/* swap in the new node and reload its parent/bounds */
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);

		if (cursor->parent) {
			hammer_unlock(&cursor->parent->lock);
			hammer_rel_node(cursor->parent);
			cursor->parent = NULL;
			cursor->parent_index = 0;
		}
		error = hammer_load_cursor_parent(cursor, 0);
	}
	cursor->index = index;
	return (error);
}

/*
 * Load the parent of cursor->node into cursor->parent.
 *
 * Also sets cursor->left_bound/right_bound: either from the parent's
 * internal element bracketing the child, or from the mount's root
 * B-Tree bounds when the node has no parent (it is the root).
 * try_exclusive is passed through to hammer_btree_get_parent().
 */
static
int
hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive)
{
	hammer_mount_t hmp;
	hammer_node_t parent;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;
	int parent_index;

	hmp = cursor->trans->hmp;

	if (cursor->node->ondisk->parent) {
		node = cursor->node;
		parent = hammer_btree_get_parent(cursor->trans, node,
						 &parent_index,
						 &error, try_exclusive);
		if (error == 0) {
			elm = &parent->ondisk->elms[parent_index];
			cursor->parent = parent;
			cursor->parent_index = parent_index;
			/* bounds bracket the child subtree: elm[i]..elm[i+1] */
			cursor->left_bound = &elm[0].internal.base;
			cursor->right_bound = &elm[1].internal.base;
		}
	} else {
		/* cursor->node is the B-Tree root; use filesystem bounds */
		cursor->parent = NULL;
		cursor->parent_index = 0;
		cursor->left_bound = &hmp->root_btree_beg;
		cursor->right_bound = &hmp->root_btree_end;
		error = 0;
	}
	return(error);
}

/*
 * Cursor up to our parent node.  Return ENOENT if we are at the root of
 * the filesystem.
 */
int
hammer_cursor_up(hammer_cursor_t cursor)
{
	int error;

	hammer_cursor_downgrade(cursor);

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return (ENOENT);

	/*
	 * Set the node to its parent.
	 */
	hammer_unlock(&cursor->node->lock);
	hammer_rel_node(cursor->node);
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

/*
 * Special cursor up given a locked cursor.  The original node is not
 * unlocked or released and the cursor is not downgraded.
 *
 * This function will recover from deadlocks.  EDEADLK cannot be returned.
 *
 * NOTE(review): the comment above is inherited; the EDEADLK from
 * hammer_load_cursor_parent(..., 1) is propagated to the caller after
 * restoring the cursor -- verify callers treat a nonzero return as
 * "retry", not "cannot happen".
 */
int
hammer_cursor_up_locked(hammer_cursor_t cursor)
{
	hammer_node_t save;
	int error;

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return (ENOENT);

	save = cursor->node;

	/*
	 * Set the node to its parent.
	 */
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	/*
	 * load the new parent, attempt to exclusively lock it.  Note that
	 * we are still holding the old parent (now cursor->node) exclusively
	 * locked.  This can return EDEADLK.
	 */
	error = hammer_load_cursor_parent(cursor, 1);
	if (error) {
		/* restore the original node/parent relationship on failure */
		cursor->parent = cursor->node;
		cursor->parent_index = cursor->index;
		cursor->node = save;
		cursor->index = 0;
	}
	return(error);
}


/*
 * Cursor down through the current node, which must be an internal node.
 *
 * This routine adjusts the cursor and sets index to 0.
 */
int
hammer_cursor_down(hammer_cursor_t cursor)
{
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * The current node becomes the current parent
	 */
	hammer_cursor_downgrade(cursor);
	node = cursor->node;
	KKASSERT(cursor->index >= 0 && cursor->index < node->ondisk->count);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
	}
	cursor->parent = node;
	cursor->parent_index = cursor->index;
	cursor->node = NULL;
	cursor->index = 0;

	/*
	 * Extract element to push into at (node,index), set bounds.
	 */
	elm = &node->ondisk->elms[cursor->parent_index];

	/*
	 * Ok, push down into elm.  If elm specifies an internal or leaf
	 * node the current node must be an internal node.  If elm specifies
	 * a spike then the current node must be a leaf node.
	 */
	switch(elm->base.btype) {
	case HAMMER_BTREE_TYPE_INTERNAL:
	case HAMMER_BTREE_TYPE_LEAF:
		KKASSERT(node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
		KKASSERT(elm->internal.subtree_offset != 0);
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;
		node = hammer_get_node(cursor->trans,
				       elm->internal.subtree_offset, 0, &error);
		if (error == 0) {
			/* sanity: child type and parent backpointer agree */
			KASSERT(elm->base.btype == node->ondisk->type, ("BTYPE MISMATCH %c %c NODE %p\n", elm->base.btype, node->ondisk->type, node));
			if (node->ondisk->parent != cursor->parent->node_offset)
				panic("node %p %016llx vs %016llx\n", node, node->ondisk->parent, cursor->parent->node_offset);
			KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
		}
		break;
	default:
		panic("hammer_cursor_down: illegal btype %02x (%c)\n",
		      elm->base.btype,
		      (elm->base.btype ? elm->base.btype : '?'));
		break;
	}
	if (error == 0) {
		hammer_lock_sh(&node->lock);
		KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);
		cursor->node = node;
		cursor->index = 0;
	}
	return(error);
}

/************************************************************************
 *				DEADLOCK RECOVERY			*
 ************************************************************************
 *
 * These are the new deadlock recovery functions.  Currently they are only
 * used for the mirror propagation and physical node removal cases but
 * ultimately the intention is to use them for all deadlock recovery
 * operations.
 */

/*
 * Unlock the cursor's node (and parent) and place the cursor on the
 * node's cursor_list so concurrent B-Tree operations can track and
 * adjust it.  If also_ip is set the inode lock is dropped as well
 * (the inode ref is retained).  Paired with hammer_lock_cursor().
 */
void
hammer_unlock_cursor(hammer_cursor_t cursor, int also_ip)
{
	hammer_node_t node;
	hammer_inode_t ip;

	KKASSERT((cursor->flags & HAMMER_CURSOR_TRACKED) == 0);
	KKASSERT(cursor->node);

	/*
	 * Release the cursor's locks and track B-Tree operations on node.
	 * While being tracked our cursor can be modified by other threads
	 * and the node may be replaced.
	 */
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
	}
	node = cursor->node;
	cursor->flags |= HAMMER_CURSOR_TRACKED;
	TAILQ_INSERT_TAIL(&node->cursor_list, cursor, deadlk_entry);
	hammer_unlock(&node->lock);

	if (also_ip && (ip = cursor->ip) != NULL)
		hammer_unlock(&ip->lock);
}

/*
 * Get the cursor heated up again.  The cursor's node may have
 * changed and we might have to locate the new parent.
 *
 * If the exact element we were on got deleted RIPOUT will be
 * set and we must clear ATEDISK so an iteration does not skip
 * the element after it.
 */
int
hammer_lock_cursor(hammer_cursor_t cursor, int also_ip)
{
	hammer_inode_t ip;
	hammer_node_t node;
	int error;

	KKASSERT(cursor->flags & HAMMER_CURSOR_TRACKED);

	/*
	 * Relock the inode
	 */
	if (also_ip && (ip = cursor->ip) != NULL) {
		if (cursor->trans->type == HAMMER_TRANS_FLS)
			hammer_lock_ex(&ip->lock);
		else
			hammer_lock_sh(&ip->lock);
	}

	/*
	 * Relock the node.  cursor->node can be changed out from under us
	 * by other threads while we block on the lock, so loop until the
	 * node we locked is still the cursor's node.
	 */
	for (;;) {
		node = cursor->node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		if (cursor->node == node) {
			/* drop our temporary ref; cursor already holds one */
			hammer_rel_node(node);
			break;
		}
		hammer_unlock(&node->lock);
		hammer_rel_node(node);
	}

	/*
	 * Untrack the cursor, clean up, and re-establish the parent node.
	 */
	TAILQ_REMOVE(&node->cursor_list, cursor, deadlk_entry);
	cursor->flags &= ~HAMMER_CURSOR_TRACKED;

	/*
	 * If a ripout has occurred iterations must re-test the (new)
	 * current element.  Clearing ATEDISK prevents the element from
	 * being skipped and RETEST causes it to be re-tested.
	 */
	if (cursor->flags & HAMMER_CURSOR_TRACKED_RIPOUT) {
		cursor->flags &= ~HAMMER_CURSOR_TRACKED_RIPOUT;
		cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		cursor->flags |= HAMMER_CURSOR_RETEST;
	}
	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

/*
 * Recover from a deadlocked cursor, tracking any node removals or
 * replacements.  If the cursor's current node is removed by another
 * thread (via btree_remove()) the cursor will be seeked upwards.
 *
 * The caller is working a modifying operation and must be holding the
 * sync lock (shared).  We do not release the sync lock because this
 * would break atomicity.
 */
int
hammer_recover_cursor(hammer_cursor_t cursor)
{
	int error;

	hammer_unlock_cursor(cursor, 0);
	KKASSERT(cursor->trans->sync_lock_refs > 0);

	/*
	 * Wait for the deadlock to clear
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		cursor->deadlk_node = NULL;
	}
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		cursor->deadlk_rec = NULL;
	}
	error = hammer_lock_cursor(cursor, 0);
	return(error);
}

/*
 * Dup ocursor to ncursor.  ncursor inherits ocursor's locks and ocursor
 * is effectively unlocked and becomes tracked.  If ocursor was not locked
 * then ncursor also inherits the tracking.
 *
 * After the caller finishes working with ncursor it must be cleaned up
 * with hammer_done_cursor(), and the caller must re-lock ocursor.
 */
hammer_cursor_t
hammer_push_cursor(hammer_cursor_t ocursor)
{
	hammer_cursor_t ncursor;
	hammer_inode_t ip;
	hammer_node_t node;
	hammer_mount_t hmp;

	hmp = ocursor->trans->hmp;
	ncursor = kmalloc(sizeof(*ncursor), hmp->m_misc, M_WAITOK | M_ZERO);
	bcopy(ocursor, ncursor, sizeof(*ocursor));

	node = ocursor->node;
	hammer_ref_node(node);
	if ((ocursor->flags & HAMMER_CURSOR_TRACKED) == 0) {
		/* ocursor was locked: it gives up its lock and is tracked */
		ocursor->flags |= HAMMER_CURSOR_TRACKED;
		TAILQ_INSERT_TAIL(&node->cursor_list, ocursor, deadlk_entry);
	}
	/*
	 * ncursor took over ocursor's parent/data/leaf references via the
	 * bcopy; clear them in ocursor so they are not double-released.
	 */
	if (ncursor->parent)
		ocursor->parent = NULL;
	ocursor->data_buffer = NULL;
	ocursor->leaf = NULL;
	ocursor->data = NULL;
	if (ncursor->flags & HAMMER_CURSOR_TRACKED)
		TAILQ_INSERT_TAIL(&node->cursor_list, ncursor, deadlk_entry);
	if ((ip = ncursor->ip) != NULL) {
		++ip->cursor_ip_refs;
	}
	if (ncursor->iprec)
		hammer_ref(&ncursor->iprec->lock);
	return(ncursor);
}

/*
 * Destroy ncursor and restore ocursor
 *
 * This is a temporary hack for the release.  We can't afford to lose
 * the IP lock until the IP object scan code is able to deal with it,
 * so have ocursor inherit it back.
 */
void
hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;

	hmp = ncursor->trans->hmp;
	ip = ncursor->ip;
	/* strip the ip from ncursor so hammer_done_cursor() keeps its lock */
	ncursor->ip = NULL;
	if (ip)
		--ip->cursor_ip_refs;
	hammer_done_cursor(ncursor);
	kfree(ncursor, hmp->m_misc);
	KKASSERT(ocursor->ip == ip);
	hammer_lock_cursor(ocursor, 0);
}

/*
 * onode is being replaced by nnode by the reblocking code.
 *
 * Migrate every tracked cursor from onode's list to nnode, moving the
 * node reference along with it.
 */
void
hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode)
{
	hammer_cursor_t cursor;

	while ((cursor = TAILQ_FIRST(&onode->cursor_list)) != NULL) {
		TAILQ_REMOVE(&onode->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&nnode->cursor_list, cursor, deadlk_entry);
		KKASSERT(cursor->node == onode);
		cursor->node = nnode;
		hammer_ref_node(nnode);
		hammer_rel_node(onode);
	}
}

/*
 * node is being removed, cursors in deadlock recovery are seeked upward
 * to the parent.
 *
 * Each tracked cursor is re-pointed at (parent, index) with RIPOUT set
 * so the element is re-tested when the cursor is re-locked.
 */
void
hammer_cursor_removed_node(hammer_node_t node, hammer_node_t parent, int index)
{
	hammer_cursor_t cursor;

	KKASSERT(parent != NULL);
	while ((cursor = TAILQ_FIRST(&node->cursor_list)) != NULL) {
		KKASSERT(cursor->node == node);
		KKASSERT(cursor->index == 0);
		TAILQ_REMOVE(&node->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&parent->cursor_list, cursor, deadlk_entry);
		cursor->flags |= HAMMER_CURSOR_TRACKED_RIPOUT;
		cursor->node = parent;
		cursor->index = index;
		hammer_ref_node(parent);
		hammer_rel_node(node);
	}
}

/*
 * node was split at (onode, index) with elements >= index moved to nnode.
 *
 * Tracked cursors positioned at or beyond the split point are migrated
 * to nnode and their indexes rebased.  The scan restarts from the head
 * after each migration because the removal invalidates the iterator.
 */
void
hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, int index)
{
	hammer_cursor_t cursor;

again:
	TAILQ_FOREACH(cursor, &onode->cursor_list, deadlk_entry) {
		KKASSERT(cursor->node == onode);
		if (cursor->index < index)
			continue;
		TAILQ_REMOVE(&onode->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&nnode->cursor_list, cursor, deadlk_entry);
		cursor->node = nnode;
		cursor->index -= index;
		hammer_ref_node(nnode);
		hammer_rel_node(onode);
		goto again;
	}
}

/*
 * Deleted element at (node, index)
 *
 * Shift indexes >= index
 */
void
hammer_cursor_deleted_element(hammer_node_t node, int index)
{
	hammer_cursor_t cursor;

	TAILQ_FOREACH(cursor, &node->cursor_list, deadlk_entry) {
		KKASSERT(cursor->node == node);
		if (cursor->index == index) {
			/* exact element ripped out; force re-test on relock */
			cursor->flags |= HAMMER_CURSOR_TRACKED_RIPOUT;
		} else if (cursor->index > index) {
			--cursor->index;
		}
	}
}

/*
 * Inserted element at (node, index)
 *
 * Shift indexes >= index
 */
void
hammer_cursor_inserted_element(hammer_node_t node, int index)
{
	hammer_cursor_t cursor;

	TAILQ_FOREACH(cursor, &node->cursor_list, deadlk_entry) {
		KKASSERT(cursor->node == node);
		if (cursor->index >= index)
			++cursor->index;
	}
}
