1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * $DragonFly: src/sys/vfs/hammer/hammer_cursor.c,v 1.40 2008/07/08 04:34:41 dillon Exp $ 35 */ 36 37 /* 38 * HAMMER B-Tree index - cursor support routines 39 */ 40 #include "hammer.h" 41 42 static int hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive); 43 44 /* 45 * Initialize a fresh cursor using the B-Tree node cache. If the cache 46 * is not available initialize a fresh cursor at the root of the filesystem. 47 */ 48 int 49 hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor, 50 hammer_node_cache_t cache, hammer_inode_t ip) 51 { 52 hammer_volume_t volume; 53 hammer_node_t node; 54 int error; 55 56 bzero(cursor, sizeof(*cursor)); 57 58 cursor->trans = trans; 59 60 /* 61 * If the cursor operation is on behalf of an inode, lock 62 * the inode. 63 */ 64 if ((cursor->ip = ip) != NULL) { 65 ++ip->cursor_ip_refs; 66 if (trans->type == HAMMER_TRANS_FLS) 67 hammer_lock_ex(&ip->lock); 68 else 69 hammer_lock_sh(&ip->lock); 70 } 71 72 /* 73 * Step 1 - acquire a locked node from the cache if possible 74 */ 75 if (cache && cache->node) { 76 node = hammer_ref_node_safe(trans->hmp, cache, &error); 77 if (error == 0) { 78 hammer_lock_sh_lowpri(&node->lock); 79 if (node->flags & HAMMER_NODE_DELETED) { 80 hammer_unlock(&node->lock); 81 hammer_rel_node(node); 82 node = NULL; 83 } 84 } 85 } else { 86 node = NULL; 87 } 88 89 /* 90 * Step 2 - If we couldn't get a node from the cache, get 91 * the one from the root of the filesystem. 92 */ 93 while (node == NULL) { 94 volume = hammer_get_root_volume(trans->hmp, &error); 95 if (error) 96 break; 97 node = hammer_get_node(trans->hmp, 98 volume->ondisk->vol0_btree_root, 99 0, &error); 100 hammer_rel_volume(volume, 0); 101 if (error) 102 break; 103 hammer_lock_sh_lowpri(&node->lock); 104 105 /* 106 * If someone got in before we could lock the node, retry. 
107 */ 108 if (node->flags & HAMMER_NODE_DELETED) { 109 hammer_unlock(&node->lock); 110 hammer_rel_node(node); 111 node = NULL; 112 continue; 113 } 114 if (volume->ondisk->vol0_btree_root != node->node_offset) { 115 hammer_unlock(&node->lock); 116 hammer_rel_node(node); 117 node = NULL; 118 continue; 119 } 120 } 121 122 /* 123 * Step 3 - finish initializing the cursor by acquiring the parent 124 */ 125 cursor->node = node; 126 if (error == 0) 127 error = hammer_load_cursor_parent(cursor, 0); 128 KKASSERT(error == 0); 129 /* if (error) hammer_done_cursor(cursor); */ 130 return(error); 131 } 132 133 /* 134 * Normalize a cursor. Sometimes cursors can be left in a state 135 * where node is NULL. If the cursor is in this state, cursor up. 136 */ 137 void 138 hammer_normalize_cursor(hammer_cursor_t cursor) 139 { 140 if (cursor->node == NULL) { 141 KKASSERT(cursor->parent != NULL); 142 hammer_cursor_up(cursor); 143 } 144 } 145 146 147 /* 148 * We are finished with a cursor. We NULL out various fields as sanity 149 * check, in case the structure is inappropriately used afterwords. 150 */ 151 void 152 hammer_done_cursor(hammer_cursor_t cursor) 153 { 154 hammer_inode_t ip; 155 156 KKASSERT((cursor->flags & HAMMER_CURSOR_TRACKED) == 0); 157 if (cursor->parent) { 158 hammer_unlock(&cursor->parent->lock); 159 hammer_rel_node(cursor->parent); 160 cursor->parent = NULL; 161 } 162 if (cursor->node) { 163 hammer_unlock(&cursor->node->lock); 164 hammer_rel_node(cursor->node); 165 cursor->node = NULL; 166 } 167 if (cursor->data_buffer) { 168 hammer_rel_buffer(cursor->data_buffer, 0); 169 cursor->data_buffer = NULL; 170 } 171 if ((ip = cursor->ip) != NULL) { 172 KKASSERT(ip->cursor_ip_refs > 0); 173 --ip->cursor_ip_refs; 174 hammer_unlock(&ip->lock); 175 cursor->ip = NULL; 176 } 177 if (cursor->iprec) { 178 hammer_rel_mem_record(cursor->iprec); 179 cursor->iprec = NULL; 180 } 181 182 /* 183 * If we deadlocked this node will be referenced. 
Do a quick 184 * lock/unlock to wait for the deadlock condition to clear. 185 */ 186 if (cursor->deadlk_node) { 187 hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk"); 188 hammer_unlock(&cursor->deadlk_node->lock); 189 hammer_rel_node(cursor->deadlk_node); 190 cursor->deadlk_node = NULL; 191 } 192 if (cursor->deadlk_rec) { 193 hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr"); 194 hammer_rel_mem_record(cursor->deadlk_rec); 195 cursor->deadlk_rec = NULL; 196 } 197 198 cursor->data = NULL; 199 cursor->leaf = NULL; 200 cursor->left_bound = NULL; 201 cursor->right_bound = NULL; 202 cursor->trans = NULL; 203 } 204 205 /* 206 * Upgrade cursor->node and cursor->parent to exclusive locks. This 207 * function can return EDEADLK. 208 * 209 * The lock must already be either held shared or already held exclusively 210 * by us. 211 * 212 * If we fail to upgrade the lock and cursor->deadlk_node is NULL, 213 * we add another reference to the node that failed and set 214 * cursor->deadlk_node so hammer_done_cursor() can block on it. 215 */ 216 int 217 hammer_cursor_upgrade(hammer_cursor_t cursor) 218 { 219 int error; 220 221 error = hammer_lock_upgrade(&cursor->node->lock); 222 if (error && cursor->deadlk_node == NULL) { 223 cursor->deadlk_node = cursor->node; 224 hammer_ref_node(cursor->deadlk_node); 225 } else if (error == 0 && cursor->parent) { 226 error = hammer_lock_upgrade(&cursor->parent->lock); 227 if (error && cursor->deadlk_node == NULL) { 228 cursor->deadlk_node = cursor->parent; 229 hammer_ref_node(cursor->deadlk_node); 230 } 231 } 232 return(error); 233 } 234 235 int 236 hammer_cursor_upgrade_node(hammer_cursor_t cursor) 237 { 238 int error; 239 240 error = hammer_lock_upgrade(&cursor->node->lock); 241 if (error && cursor->deadlk_node == NULL) { 242 cursor->deadlk_node = cursor->node; 243 hammer_ref_node(cursor->deadlk_node); 244 } 245 return(error); 246 } 247 248 /* 249 * Downgrade cursor->node and cursor->parent to shared locks. 
 * This function is the shared-lock counterpart of hammer_cursor_upgrade().
 * NOTE(review): it returns void and cannot actually return EDEADLK; the
 * original "can return EDEADLK" remark appears stale.
 *
 * Each lock is only downgraded if we currently own it exclusively, so the
 * call is a no-op for locks already held shared.
 */
void
hammer_cursor_downgrade(hammer_cursor_t cursor)
{
	if (hammer_lock_excl_owned(&cursor->node->lock, curthread))
		hammer_lock_downgrade(&cursor->node->lock);
	if (cursor->parent &&
	    hammer_lock_excl_owned(&cursor->parent->lock, curthread)) {
		hammer_lock_downgrade(&cursor->parent->lock);
	}
}

/*
 * Seek the cursor to the specified node and index.
 *
 * The caller must ref the node prior to calling this routine and release
 * it after it returns.  If the seek succeeds the cursor will gain its own
 * ref on the node.
 */
int
hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, int index)
{
	int error;

	hammer_cursor_downgrade(cursor);
	error = 0;

	if (cursor->node != node) {
		/*
		 * Swap in the new node, then drop the now-stale parent and
		 * reload it relative to the new position.
		 */
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);

		if (cursor->parent) {
			hammer_unlock(&cursor->parent->lock);
			hammer_rel_node(cursor->parent);
			cursor->parent = NULL;
			cursor->parent_index = 0;
		}
		error = hammer_load_cursor_parent(cursor, 0);
	}
	cursor->index = index;
	return (error);
}

/*
 * Load the parent of cursor->node into cursor->parent.
 * Also establishes cursor->left_bound / cursor->right_bound from the
 * parent element (or from the mount-wide root boundaries when the node
 * is the root of the B-Tree).  When try_exclusive is set the parent is
 * looked up with an exclusive lock attempt, which can fail with EDEADLK.
 */
static
int
hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive)
{
	hammer_mount_t hmp;
	hammer_node_t parent;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;
	int parent_index;

	hmp = cursor->trans->hmp;

	if (cursor->node->ondisk->parent) {
		/*
		 * Normal case: look up the parent and derive the boundary
		 * elements from the parent element pointing at our node.
		 */
		node = cursor->node;
		parent = hammer_btree_get_parent(node, &parent_index,
						 &error, try_exclusive);
		if (error == 0) {
			elm = &parent->ondisk->elms[parent_index];
			cursor->parent = parent;
			cursor->parent_index = parent_index;
			cursor->left_bound = &elm[0].internal.base;
			cursor->right_bound = &elm[1].internal.base;
		}
	} else {
		/*
		 * cursor->node is the root of the B-Tree: no parent, use
		 * the mount-wide boundaries.
		 */
		cursor->parent = NULL;
		cursor->parent_index = 0;
		cursor->left_bound = &hmp->root_btree_beg;
		cursor->right_bound = &hmp->root_btree_end;
		error = 0;
	}
	return(error);
}

/*
 * Cursor up to our parent node.  Return ENOENT if we are at the root of
 * the filesystem.
 */
int
hammer_cursor_up(hammer_cursor_t cursor)
{
	int error;

	hammer_cursor_downgrade(cursor);

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return (ENOENT);

	/*
	 * Set the node to its parent.
	 */
	hammer_unlock(&cursor->node->lock);
	hammer_rel_node(cursor->node);
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

/*
 * Special cursor up given a locked cursor.  The original node is not
 * unlocked or released and the cursor is not downgraded.
 *
 * NOTE(review): the new parent is loaded with try_exclusive set and any
 * resulting error (which looks like it includes EDEADLK) is returned
 * after restoring the cursor, so the original "EDEADLK cannot be
 * returned" remark appears suspect - confirm against callers.
 */
int
hammer_cursor_up_locked(hammer_cursor_t cursor)
{
	hammer_node_t save;
	int error;

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return (ENOENT);

	save = cursor->node;

	/*
	 * Set the node to its parent.
	 */
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	/*
	 * load the new parent, attempt to exclusively lock it.  Note that
	 * we are still holding the old parent (now cursor->node) exclusively
	 * locked.  This can return EDEADLK.
	 */
	error = hammer_load_cursor_parent(cursor, 1);
	if (error) {
		/*
		 * On failure restore the cursor to its pre-call position:
		 * the old node becomes the parent again.
		 */
		cursor->parent = cursor->node;
		cursor->parent_index = cursor->index;
		cursor->node = save;
		cursor->index = 0;
	}
	return(error);
}


/*
 * Cursor down through the current node, which must be an internal node.
 *
 * This routine adjusts the cursor and sets index to 0.
 */
int
hammer_cursor_down(hammer_cursor_t cursor)
{
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * The current node becomes the current parent
	 */
	hammer_cursor_downgrade(cursor);
	node = cursor->node;
	KKASSERT(cursor->index >= 0 && cursor->index < node->ondisk->count);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
	}
	cursor->parent = node;
	cursor->parent_index = cursor->index;
	cursor->node = NULL;
	cursor->index = 0;

	/*
	 * Extract element to push into at (node,index), set bounds.
	 */
	elm = &node->ondisk->elms[cursor->parent_index];

	/*
	 * Ok, push down into elm.  If elm specifies an internal or leaf
	 * node the current node must be an internal node.  If elm specifies
	 * a spike then the current node must be a leaf node.
	 */
	switch(elm->base.btype) {
	case HAMMER_BTREE_TYPE_INTERNAL:
	case HAMMER_BTREE_TYPE_LEAF:
		KKASSERT(node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
		KKASSERT(elm->internal.subtree_offset != 0);
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;

		/*
		 * Acquire the child node and sanity-check the on-disk
		 * linkage against what the parent element claims.
		 */
		node = hammer_get_node(cursor->trans->hmp,
				       elm->internal.subtree_offset, 0, &error);
		if (error == 0) {
			KASSERT(elm->base.btype == node->ondisk->type, ("BTYPE MISMATCH %c %c NODE %p\n", elm->base.btype, node->ondisk->type, node));
			if (node->ondisk->parent != cursor->parent->node_offset)
				panic("node %p %016llx vs %016llx\n", node, node->ondisk->parent, cursor->parent->node_offset);
			KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
		}
		break;
	default:
		panic("hammer_cursor_down: illegal btype %02x (%c)\n",
		      elm->base.btype,
		      (elm->base.btype ? elm->base.btype : '?'));
		break;
	}
	if (error == 0) {
		hammer_lock_sh(&node->lock);
		KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);
		cursor->node = node;
		cursor->index = 0;
	}
	return(error);
}

/************************************************************************
 *				DEADLOCK RECOVERY			*
 ************************************************************************
 *
 * These are the new deadlock recovery functions.  Currently they are only
 * used for the mirror propagation and physical node removal cases but
 * ultimately the intention is to use them for all deadlock recovery
 * operations.
 */
void
hammer_unlock_cursor(hammer_cursor_t cursor, int also_ip)
{
	hammer_node_t node;
	hammer_inode_t ip;

	KKASSERT((cursor->flags & HAMMER_CURSOR_TRACKED) == 0);
	KKASSERT(cursor->node);
	/*
	 * Release the cursor's locks and track B-Tree operations on node.
	 * While being tracked our cursor can be modified by other threads
	 * and node may be replaced.
	 */
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
	}
	node = cursor->node;
	cursor->flags |= HAMMER_CURSOR_TRACKED;
	TAILQ_INSERT_TAIL(&node->cursor_list, cursor, deadlk_entry);
	hammer_unlock(&node->lock);

	/* optionally release the inode lock as well */
	if (also_ip && (ip = cursor->ip) != NULL)
		hammer_unlock(&ip->lock);
}

/*
 * Get the cursor heated up again.  The cursor's node may have
 * changed and we might have to locate the new parent.
 *
 * If the exact element we were on got deleted RIPOUT will be
 * set and we must clear ATEDISK so an iteration does not skip
 * the element after it.
 */
int
hammer_lock_cursor(hammer_cursor_t cursor, int also_ip)
{
	hammer_inode_t ip;
	hammer_node_t node;
	int error;

	KKASSERT(cursor->flags & HAMMER_CURSOR_TRACKED);

	/*
	 * Relock the inode
	 */
	if (also_ip && (ip = cursor->ip) != NULL) {
		if (cursor->trans->type == HAMMER_TRANS_FLS)
			hammer_lock_ex(&ip->lock);
		else
			hammer_lock_sh(&ip->lock);
	}

	/*
	 * Relock the node.  cursor->node can be changed out from under us
	 * while we block on the lock, so loop until it stabilizes.
	 */
	for (;;) {
		node = cursor->node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		if (cursor->node == node) {
			hammer_rel_node(node);
			break;
		}
		hammer_unlock(&node->lock);
		hammer_rel_node(node);
	}

	/*
	 * Untrack the cursor, clean up, and re-establish the parent node.
	 */
	TAILQ_REMOVE(&node->cursor_list, cursor, deadlk_entry);
	cursor->flags &= ~HAMMER_CURSOR_TRACKED;

	if (cursor->flags & HAMMER_CURSOR_TRACKED_RIPOUT) {
		cursor->flags &= ~HAMMER_CURSOR_TRACKED_RIPOUT;
		cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
	}
	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

/*
 * Recover from a deadlocked cursor, tracking any node removals or
 * replacements.
 * If the cursor's current node is removed by another
 * thread (via btree_remove()) the cursor will be seeked upwards.
 *
 * Unlocks the cursor (becoming tracked), blocks until the recorded
 * deadlock condition clears, then re-locks and returns the result.
 */
int
hammer_recover_cursor(hammer_cursor_t cursor)
{
	int error;

	hammer_unlock_cursor(cursor, 0);

	/*
	 * Wait for the deadlock to clear
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		cursor->deadlk_node = NULL;
	}
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		cursor->deadlk_rec = NULL;
	}

	error = hammer_lock_cursor(cursor, 0);
	return(error);
}

/*
 * Dup ocursor to ncursor.  ncursor inherits ocursor's locks and ocursor
 * is effectively unlocked and becomes tracked.  If ocursor was not locked
 * then ncursor also inherits the tracking.
 *
 * After the caller finishes working with ncursor it must be cleaned up
 * with hammer_done_cursor(), and the caller must re-lock ocursor.
 */
hammer_cursor_t
hammer_push_cursor(hammer_cursor_t ocursor)
{
	hammer_cursor_t ncursor;
	hammer_inode_t ip;
	hammer_node_t node;

	ncursor = kmalloc(sizeof(*ncursor), M_HAMMER, M_WAITOK | M_ZERO);
	bcopy(ocursor, ncursor, sizeof(*ocursor));

	node = ocursor->node;
	hammer_ref_node(node);
	if ((ocursor->flags & HAMMER_CURSOR_TRACKED) == 0) {
		ocursor->flags |= HAMMER_CURSOR_TRACKED;
		TAILQ_INSERT_TAIL(&node->cursor_list, ocursor, deadlk_entry);
	}
	/*
	 * ncursor inherited ocursor's locks and references via the bcopy;
	 * strip them from ocursor so they are not released twice.
	 */
	if (ncursor->parent)
		ocursor->parent = NULL;
	ocursor->data_buffer = NULL;
	ocursor->leaf = NULL;
	ocursor->data = NULL;
	if (ncursor->flags & HAMMER_CURSOR_TRACKED)
		TAILQ_INSERT_TAIL(&node->cursor_list, ncursor, deadlk_entry);
	/*
	 * Account for the duplicated inode and in-memory record refs.
	 */
	if ((ip = ncursor->ip) != NULL) {
		++ip->cursor_ip_refs;
	}
	if (ncursor->iprec)
		hammer_ref(&ncursor->iprec->lock);
	return(ncursor);
}

/*
 * Destroy ncursor and restore ocursor
 *
 * This is a temporary hack for the release.  We can't afford to lose
 * the IP lock until the IP object scan code is able to deal with it,
 * so have ocursor inherit it back.
 */
void
hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor)
{
	hammer_inode_t ip;

	/*
	 * Detach the inode from ncursor before tearing it down so
	 * hammer_done_cursor() does not release the IP lock, which
	 * ocursor keeps holding.
	 */
	ip = ncursor->ip;
	ncursor->ip = NULL;
	if (ip)
		--ip->cursor_ip_refs;
	hammer_done_cursor(ncursor);
	kfree(ncursor, M_HAMMER);
	KKASSERT(ocursor->ip == ip);
	hammer_lock_cursor(ocursor, 0);
}

/*
 * onode is being replaced by nnode by the reblocking code.
 *
 * Retarget every tracked cursor from onode to nnode, moving the node
 * reference along with each cursor.
 */
void
hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode)
{
	hammer_cursor_t cursor;

	while ((cursor = TAILQ_FIRST(&onode->cursor_list)) != NULL) {
		TAILQ_REMOVE(&onode->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&nnode->cursor_list, cursor, deadlk_entry);
		KKASSERT(cursor->node == onode);
		cursor->node = nnode;
		hammer_ref_node(nnode);
		hammer_rel_node(onode);
	}
}

/*
 * node is being removed, cursors in deadlock recovery are seeked upward
 * to the parent.
 */
void
hammer_cursor_removed_node(hammer_node_t node, hammer_node_t parent, int index)
{
	hammer_cursor_t cursor;

	KKASSERT(parent != NULL);
	while ((cursor = TAILQ_FIRST(&node->cursor_list)) != NULL) {
		KKASSERT(cursor->node == node);
		KKASSERT(cursor->index == 0);
		TAILQ_REMOVE(&node->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&parent->cursor_list, cursor, deadlk_entry);
		/* RIPOUT forces the iteration to re-check this element */
		cursor->flags |= HAMMER_CURSOR_TRACKED_RIPOUT;
		cursor->node = parent;
		cursor->index = index;
		hammer_ref_node(parent);
		hammer_rel_node(node);
	}
}

/*
 * node was split at (onode, index) with elements >= index moved to nnode.
707 */ 708 void 709 hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, int index) 710 { 711 hammer_cursor_t cursor; 712 713 again: 714 TAILQ_FOREACH(cursor, &onode->cursor_list, deadlk_entry) { 715 KKASSERT(cursor->node == onode); 716 if (cursor->index < index) 717 continue; 718 TAILQ_REMOVE(&onode->cursor_list, cursor, deadlk_entry); 719 TAILQ_INSERT_TAIL(&nnode->cursor_list, cursor, deadlk_entry); 720 cursor->node = nnode; 721 cursor->index -= index; 722 hammer_ref_node(nnode); 723 hammer_rel_node(onode); 724 goto again; 725 } 726 } 727 728 /* 729 * Deleted element at (node, index) 730 * 731 * Shift indexes >= index 732 */ 733 void 734 hammer_cursor_deleted_element(hammer_node_t node, int index) 735 { 736 hammer_cursor_t cursor; 737 738 TAILQ_FOREACH(cursor, &node->cursor_list, deadlk_entry) { 739 KKASSERT(cursor->node == node); 740 if (cursor->index == index) { 741 cursor->flags |= HAMMER_CURSOR_TRACKED_RIPOUT; 742 } else if (cursor->index > index) { 743 --cursor->index; 744 } 745 } 746 } 747 748 /* 749 * Inserted element at (node, index) 750 * 751 * Shift indexes >= index 752 */ 753 void 754 hammer_cursor_inserted_element(hammer_node_t node, int index) 755 { 756 hammer_cursor_t cursor; 757 758 TAILQ_FOREACH(cursor, &node->cursor_list, deadlk_entry) { 759 KKASSERT(cursor->node == node); 760 if (cursor->index >= index) 761 ++cursor->index; 762 } 763 } 764 765