xref: /dragonfly/sys/vfs/hammer/hammer_cursor.c (revision c3b249e6)
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_cursor.c,v 1.42 2008/08/06 15:38:58 dillon Exp $
 */

/*
 * HAMMER B-Tree index - cursor support routines
 */
#include "hammer.h"

static int hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive);

/*
 * Initialize a fresh cursor using the B-Tree node cache.  If the cache
 * is not available, initialize the cursor at the root of the filesystem.
 */
int
hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
		   hammer_node_cache_t cache, hammer_inode_t ip)
{
	hammer_volume_t volume;
	hammer_node_t node;
	int error;

	bzero(cursor, sizeof(*cursor));

	cursor->trans = trans;

	/*
	 * If the cursor operation is on behalf of an inode, lock
	 * the inode.
	 */
	if ((cursor->ip = ip) != NULL) {
		++ip->cursor_ip_refs;
		if (trans->type == HAMMER_TRANS_FLS)
			hammer_lock_ex(&ip->lock);
		else
			hammer_lock_sh(&ip->lock);
	}

	/*
	 * Step 1 - acquire a locked node from the cache if possible
	 */
	if (cache && cache->node) {
		node = hammer_ref_node_safe(trans->hmp, cache, &error);
		if (error == 0) {
			hammer_lock_sh(&node->lock);
			if (node->flags & HAMMER_NODE_DELETED) {
				hammer_unlock(&node->lock);
				hammer_rel_node(node);
				node = NULL;
			}
		}
	} else {
		node = NULL;
	}

	/*
	 * Step 2 - If we couldn't get a node from the cache, get
	 * the one from the root of the filesystem.
	 */
	while (node == NULL) {
		volume = hammer_get_root_volume(trans->hmp, &error);
		if (error)
			break;
		node = hammer_get_node(trans, volume->ondisk->vol0_btree_root,
				       0, &error);
		hammer_rel_volume(volume, 0);
		if (error)
			break;
		hammer_lock_sh(&node->lock);

		/*
		 * If someone got in before we could lock the node, retry.
		 */
		if (node->flags & HAMMER_NODE_DELETED) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
			node = NULL;
			continue;
		}
		if (volume->ondisk->vol0_btree_root != node->node_offset) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
			node = NULL;
			continue;
		}
	}

	/*
	 * Step 3 - finish initializing the cursor by acquiring the parent
	 */
	cursor->node = node;
	if (error == 0)
		error = hammer_load_cursor_parent(cursor, 0);
	KKASSERT(error == 0);
	/* if (error) hammer_done_cursor(cursor); */
	return(error);
}

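/*
 * Illustrative sketch, not part of the original file: the typical
 * cursor lifecycle as seen from a caller.  The cursor is initialized
 * against the inode's node cache, the search key is filled in, the
 * lookup runs, and the cursor is always torn down afterwards.  An
 * EDEADLK from the lookup is handled by retrying from scratch.
 * example_lookup() is hypothetical and the key setup shown is
 * representative only.
 */
#if 0
static int
example_lookup(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	int error;

retry:
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], ip);
	if (error)
		return(error);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;

	error = hammer_btree_lookup(&cursor);
	hammer_done_cursor(&cursor);		/* always pairs with init */
	if (error == EDEADLK)
		goto retry;
	return(error);
}
#endif
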
/*
 * Normalize a cursor.  Sometimes cursors can be left in a state
 * where node is NULL.  If the cursor is in this state, cursor up.
 */
void
hammer_normalize_cursor(hammer_cursor_t cursor)
{
	if (cursor->node == NULL) {
		KKASSERT(cursor->parent != NULL);
		hammer_cursor_up(cursor);
	}
}


/*
 * We are finished with a cursor.  We NULL out various fields as a sanity
 * check, in case the structure is inappropriately used afterwards.
 */
void
hammer_done_cursor(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	KKASSERT((cursor->flags & HAMMER_CURSOR_TRACKED) == 0);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
	}
	if (cursor->node) {
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = NULL;
	}
	if (cursor->data_buffer) {
		hammer_rel_buffer(cursor->data_buffer, 0);
		cursor->data_buffer = NULL;
	}
	if ((ip = cursor->ip) != NULL) {
		KKASSERT(ip->cursor_ip_refs > 0);
		--ip->cursor_ip_refs;
		hammer_unlock(&ip->lock);
		cursor->ip = NULL;
	}
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	/*
	 * If we deadlocked this node will be referenced.  Do a quick
	 * lock/unlock to wait for the deadlock condition to clear.
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		cursor->deadlk_node = NULL;
	}
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		cursor->deadlk_rec = NULL;
	}

	cursor->data = NULL;
	cursor->leaf = NULL;
	cursor->left_bound = NULL;
	cursor->right_bound = NULL;
	cursor->trans = NULL;
}

/*
 * Upgrade cursor->node and cursor->parent to exclusive locks.  This
 * function can return EDEADLK.
 *
 * The lock must already be held shared or exclusively by us.
 *
 * If we fail to upgrade the lock and cursor->deadlk_node is NULL,
 * we add another reference to the node that failed and set
 * cursor->deadlk_node so hammer_done_cursor() can block on it.
 */
int
hammer_cursor_upgrade(hammer_cursor_t cursor)
{
	int error;

	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	} else if (error == 0 && cursor->parent) {
		error = hammer_lock_upgrade(&cursor->parent->lock);
		if (error && cursor->deadlk_node == NULL) {
			cursor->deadlk_node = cursor->parent;
			hammer_ref_node(cursor->deadlk_node);
		}
	}
	return(error);
}

int
hammer_cursor_upgrade_node(hammer_cursor_t cursor)
{
	int error;

	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	}
	return(error);
}

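/*
 * Illustrative sketch, not part of the original file: how a caller
 * typically consumes EDEADLK from an upgrade.  The error is simply
 * propagated; hammer_done_cursor() will block on the recorded
 * deadlk_node before the whole operation is retried.  example_modify()
 * and perform_modification() are hypothetical.
 */
#if 0
static int
example_modify(hammer_cursor_t cursor)
{
	int error;

	error = hammer_cursor_upgrade(cursor);
	if (error == 0) {
		/* node (and parent, if any) now held exclusively */
		error = perform_modification(cursor);
	}
	return(error);	/* EDEADLK unwinds to hammer_done_cursor() */
}
#endif
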
/*
 * Downgrade cursor->node and cursor->parent to shared locks.  This
 * function cannot fail.
 */
void
hammer_cursor_downgrade(hammer_cursor_t cursor)
{
	if (hammer_lock_excl_owned(&cursor->node->lock, curthread))
		hammer_lock_downgrade(&cursor->node->lock);
	if (cursor->parent &&
	    hammer_lock_excl_owned(&cursor->parent->lock, curthread)) {
		hammer_lock_downgrade(&cursor->parent->lock);
	}
}

/*
 * Seek the cursor to the specified node and index.
 *
 * The caller must ref the node prior to calling this routine and release
 * it after it returns.  If the seek succeeds the cursor will gain its own
 * ref on the node.
 */
int
hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, int index)
{
	int error;

	hammer_cursor_downgrade(cursor);
	error = 0;

	if (cursor->node != node) {
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);

		if (cursor->parent) {
			hammer_unlock(&cursor->parent->lock);
			hammer_rel_node(cursor->parent);
			cursor->parent = NULL;
			cursor->parent_index = 0;
		}
		error = hammer_load_cursor_parent(cursor, 0);
	}
	cursor->index = index;
	return(error);
}

/*
 * Load the parent of cursor->node into cursor->parent.
 */
static
int
hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive)
{
	hammer_mount_t hmp;
	hammer_node_t parent;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;
	int parent_index;

	hmp = cursor->trans->hmp;

	if (cursor->node->ondisk->parent) {
		node = cursor->node;
		parent = hammer_btree_get_parent(cursor->trans, node,
						 &parent_index,
						 &error, try_exclusive);
		if (error == 0) {
			elm = &parent->ondisk->elms[parent_index];
			cursor->parent = parent;
			cursor->parent_index = parent_index;
			cursor->left_bound = &elm[0].internal.base;
			cursor->right_bound = &elm[1].internal.base;
		}
	} else {
		cursor->parent = NULL;
		cursor->parent_index = 0;
		cursor->left_bound = &hmp->root_btree_beg;
		cursor->right_bound = &hmp->root_btree_end;
		error = 0;
	}
	return(error);
}

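/*
 * Note on the bound extraction above: in an internal node the base
 * element of elms[i] is the left (inclusive) boundary of the subtree
 * elms[i] points to, and the base element of elms[i+1] is its right
 * (exclusive) boundary, so &elm[0].internal.base and
 * &elm[1].internal.base bracket every key reachable via the subtree:
 *
 *	elms[i].base <= keys in subtree < elms[i+1].base
 */
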
/*
 * Cursor up to our parent node.  Return ENOENT if we are at the root of
 * the filesystem.
 */
int
hammer_cursor_up(hammer_cursor_t cursor)
{
	int error;

	hammer_cursor_downgrade(cursor);

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return(ENOENT);

	/*
	 * Set the node to its parent.
	 */
	hammer_unlock(&cursor->node->lock);
	hammer_rel_node(cursor->node);
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

/*
 * Special cursor up given a locked cursor.  The original node is not
 * unlocked or released and the cursor is not downgraded.
 *
 * This function can return EDEADLK; if it does, the cursor's node and
 * parent linkage are restored so the caller can recover and retry.
 */
int
hammer_cursor_up_locked(hammer_cursor_t cursor)
{
	hammer_node_t save;
	int error;

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return(ENOENT);

	save = cursor->node;

	/*
	 * Set the node to its parent.
	 */
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	/*
	 * Load the new parent and attempt to lock it exclusively.  Note
	 * that we are still holding the old parent (now cursor->node)
	 * exclusively locked.  This can return EDEADLK.
	 */
	error = hammer_load_cursor_parent(cursor, 1);
	if (error) {
		cursor->parent = cursor->node;
		cursor->parent_index = cursor->index;
		cursor->node = save;
		cursor->index = 0;
	}
	return(error);
}


/*
 * Cursor down through the current node, which must be an internal node.
 *
 * This routine adjusts the cursor and sets index to 0.
 */
int
hammer_cursor_down(hammer_cursor_t cursor)
{
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * The current node becomes the current parent
	 */
	hammer_cursor_downgrade(cursor);
	node = cursor->node;
	KKASSERT(cursor->index >= 0 && cursor->index < node->ondisk->count);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
	}
	cursor->parent = node;
	cursor->parent_index = cursor->index;
	cursor->node = NULL;
	cursor->index = 0;

	/*
	 * Extract element to push into at (node,index), set bounds.
	 */
	elm = &node->ondisk->elms[cursor->parent_index];

	/*
	 * Ok, push down into elm.  elm must specify an internal or leaf
	 * node and the current node must be an internal node.
	 */
	switch(elm->base.btype) {
	case HAMMER_BTREE_TYPE_INTERNAL:
	case HAMMER_BTREE_TYPE_LEAF:
		KKASSERT(node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
		KKASSERT(elm->internal.subtree_offset != 0);
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;
		node = hammer_get_node(cursor->trans,
				       elm->internal.subtree_offset, 0, &error);
		if (error == 0) {
			KASSERT(elm->base.btype == node->ondisk->type,
				("BTYPE MISMATCH %c %c NODE %p\n",
				 elm->base.btype, node->ondisk->type, node));
			if (node->ondisk->parent != cursor->parent->node_offset) {
				panic("node %p %016llx vs %016llx\n",
				      node, node->ondisk->parent,
				      cursor->parent->node_offset);
			}
			KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
		}
		break;
	default:
		panic("hammer_cursor_down: illegal btype %02x (%c)\n",
		      elm->base.btype,
		      (elm->base.btype ? elm->base.btype : '?'));
		break;
	}
	if (error == 0) {
		hammer_lock_sh(&node->lock);
		KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
		cursor->node = node;
		cursor->index = 0;
	}
	return(error);
}

/************************************************************************
 *				DEADLOCK RECOVERY			*
 ************************************************************************
 *
 * These are the new deadlock recovery functions.  Currently they are only
 * used for the mirror propagation and physical node removal cases but
 * ultimately the intention is to use them for all deadlock recovery
 * operations.
 */
void
hammer_unlock_cursor(hammer_cursor_t cursor)
{
	hammer_node_t node;

	KKASSERT((cursor->flags & HAMMER_CURSOR_TRACKED) == 0);
	KKASSERT(cursor->node);

	/*
	 * Release the cursor's locks and track B-Tree operations on node.
	 * While being tracked our cursor can be modified by other threads
	 * and the node may be replaced.
	 */
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
	}
	node = cursor->node;
	cursor->flags |= HAMMER_CURSOR_TRACKED;
	TAILQ_INSERT_TAIL(&node->cursor_list, cursor, deadlk_entry);
	hammer_unlock(&node->lock);
}

/*
 * Get the cursor heated up again.  The cursor's node may have
 * changed and we might have to locate the new parent.
 *
 * If the exact element we were on got deleted, RIPOUT will be
 * set and we must clear ATEDISK so an iteration does not skip
 * the element after it.
 */
int
hammer_lock_cursor(hammer_cursor_t cursor)
{
	hammer_node_t node;
	int error;

	KKASSERT(cursor->flags & HAMMER_CURSOR_TRACKED);

	/*
	 * Relock the node
	 */
	for (;;) {
		node = cursor->node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		if (cursor->node == node) {
			hammer_rel_node(node);
			break;
		}
		hammer_unlock(&node->lock);
		hammer_rel_node(node);
	}

	/*
	 * Untrack the cursor, clean up, and re-establish the parent node.
	 */
	TAILQ_REMOVE(&node->cursor_list, cursor, deadlk_entry);
	cursor->flags &= ~HAMMER_CURSOR_TRACKED;

	/*
	 * If a ripout has occurred, iterations must re-test the (new)
	 * current element.  Clearing ATEDISK prevents the element from
	 * being skipped and RETEST causes it to be re-tested.
	 */
	if (cursor->flags & HAMMER_CURSOR_TRACKED_RIPOUT) {
		cursor->flags &= ~HAMMER_CURSOR_TRACKED_RIPOUT;
		cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		cursor->flags |= HAMMER_CURSOR_RETEST;
	}
	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

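/*
 * Illustrative sketch, not part of the original file: the tracked
 * cursor pattern implemented by the two functions above.  A caller
 * that must block unlocks the cursor, allowing other threads to
 * adjust it through the tracking hooks below, then relocks it and
 * honors RETEST.  blocking_operation() is hypothetical.
 */
#if 0
	hammer_unlock_cursor(cursor);
	blocking_operation();		/* cursor may be retargeted here */
	error = hammer_lock_cursor(cursor);
	if (error == 0 && (cursor->flags & HAMMER_CURSOR_RETEST)) {
		/* re-test the current element before iterating */
	}
#endif
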
/*
 * Recover from a deadlocked cursor, tracking any node removals or
 * replacements.  If the cursor's current node is removed by another
 * thread (via btree_remove()) the cursor will be seeked upwards.
 *
 * The caller is working a modifying operation and must be holding the
 * sync lock (shared).  We do not release the sync lock because this
 * would break atomicity.
 */
int
hammer_recover_cursor(hammer_cursor_t cursor)
{
	int error;

	hammer_unlock_cursor(cursor);
	KKASSERT(cursor->trans->sync_lock_refs > 0);

	/*
	 * Wait for the deadlock to clear
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		cursor->deadlk_node = NULL;
	}
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		cursor->deadlk_rec = NULL;
	}
	error = hammer_lock_cursor(cursor);
	return(error);
}

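/*
 * Illustrative sketch, not part of the original file: a modifying
 * loop built on recovery.  Instead of tearing the cursor down on
 * EDEADLK, the caller recovers in place and retries the step.
 * modify_step() is hypothetical.
 */
#if 0
	for (;;) {
		error = modify_step(cursor);	/* may return EDEADLK */
		if (error != EDEADLK)
			break;
		error = hammer_recover_cursor(cursor);
		if (error)
			break;
	}
#endif
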
/*
 * Dup ocursor to ncursor.  ncursor inherits ocursor's locks and ocursor
 * is effectively unlocked and becomes tracked.  If ocursor was not locked
 * then ncursor also inherits the tracking.
 *
 * After the caller finishes working with ncursor it must be cleaned up
 * with hammer_done_cursor(), and the caller must re-lock ocursor.
 */
hammer_cursor_t
hammer_push_cursor(hammer_cursor_t ocursor)
{
	hammer_cursor_t ncursor;
	hammer_inode_t ip;
	hammer_node_t node;
	hammer_mount_t hmp;

	hmp = ocursor->trans->hmp;
	ncursor = kmalloc(sizeof(*ncursor), hmp->m_misc, M_WAITOK | M_ZERO);
	bcopy(ocursor, ncursor, sizeof(*ocursor));

	node = ocursor->node;
	hammer_ref_node(node);
	if ((ocursor->flags & HAMMER_CURSOR_TRACKED) == 0) {
		ocursor->flags |= HAMMER_CURSOR_TRACKED;
		TAILQ_INSERT_TAIL(&node->cursor_list, ocursor, deadlk_entry);
	}
	if (ncursor->parent)
		ocursor->parent = NULL;
	ocursor->data_buffer = NULL;
	ocursor->leaf = NULL;
	ocursor->data = NULL;
	if (ncursor->flags & HAMMER_CURSOR_TRACKED)
		TAILQ_INSERT_TAIL(&node->cursor_list, ncursor, deadlk_entry);
	if ((ip = ncursor->ip) != NULL)
		++ip->cursor_ip_refs;
	if (ncursor->iprec)
		hammer_ref(&ncursor->iprec->lock);
	return(ncursor);
}

/*
 * Destroy ncursor and restore ocursor.
 *
 * This is a temporary hack for the release.  We can't afford to lose
 * the IP lock until the IP object scan code is able to deal with it,
 * so have ocursor inherit it back.
 */
void
hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;

	hmp = ncursor->trans->hmp;
	ip = ncursor->ip;
	ncursor->ip = NULL;
	if (ip)
		--ip->cursor_ip_refs;
	hammer_done_cursor(ncursor);
	kfree(ncursor, hmp->m_misc);
	KKASSERT(ocursor->ip == ip);
	hammer_lock_cursor(ocursor);
}

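/*
 * Illustrative sketch, not part of the original file: push/pop lets a
 * caller run a nested B-Tree operation without losing its logical
 * position in the tree.  nested_operation() is hypothetical.
 */
#if 0
	ncursor = hammer_push_cursor(ocursor);
	error = nested_operation(ncursor);
	hammer_pop_cursor(ocursor, ncursor);	/* relocks ocursor */
#endif
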
665  * onode is being replaced by nnode by the reblocking code.
666  */
667 void
668 hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode)
669 {
670 	hammer_cursor_t cursor;
671 
672 	while ((cursor = TAILQ_FIRST(&onode->cursor_list)) != NULL) {
673 		TAILQ_REMOVE(&onode->cursor_list, cursor, deadlk_entry);
674 		TAILQ_INSERT_TAIL(&nnode->cursor_list, cursor, deadlk_entry);
675 		KKASSERT(cursor->node == onode);
676 		cursor->node = nnode;
677 		hammer_ref_node(nnode);
678 		hammer_rel_node(onode);
679 	}
680 }
681 
/*
 * node is being removed; cursors in deadlock recovery are seeked upward
 * to the parent.
 */
void
hammer_cursor_removed_node(hammer_node_t node, hammer_node_t parent, int index)
{
	hammer_cursor_t cursor;

	KKASSERT(parent != NULL);
	while ((cursor = TAILQ_FIRST(&node->cursor_list)) != NULL) {
		KKASSERT(cursor->node == node);
		KKASSERT(cursor->index == 0);
		TAILQ_REMOVE(&node->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&parent->cursor_list, cursor, deadlk_entry);
		cursor->flags |= HAMMER_CURSOR_TRACKED_RIPOUT;
		cursor->node = parent;
		cursor->index = index;
		hammer_ref_node(parent);
		hammer_rel_node(node);
	}
}

/*
 * onode was split at index, with elements >= index moved to nnode.
 */
void
hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, int index)
{
	hammer_cursor_t cursor;

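	/*
	 * TAILQ_FOREACH cannot safely continue once the current element
	 * has been removed from the list, so each removal restarts the
	 * scan from the head of onode's cursor list.
	 */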
again:
	TAILQ_FOREACH(cursor, &onode->cursor_list, deadlk_entry) {
		KKASSERT(cursor->node == onode);
		if (cursor->index < index)
			continue;
		TAILQ_REMOVE(&onode->cursor_list, cursor, deadlk_entry);
		TAILQ_INSERT_TAIL(&nnode->cursor_list, cursor, deadlk_entry);
		cursor->node = nnode;
		cursor->index -= index;
		hammer_ref_node(nnode);
		hammer_rel_node(onode);
		goto again;
	}
}

/*
 * An element was deleted at (node, index).  Cursors sitting on the
 * deleted element are flagged for a ripout re-test; cursors at higher
 * indexes are shifted down.
 */
void
hammer_cursor_deleted_element(hammer_node_t node, int index)
{
	hammer_cursor_t cursor;

	TAILQ_FOREACH(cursor, &node->cursor_list, deadlk_entry) {
		KKASSERT(cursor->node == node);
		if (cursor->index == index) {
			cursor->flags |= HAMMER_CURSOR_TRACKED_RIPOUT;
		} else if (cursor->index > index) {
			--cursor->index;
		}
	}
}

/*
 * An element was inserted at (node, index).  Cursors at indexes >= index
 * are shifted up.
 */
void
hammer_cursor_inserted_element(hammer_node_t node, int index)
{
	hammer_cursor_t cursor;

	TAILQ_FOREACH(cursor, &node->cursor_list, deadlk_entry) {
		KKASSERT(cursor->node == node);
		if (cursor->index >= index)
			++cursor->index;
	}
}
765