/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_reblock.c,v 1.34 2008/11/13 02:18:43 dillon Exp $
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis.  A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * This function is called from an ioctl or via the hammer support thread.
 */

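/*
 * Illustrative invocation (a sketch, not part of the original file):
 * the reblocker is normally driven by hammer(8), e.g. "hammer reblock
 * /home 90", which fills in a struct hammer_ioc_reblock and issues an
 * ioctl against a descriptor open on the filesystem, roughly as below.
 * The HAMMERIOC_REBLOCK request code, the HAMMER_MIN_OBJID and
 * HAMMER_MAX_OBJID bounds, and the userland header paths are
 * assumptions about the ioctl API; the structure fields and the
 * HAMMER_IOC_DO_* flags are the ones used in this file.
 *
 *	#include <fcntl.h>
 *	#include <strings.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <vfs/hammer/hammer_ioctl.h>
 *
 *	static int
 *	reblock_fs(const char *path, int fill_percentage)
 *	{
 *		struct hammer_ioc_reblock reblock;
 *		int fd;
 *		int error;
 *
 *		if ((fd = open(path, O_RDONLY)) < 0)
 *			return(-1);
 *		bzero(&reblock, sizeof(reblock));
 *		reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
 *		reblock.key_end.obj_id = HAMMER_MAX_OBJID;
 *		reblock.head.flags = HAMMER_IOC_DO_BTREE |
 *				     HAMMER_IOC_DO_INODES |
 *				     HAMMER_IOC_DO_DIRS |
 *				     HAMMER_IOC_DO_DATA;
 *		reblock.free_level = (int)((int64_t)HAMMER_LARGEBLOCK_SIZE *
 *					   (100 - fill_percentage) / 100);
 *		error = ioctl(fd, HAMMERIOC_REBLOCK, &reblock);
 *		close(fd);
 *		return(error);
 *	}
 *
 * A fill_percentage of 90 asks the reblocker to repack any big-block
 * that is at most 90% full (i.e. has at least 10% free space).
 */
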
#include "hammer.h"

static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
				 hammer_cursor_t cursor,
				 hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
				hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
				hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
				hammer_cursor_t cursor, hammer_btree_elm_t elm);

int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
	       struct hammer_ioc_reblock *reblock)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int checkspace_count;
	int error;
	int seq;
	int slop;

	/*
	 * A fill level <= 20% is considered an emergency.  free_level is
	 * inverted from fill_level.
	 */
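	/*
	 * (Illustration, assuming the usual 8MB big-block size for
	 * HAMMER_LARGEBLOCK_SIZE: the threshold below is then roughly
	 * 6.7MB of required free space per big-block, which corresponds
	 * to a fill level of 20% or less.)
	 */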
	if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
		slop = HAMMER_CHKSPC_EMERGENCY;
	else
		slop = HAMMER_CHKSPC_REBLOCK;

	if ((reblock->key_beg.localization | reblock->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
		return(EINVAL);
	if (reblock->free_level < 0)
		return(EINVAL);

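	/*
	 * Note on the localization encoding as this code assumes it: the
	 * upper 16 bits (HAMMER_LOCALIZE_PSEUDOFS_MASK) carry the PFS id
	 * and the lower bits (HAMMER_LOCALIZE_MASK) select the record
	 * class.  The caller supplies only the class bits; the PFS id
	 * comes from ip->obj_localization.  The exact bit split is an
	 * assumption inferred from the masks used here.
	 */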
	reblock->key_cur = reblock->key_beg;
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	reblock->key_cur.localization += ip->obj_localization;

	checkspace_count = 0;
	seq = trans->hmp->flusher.done;
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = reblock->key_cur.localization;
	cursor.key_beg.obj_id = reblock->key_cur.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = (reblock->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_end.obj_id = reblock->key_end.obj_id;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

	/*
	 * This flag allows the B-Tree scan code to return internal nodes,
	 * so we can reblock them in addition to the leaves.  Only specify
	 * it if we intend to reblock B-Tree nodes.
	 */
	if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
		cursor.flags |= HAMMER_CURSOR_REBLOCKING;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Internal or Leaf node
		 */
		KKASSERT(cursor.index < cursor.node->ondisk->count);
		elm = &cursor.node->ondisk->elms[cursor.index];
		reblock->key_cur.obj_id = elm->base.obj_id;
		reblock->key_cur.localization = elm->base.localization;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		/*
		 * If there is insufficient free space it may be due to
		 * reserved bigblocks, which flushing might fix.
		 *
		 * We must force a retest in case the unlocked cursor is
		 * moved to the end of the leaf, or moved to an internal
		 * node.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		if (hammer_checkspace(trans->hmp, slop)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			cursor.flags |= HAMMER_CURSOR_RETEST;
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
			goto skip;
		}

		/*
		 * Acquiring the sync_lock prevents the operation from
		 * crossing a synchronization boundary.
		 *
		 * NOTE: cursor.node may have changed on return.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		hammer_sync_lock_sh(trans);
		error = hammer_reblock_helper(reblock, &cursor, elm);
		hammer_sync_unlock(trans);

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * Set up for iteration; our cursor flags may be modified by
		 * other threads while we are unlocked.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;

		/*
		 * We allocate data buffers, which at the moment we do not
		 * track dirty levels for because we allow the kernel to
		 * write them.  But if we allocate too many we can still
		 * deadlock the buffer cache.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 *	    (The cursor's node and element may change!)
		 */
		if (bd_heatup()) {
			hammer_unlock_cursor(&cursor);
			bwillwrite(HAMMER_XBUFSIZE);
			hammer_lock_cursor(&cursor);
		}
skip:
		if (error == 0) {
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EWOULDBLOCK) {
		hammer_flusher_sync(trans->hmp);
		goto retry;
	}
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}

/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
		      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_mount_t hmp;
	hammer_off_t tmp_offset;
	hammer_node_ondisk_t ondisk;
	struct hammer_btree_leaf_elm leaf;
	int error;
	int bytes;
	int cur;
	int iocflags;

	error = 0;
	hmp = cursor->trans->hmp;

	/*
	 * Reblock data.  Note that data embedded in a record is reblocked
	 * by the record reblock code.  Data processing only occurs at leaf
	 * nodes and for RECORD element types.
	 */
	if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
		goto skip;
	if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
		return(0);
	tmp_offset = elm->leaf.data_offset;
	if (tmp_offset == 0)
		goto skip;

	/*
	 * NOTE: Localization restrictions may also have been set up; we
	 *	 can't just set the match flags willy-nilly here.
	 */
	switch(elm->leaf.base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		iocflags = HAMMER_IOC_DO_INODES;
		break;
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_DIRENTRY:
		iocflags = HAMMER_IOC_DO_DIRS;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		iocflags = HAMMER_IOC_DO_DATA;
		break;
	default:
		iocflags = 0;
		break;
	}
	if (reblock->head.flags & iocflags) {
		++reblock->data_count;
		reblock->data_byte_count += elm->leaf.data_len;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			kprintf("D %6d/%d\n", bytes, reblock->free_level);
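		/*
		 * Criteria for moving the data (an interpretation, not
		 * from the original comments): the big-block holding the
		 * data must have at least free_level bytes free, and 'cur'
		 * must be zero; non-zero apparently marks the big-block
		 * the allocator is currently filling, which would be
		 * pointless to reblock.  A free_level of 0 forces
		 * everything to be relocated regardless.
		 */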
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			/*
			 * This is nasty: the uncache code may have to
			 * acquire vnode locks, and because of that we
			 * can't hold the cursor locked.
			 *
			 * WARNING: See warnings in hammer_unlock_cursor()
			 *	    function.
			 */
			leaf = elm->leaf;
			hammer_unlock_cursor(cursor);
			hammer_io_direct_uncache(hmp, &leaf);
			hammer_lock_cursor(cursor);

			/*
			 * elm may have become stale or invalid; reload it.
			 * The ondisk variable is temporary only.  Note
			 * that cursor->node, and thus cursor->node->ondisk,
			 * may also have changed.
			 */
			ondisk = cursor->node->ondisk;
			elm = &ondisk->elms[cursor->index];
			if (cursor->flags & HAMMER_CURSOR_RETEST) {
				kprintf("hammer: debug: retest on "
					"reblocker uncache\n");
				error = EDEADLK;
			} else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
				   cursor->index >= ondisk->count) {
				kprintf("hammer: debug: shifted on "
					"reblocker uncache\n");
				error = EDEADLK;
			} else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
				kprintf("hammer: debug: changed on "
					"reblocker uncache\n");
				error = EDEADLK;
			}
			if (error == 0)
				error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				KKASSERT(cursor->index < ondisk->count);
				error = hammer_reblock_data(reblock,
							    cursor, elm);
			}
			if (error == 0) {
				++reblock->data_moves;
				reblock->data_byte_moves += elm->leaf.data_len;
			}
		}
	}

skip:
	/*
	 * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
	 * on initial entry only (element 0).  An internal node is reblocked
	 * when entered upward from its first leaf node only (also element 0).
	 * Further revisits of the internal node (index > 0) are ignored.
	 */
	tmp_offset = cursor->node->node_offset;
	if (cursor->index == 0 &&
	    error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
		++reblock->btree_count;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			kprintf("B %6d/%d\n", bytes, reblock->free_level);
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				if (cursor->parent) {
					KKASSERT(cursor->parent_index <
						 cursor->parent->ondisk->count);
					elm = &cursor->parent->ondisk->elms[cursor->parent_index];
				} else {
					elm = NULL;
				}
				switch(cursor->node->ondisk->type) {
				case HAMMER_BTREE_TYPE_LEAF:
					error = hammer_reblock_leaf_node(
							reblock, cursor, elm);
					break;
				case HAMMER_BTREE_TYPE_INTERNAL:
					error = hammer_reblock_int_node(
							reblock, cursor, elm);
					break;
				default:
					panic("Illegal B-Tree node type");
				}
			}
			if (error == 0) {
				++reblock->btree_moves;
			}
		}
	}

	hammer_cursor_downgrade(cursor);
	return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
					     HAMMER_CURSOR_GET_LEAF);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling blockmap_free.
	 * blockmap_free may release the entire big-block, and it cannot
	 * invalidate the big-block if the cursor is still holding a data
	 * buffer cached within it.
	 */
	hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_node_t onode;
	hammer_node_t nnode;
	int error;

	/*
	 * Don't supply a hint when allocating the leaf.  Fills are done
	 * from the leaf upwards.
	 */
	onode = cursor->node;
	nnode = hammer_alloc_btree(cursor->trans, 0, &error);

	if (nnode == NULL)
		return (error);

	/*
	 * Move the node
	 */
	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);
	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	if (elm) {
		/*
		 * We are not the root of the B-Tree
		 */
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;

		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		kprintf("REBLOCK LNODE %016llx -> %016llx\n",
			(long long)onode->node_offset,
			(long long)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

	return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_node_lock lockroot;
	hammer_node_t onode;
	hammer_node_t nnode;
	hammer_off_t hint;
	int error;
	int i;

	hammer_node_lock_init(&lockroot, cursor->node);
	error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
	if (error)
		goto done;

	/*
	 * The internal node is visited after recursing through its
	 * first element.  Use the subtree offset allocated for that
	 * element as a hint for allocating the internal node.
	 */
	onode = cursor->node;
	if (onode->ondisk->count)
		hint = onode->ondisk->elms[0].internal.subtree_offset;
	else
		hint = 0;
	nnode = hammer_alloc_btree(cursor->trans, hint, &error);

	if (nnode == NULL)
		goto done;

	/*
	 * Move the node.  Adjust the parent's pointer to us first.
	 */
	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);
	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	if (elm) {
		/*
		 * We are not the root of the B-Tree
		 */
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;

		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

	/*
	 * Now adjust our children's pointers to us.
	 */
	for (i = 0; i < nnode->ondisk->count; ++i) {
		elm = &nnode->ondisk->elms[i];
		error = btree_set_parent(cursor->trans, nnode, elm);
		if (error)
			panic("reblock internal node: fixup problem");
	}

	/*
	 * Clean up.
	 *
	 * The new node replaces the current node in the cursor.  The cursor
	 * expects it to be locked so leave it locked.  Discard onode.
	 */
	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		kprintf("REBLOCK INODE %016llx -> %016llx\n",
			(long long)onode->node_offset,
			(long long)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

done:
	hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
	return (error);
}