/* xref: /dragonfly/sys/vfs/hammer/hammer_reblock.c (revision b4f25088) */
/*
 * Copyright (c) 2008-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis.  A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * The reblocker is invoked via an ioctl or by the HAMMER support thread.
 */
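
/*
 * In outline: hammer_ioc_reblock() iterates the B-Tree over the caller's
 * key range, hammer_reblock_helper() decides for each element whether its
 * data or its B-Tree node resides in an underutilized big-block, and
 * hammer_reblock_data(), hammer_reblock_leaf_node() and
 * hammer_reblock_int_node() copy the payload into newly allocated storage
 * so the old big-block can eventually drain and be freed.
 */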

#include "hammer.h"

static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
				 hammer_cursor_t cursor,
				 hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
				hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
				hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
				hammer_cursor_t cursor, hammer_btree_elm_t elm);

int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_reblock *reblock)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int checkspace_count;
	int error;
	int seq;
	int slop;

	/*
	 * free_level is specified in free bytes per big-block, i.e. it is
	 * inverted from the fill level.  A free_level of at least 80% of
	 * a big-block thus corresponds to a fill level of at most 20%,
	 * which is treated as an emergency.
	 */
	if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
		slop = HAMMER_CHKSPC_EMERGENCY;
	else
		slop = HAMMER_CHKSPC_REBLOCK;

	if ((reblock->key_beg.localization | reblock->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
		return(EINVAL);
	if (reblock->free_level < 0)
		return(EINVAL);

	reblock->key_cur = reblock->key_beg;
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	reblock->key_cur.localization += ip->obj_localization;

	checkspace_count = 0;
	seq = trans->hmp->flusher.done;
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
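
	/*
	 * Scan everything in the requested localization/obj_id range.  The
	 * key, create_tid and rec_type fields are opened up to their full
	 * ranges so all records, including historical ones, are visited.
	 */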
	cursor.key_beg.localization = reblock->key_cur.localization;
	cursor.key_beg.obj_id = reblock->key_cur.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = (reblock->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_end.obj_id = reblock->key_end.obj_id;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

	/*
	 * This flag allows the B-Tree scan code to return internal nodes,
	 * so we can reblock them in addition to the leaves.  Only specify it
	 * if we intend to reblock B-Tree nodes.
	 */
	if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
		cursor.flags |= HAMMER_CURSOR_REBLOCKING;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Internal or Leaf node
		 */
		KKASSERT(cursor.index < cursor.node->ondisk->count);
		elm = &cursor.node->ondisk->elms[cursor.index];
		reblock->key_cur.obj_id = elm->base.obj_id;
		reblock->key_cur.localization = elm->base.localization;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		/*
		 * If there is insufficient free space it may be due to
		 * reserved big-blocks, which flushing might fix.
		 *
		 * We must force a retest in case the unlocked cursor is
		 * moved to the end of the leaf, or moved to an internal
		 * node.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		if (hammer_checkspace(trans->hmp, slop)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			cursor.flags |= HAMMER_CURSOR_RETEST;
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
			goto skip;
		}

		/*
		 * Acquiring the sync_lock prevents the operation from
		 * crossing a synchronization boundary.
		 *
		 * NOTE: cursor.node may have changed on return.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		hammer_sync_lock_sh(trans);
		error = hammer_reblock_helper(reblock, &cursor, elm);
		hammer_sync_unlock(trans);

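		/*
		 * If the flusher is falling behind on dirty meta-data or
		 * the UNDO FIFO is close to exhaustion, stall until a
		 * flush cycle completes before continuing.
		 */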
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * Set up for iteration; our cursor flags may be modified by
		 * other threads while we are unlocked.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;

		/*
		 * We allocate data buffers, which at the moment we do not
		 * track dirty levels for because we allow the kernel to
		 * write them.  But if we allocate too many we can still
		 * deadlock the buffer cache.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 *	    (The cursor's node and element may change!)
		 */
		if (bd_heatup()) {
			hammer_unlock_cursor(&cursor);
			bwillwrite(HAMMER_XBUFSIZE);
			hammer_lock_cursor(&cursor);
		}
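
		/*
		 * Give the VM system a chance to catch up if free memory
		 * is running low.
		 */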
		vm_wait_nominal();
skip:
		if (error == 0) {
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EWOULDBLOCK) {
		hammer_flusher_sync(trans->hmp);
		goto retry;
	}
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}

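/*
 * Illustrative sketch (not part of the original file): userland reaches
 * this code through the HAMMERIOC_REBLOCK ioctl, roughly as the hammer(8)
 * utility does.  The exact setup below is an assumption; see
 * sbin/hammer/cmd_reblock.c for the authoritative sequence.
 *
 *	struct hammer_ioc_reblock reblock;
 *
 *	bzero(&reblock, sizeof(reblock));
 *	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
 *	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
 *	reblock.head.flags = HAMMER_IOC_DO_INODES | HAMMER_IOC_DO_DIRS |
 *			     HAMMER_IOC_DO_DATA | HAMMER_IOC_DO_BTREE;
 *	reblock.free_level = HAMMER_LARGEBLOCK_SIZE / 2;  (>= 50% free)
 *	ioctl(fd, HAMMERIOC_REBLOCK, &reblock);
 */
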
/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
		      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_mount_t hmp;
	hammer_off_t tmp_offset;
	hammer_node_ondisk_t ondisk;
	struct hammer_btree_leaf_elm leaf;
	int error;
	int bytes;
	int cur;
	int iocflags;

	error = 0;
	hmp = cursor->trans->hmp;

	/*
	 * Reblock data.  Note that data embedded in a record is reblocked
	 * by the record reblock code.  Data processing only occurs at leaf
	 * nodes and for RECORD element types.
	 */
	if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
		goto skip;
	if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
		return(0);
	tmp_offset = elm->leaf.data_offset;
	if (tmp_offset == 0)
		goto skip;
	if (error)
		goto skip;

	/*
	 * NOTE: Localization restrictions may also have been set up; we
	 *	 can't just set the match flags willy-nilly here.
	 */
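	/*
	 * Map the record type to the ioctl flag which must be set for this
	 * element to be considered: inode-related metadata is covered by
	 * HAMMER_IOC_DO_INODES, directory entries and fixed/PFS records by
	 * HAMMER_IOC_DO_DIRS, and file/db data by HAMMER_IOC_DO_DATA.
	 */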
	switch(elm->leaf.base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		iocflags = HAMMER_IOC_DO_INODES;
		break;
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_DIRENTRY:
		iocflags = HAMMER_IOC_DO_DIRS;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		iocflags = HAMMER_IOC_DO_DATA;
		break;
	default:
		iocflags = 0;
		break;
	}
	if (reblock->head.flags & iocflags) {
		++reblock->data_count;
		reblock->data_byte_count += elm->leaf.data_len;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			kprintf("D %6d/%d\n", bytes, reblock->free_level);
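
		/*
		 * Move the data only if its big-block has at least
		 * free_level bytes free and is not the big-block currently
		 * being allocated from (cur != 0), unless free_level is 0,
		 * which forces everything to be moved.  The same test
		 * governs B-Tree node reblocking below.
		 */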
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			/*
			 * This is nasty, the uncache code may have to get
			 * vnode locks and because of that we can't hold
			 * the cursor locked.
			 *
			 * WARNING: See warnings in hammer_unlock_cursor()
			 *	    function.
			 */
			leaf = elm->leaf;
			hammer_unlock_cursor(cursor);
			hammer_io_direct_uncache(hmp, &leaf);
			hammer_lock_cursor(cursor);

			/*
			 * elm may have become stale or invalid; reload it.
			 * The ondisk variable is temporary only.  Note that
			 * cursor->node, and thus cursor->node->ondisk, may
			 * also have changed.
			 */
			ondisk = cursor->node->ondisk;
			elm = &ondisk->elms[cursor->index];
			if (cursor->flags & HAMMER_CURSOR_RETEST) {
				kprintf("hammer: debug: retest on "
					"reblocker uncache\n");
				error = EDEADLK;
			} else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
				   cursor->index >= ondisk->count) {
				kprintf("hammer: debug: shifted on "
					"reblocker uncache\n");
				error = EDEADLK;
			} else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
				kprintf("hammer: debug: changed on "
					"reblocker uncache\n");
				error = EDEADLK;
			}
			if (error == 0)
				error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				KKASSERT(cursor->index < ondisk->count);
				error = hammer_reblock_data(reblock,
							    cursor, elm);
			}
			if (error == 0) {
				++reblock->data_moves;
				reblock->data_byte_moves += elm->leaf.data_len;
			}
		}
	}

skip:
	/*
	 * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
	 * on initial entry only (element 0).  An internal node is reblocked
	 * when entered upward from its first leaf node only (also element 0).
	 * Further revisits of the internal node (index > 0) are ignored.
	 */
	tmp_offset = cursor->node->node_offset;
	if (cursor->index == 0 &&
	    error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
		++reblock->btree_count;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			kprintf("B %6d/%d\n", bytes, reblock->free_level);
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				if (cursor->parent) {
					KKASSERT(cursor->parent_index <
						 cursor->parent->ondisk->count);
					elm = &cursor->parent->ondisk->elms[cursor->parent_index];
				} else {
					elm = NULL;
				}
				switch(cursor->node->ondisk->type) {
				case HAMMER_BTREE_TYPE_LEAF:
					error = hammer_reblock_leaf_node(
							reblock, cursor, elm);
					break;
				case HAMMER_BTREE_TYPE_INTERNAL:
					error = hammer_reblock_int_node(
							reblock, cursor, elm);
					break;
				default:
					panic("Illegal B-Tree node type");
				}
			}
			if (error == 0) {
				++reblock->btree_moves;
			}
		}
	}

	hammer_cursor_downgrade(cursor);
	return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
					     HAMMER_CURSOR_GET_LEAF);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
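
	/*
	 * Newly allocated data buffers are written back by the kernel
	 * rather than being tracked as dirty meta-data (see the note in
	 * hammer_ioc_reblock()).
	 */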
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling blockmap_free.
	 * The blockmap_free may free up the entire big-block and
	 * will not be able to invalidate it if the cursor is holding
	 * a data buffer cached in that big-block.
	 */
	hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_node_t onode;
	hammer_node_t nnode;
	int error;

	/*
	 * Don't supply a hint when allocating the leaf.  Fills are done
	 * from the leaf upwards.
	 */
	onode = cursor->node;
	nnode = hammer_alloc_btree(cursor->trans, 0, &error);

	if (nnode == NULL)
		return (error);

	/*
	 * Move the node
	 */
	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);
	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	if (elm) {
		/*
		 * We are not the root of the B-Tree
		 */
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;

		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

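	/*
	 * The new node replaces the old one in the cursor; the cursor
	 * expects the replacement to remain locked, so leave it locked
	 * and discard onode.
	 */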
	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		kprintf("REBLOCK LNODE %016llx -> %016llx\n",
			(long long)onode->node_offset,
			(long long)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

	return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_node_lock lockroot;
	hammer_node_t onode;
	hammer_node_t nnode;
	int error;
	int i;

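	/*
	 * All children must be locked up front because their parent
	 * pointers are adjusted to point at the new copy of this node
	 * below.
	 */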
	hammer_node_lock_init(&lockroot, cursor->node);
	error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
	if (error)
		goto done;

	onode = cursor->node;
	nnode = hammer_alloc_btree(cursor->trans, 0, &error);

	if (nnode == NULL)
		goto done;

	/*
	 * Move the node.  Adjust the parent's pointer to us first.
	 */
	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);
	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	if (elm) {
		/*
		 * We are not the root of the B-Tree
		 */
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;

		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

	/*
	 * Now adjust our children's pointers to us.
	 */
	for (i = 0; i < nnode->ondisk->count; ++i) {
		elm = &nnode->ondisk->elms[i];
		error = btree_set_parent(cursor->trans, nnode, elm);
		if (error)
			panic("reblock internal node: fixup problem");
	}

	/*
	 * Clean up.
	 *
	 * The new node replaces the current node in the cursor.  The cursor
	 * expects it to be locked so leave it locked.  Discard onode.
	 */
	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		kprintf("REBLOCK INODE %016llx -> %016llx\n",
			(long long)onode->node_offset,
			(long long)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

done:
	hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
	return (error);
}