/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_redact.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/dmu_tx.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#endif

/*
 * This controls the number of entries in the buffer the redaction_list_update
 * synctask uses to buffer writes to the redaction list.
 */
int redact_sync_bufsize = 1024;

/*
 * Controls how often the on-disk redaction list is updated while a redaction
 * list is being created.
 */
uint64_t redaction_list_update_interval_ns = 1000 * 1000 * 1000ULL; /* 1s */

/*
 * This tunable controls the length of the queues that zfs redact worker threads
 * use to communicate.  If the dmu_redact_snap thread is blocking on these
 * queues, this variable may need to be increased.  If there is a significant
 * slowdown at the start of a redact operation as these threads consume all the
 * available IO resources, or the queues are consuming too much memory, this
 * variable may need to be decreased.
 */
int zfs_redact_queue_length = 1024 * 1024;

/*
 * This tunable controls the fill fraction of the queues used by zfs redact.
 * The fill fraction controls the frequency with which threads have to be
 * cv_signaled.  If a lot of cpu time is being spent on cv_signal, then this
 * should be tuned down.  If the queues empty before the signalled thread can
 * catch up, then this should be tuned up.
 */
uint64_t zfs_redact_queue_ff = 20;

struct redact_record {
	bqueue_node_t		ln;
	boolean_t		eos_marker; /* Marks the end of the stream */
	uint64_t		start_object;
	uint64_t		start_blkid;
	uint64_t		end_object;
	uint64_t		end_blkid;
	uint8_t			indblkshift;
	uint32_t		datablksz;
};

struct redact_thread_arg {
	bqueue_t	q;
	objset_t	*os;		/* Objset to traverse */
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	struct redact_record *current_record;
	int		error_code;
	boolean_t	cancel;
	zbookmark_phys_t resume;
	objlist_t	*deleted_objs;
	uint64_t	*num_blocks_visited;
	uint64_t	ignore_object;	/* ignore further callbacks on this */
	uint64_t	txg; /* txg to traverse since */
};

/*
 * The redaction node is a wrapper around the redaction record that is used
 * by the redaction merging thread to sort the records and determine overlaps.
 *
 * It contains two nodes; one sorts the records by their start_zb, and the other
 * sorts the records by their end_zb.
 */
struct redact_node {
	avl_node_t			avl_node_start;
	avl_node_t			avl_node_end;
	struct redact_record		*record;
	struct redact_thread_arg	*rt_arg;
	uint32_t			thread_num;
};

struct merge_data {
	list_t				md_redact_block_pending;
	redact_block_phys_t		md_coalesce_block;
	uint64_t			md_last_time;
	redact_block_phys_t		md_furthest[TXG_SIZE];
	/* Lists of struct redact_block_list_node. */
	list_t				md_blocks[TXG_SIZE];
	boolean_t			md_synctask_txg[TXG_SIZE];
	uint64_t			md_latest_synctask_txg;
	redaction_list_t		*md_redaction_list;
};

/*
 * A wrapper around struct redact_block so it can be stored in a list_t.
 */
struct redact_block_list_node {
	redact_block_phys_t	block;
	list_node_t		node;
};

/*
 * We've found a new redaction candidate.  In order to improve performance, we
 * coalesce these blocks when they're adjacent to each other.  This function
 * handles that.  If the new candidate block range is immediately after the
 * range we're building, coalesce it into the range we're building.  Otherwise,
 * put the record we're building on the queue, and update the build pointer to
 * point to the new record.
 */
static void
record_merge_enqueue(bqueue_t *q, struct redact_record **build,
    struct redact_record *new)
{
	if (new->eos_marker) {
		if (*build != NULL)
			bqueue_enqueue(q, *build, sizeof (**build));
		bqueue_enqueue_flush(q, new, sizeof (*new));
		return;
	}
	if (*build == NULL) {
		*build = new;
		return;
	}
	struct redact_record *curbuild = *build;
	if ((curbuild->end_object == new->start_object &&
	    curbuild->end_blkid + 1 == new->start_blkid &&
	    curbuild->end_blkid != UINT64_MAX) ||
	    (curbuild->end_object + 1 == new->start_object &&
	    curbuild->end_blkid == UINT64_MAX && new->start_blkid == 0)) {
		curbuild->end_object = new->end_object;
		curbuild->end_blkid = new->end_blkid;
		kmem_free(new, sizeof (*new));
	} else {
		bqueue_enqueue(q, curbuild, sizeof (*curbuild));
		*build = new;
	}
}
#ifdef _KERNEL
struct objnode {
	avl_node_t node;
	uint64_t obj;
};

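/*
 * Compare two objnodes by object number, so the delete queue entries can be
 * sorted in an AVL tree.
 */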
static int
objnode_compare(const void *o1, const void *o2)
{
	const struct objnode *obj1 = o1;
	const struct objnode *obj2 = o2;
	if (obj1->obj < obj2->obj)
		return (-1);
	if (obj1->obj > obj2->obj)
		return (1);
	return (0);
}

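/*
 * Collect the object numbers in this objset's ZFS delete queue (objects that
 * have been unlinked but not yet freed) into a sorted objlist, so the
 * traversal callback can treat them as deleted.
 */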
static objlist_t *
zfs_get_deleteq(objset_t *os)
{
	objlist_t *deleteq_objlist = objlist_create();
	uint64_t deleteq_obj;
	zap_cursor_t zc;
	zap_attribute_t za;
	dmu_object_info_t doi;

	ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
	VERIFY0(dmu_object_info(os, MASTER_NODE_OBJ, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_MASTER_NODE);

	VERIFY0(zap_lookup(os, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));

	/*
	 * In order to insert objects into the objlist, they must be in sorted
	 * order. We don't know what order we'll get them out of the ZAP in, so
	 * we insert them into and remove them from an avl_tree_t to sort them.
	 */
	avl_tree_t at;
	avl_create(&at, objnode_compare, sizeof (struct objnode),
	    offsetof(struct objnode, node));

	for (zap_cursor_init(&zc, os, deleteq_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		struct objnode *obj = kmem_zalloc(sizeof (*obj), KM_SLEEP);
		obj->obj = za.za_first_integer;
		avl_add(&at, obj);
	}
	zap_cursor_fini(&zc);

	struct objnode *next, *found = avl_first(&at);
	while (found != NULL) {
		next = AVL_NEXT(&at, found);
		objlist_insert(deleteq_objlist, found->obj);
		found = next;
	}

	void *cookie = NULL;
	while ((found = avl_destroy_nodes(&at, &cookie)) != NULL)
		kmem_free(found, sizeof (*found));
	avl_destroy(&at);
	return (deleteq_objlist);
}
#endif

/*
 * This is the callback function to traverse_dataset for the redaction threads
 * for dmu_redact_snap.  This thread is responsible for creating redaction
 * records for all the data that is modified by the snapshots we're redacting
 * with respect to.  Redaction records represent ranges of data that have been
 * modified by one of the redaction snapshots, and are stored in the
 * redact_record struct.  We need to create redaction records for three
 * cases:
 *
 * First, if there's a normal write, we need to create a redaction record for
 * that block.
 *
 * Second, if there's a hole, we need to create a redaction record that covers
 * the whole range of the hole.  If the hole is in the meta-dnode, it must cover
 * every block in all of the objects in the hole.
 *
 * Third, if there is a deleted object, we need to create a redaction record for
 * all of the blocks in that object.
 */
/*ARGSUSED*/
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct redact_thread_arg *rta = arg;
	struct redact_record *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= rta->resume.zb_object);

	if (rta->cancel)
		return (SET_ERROR(EINTR));

	if (rta->ignore_object == zb->zb_object)
		return (0);

	/*
	 * If we're visiting a dnode, we need to handle the case where the
	 * object has been deleted.
	 */
	if (zb->zb_level == ZB_DNODE_LEVEL) {
		if (zb->zb_object == 0)
			return (0);

		/*
		 * If the object has been deleted, redact all of the blocks in
		 * it.
		 */
		if (dnp->dn_type == DMU_OT_NONE ||
		    objlist_exists(rta->deleted_objs, zb->zb_object)) {
			rta->ignore_object = zb->zb_object;
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);

			record->eos_marker = B_FALSE;
			record->start_object = record->end_object =
			    zb->zb_object;
			record->start_blkid = 0;
			record->end_blkid = UINT64_MAX;
			record_merge_enqueue(&rta->q,
			    &rta->current_record, record);
		}
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	} else if (zb->zb_level > 0 && !BP_IS_HOLE(bp)) {
		/*
		 * If this is an indirect block, but not a hole, it doesn't
		 * provide any useful information for redaction, so ignore it.
		 */
		return (0);
	}

	/*
	 * At this point, there are two options left for the type of block we're
	 * looking at.  Either this is a hole (which could be in the dnode or
	 * the meta-dnode), or it's a level 0 block of some sort.  If it's a
	 * hole, we create a redaction record that covers the whole range.  If
	 * the hole is in a dnode, we need to redact all the blocks in that
	 * hole.  If the hole is in the meta-dnode, we instead need to redact
	 * all blocks in every object covered by that hole.  If it's a level 0
	 * block, we only need to redact that single block.
	 */
	record = kmem_zalloc(sizeof (struct redact_record), KM_SLEEP);
	record->eos_marker = B_FALSE;

	record->start_object = record->end_object = zb->zb_object;
	if (BP_IS_HOLE(bp)) {
		record->start_blkid = zb->zb_blkid *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);

		record->end_blkid = ((zb->zb_blkid + 1) *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level)) - 1;

		if (zb->zb_object == DMU_META_DNODE_OBJECT) {
			record->start_object = record->start_blkid *
			    ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t));
			record->start_blkid = 0;
			record->end_object = ((record->end_blkid +
			    1) * ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t))) - 1;
			record->end_blkid = UINT64_MAX;
		}
	} else if (zb->zb_level != 0 ||
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		kmem_free(record, sizeof (*record));
		return (0);
	} else {
		record->start_blkid = record->end_blkid = zb->zb_blkid;
	}
	record->indblkshift = dnp->dn_indblkshift;
	record->datablksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	record_merge_enqueue(&rta->q, &rta->current_record, record);

	return (0);
}

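/*
 * Thread function for each of the redaction snapshot traversal threads.
 * Builds the list of objects in this objset's delete queue, walks the
 * dataset from the resume point, and terminates its queue with an eos
 * record when the traversal completes or is cancelled.
 */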
static void
redact_traverse_thread(void *arg)
{
	struct redact_thread_arg *rt_arg = arg;
	int err;
	struct redact_record *data;
#ifdef _KERNEL
	if (rt_arg->os->os_phys->os_type == DMU_OST_ZFS)
		rt_arg->deleted_objs = zfs_get_deleteq(rt_arg->os);
	else
		rt_arg->deleted_objs = objlist_create();
#else
	rt_arg->deleted_objs = objlist_create();
#endif

	err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg,
	    &rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
	    redact_cb, rt_arg);

	if (err != EINTR)
		rt_arg->error_code = err;
	objlist_destroy(rt_arg->deleted_objs);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	record_merge_enqueue(&rt_arg->q, &rt_arg->current_record, data);
	thread_exit();
}

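/*
 * Construct a level-0 bookmark for the given object and block id, so that
 * range endpoints can be compared with zbookmark_compare().
 */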
static inline void
create_zbookmark_from_obj_off(zbookmark_phys_t *zb, uint64_t object,
    uint64_t blkid)
{
	zb->zb_object = object;
	zb->zb_level = 0;
	zb->zb_blkid = blkid;
}

/*
 * This is a utility function that can do the comparison for the start or ends
 * of the ranges in a redact_record.
 */
static int
redact_range_compare(uint64_t obj1, uint64_t off1, uint32_t dbss1,
    uint64_t obj2, uint64_t off2, uint32_t dbss2)
{
	zbookmark_phys_t z1, z2;
	create_zbookmark_from_obj_off(&z1, obj1, off1);
	create_zbookmark_from_obj_off(&z2, obj2, off2);

	return (zbookmark_compare(dbss1 >> SPA_MINBLOCKSHIFT, 0,
	    dbss2 >> SPA_MINBLOCKSHIFT, 0, &z1, &z2));
}

/*
 * Compare two redaction records by their range's start location.  Also makes
 * eos records always compare last.  We use the thread number in the redact_node
 * to ensure that records do not compare equal (which is not allowed in our avl
 * trees).
 */
static int
redact_node_compare_start(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *rr1 = rn1->record;
	const struct redact_record *rr2 = rn2->record;
	if (rr1->eos_marker)
		return (1);
	if (rr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(rr1->start_object, rr1->start_blkid,
	    rr1->datablksz, rr2->start_object, rr2->start_blkid,
	    rr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Compare two redaction records by their range's end location.  Also makes
 * eos records always compare last.  We use the thread number in the redact_node
 * to ensure that records do not compare equal (which is not allowed in our avl
 * trees).
 */
static int
redact_node_compare_end(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *srr1 = rn1->record;
	const struct redact_record *srr2 = rn2->record;
	if (srr1->eos_marker)
		return (1);
	if (srr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(srr1->end_object, srr1->end_blkid,
	    srr1->datablksz, srr2->end_object, srr2->end_blkid,
	    srr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Utility function that compares two redaction records to determine if any part
 * of the "from" record is before any part of the "to" record.  Also causes End
 * of Stream redaction records to compare after all others, so that the
 * redaction merging logic can stay simple.
 */
static boolean_t
redact_record_before(const struct redact_record *from,
    const struct redact_record *to)
{
	if (from->eos_marker == B_TRUE)
		return (B_FALSE);
	else if (to->eos_marker == B_TRUE)
		return (B_TRUE);
	return (redact_range_compare(from->start_object, from->start_blkid,
	    from->datablksz, to->end_object, to->end_blkid,
	    to->datablksz) <= 0);
}

/*
 * Pop a new redaction record off the queue, check that the records are in the
 * right order, and free the old data.
 */
static struct redact_record *
get_next_redact_record(bqueue_t *bq, struct redact_record *prev)
{
	struct redact_record *next = bqueue_dequeue(bq);
	ASSERT(redact_record_before(prev, next));
	kmem_free(prev, sizeof (*prev));
	return (next);
}

/*
 * Remove the given redaction node from both trees, pull a new redaction record
 * off the queue, free the old redaction record, update the redaction node, and
 * reinsert the node into the trees.
 */
static int
update_avl_trees(avl_tree_t *start_tree, avl_tree_t *end_tree,
    struct redact_node *redact_node)
{
	avl_remove(start_tree, redact_node);
	avl_remove(end_tree, redact_node);
	redact_node->record = get_next_redact_record(&redact_node->rt_arg->q,
	    redact_node->record);
	avl_add(end_tree, redact_node);
	avl_add(start_tree, redact_node);
	return (redact_node->rt_arg->error_code);
}

/*
 * Synctask for updating redaction lists.  We first take this txg's list of
 * redacted blocks and append those to the redaction list.  We then update the
 * redaction list's bonus buffer.  We store the furthest blocks we visited and
 * the list of snapshots that we're redacting with respect to.  We need these so
 * that redacted sends and receives can be correctly resumed.
 */
static void
redaction_list_update_sync(void *arg, dmu_tx_t *tx)
{
	struct merge_data *md = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	list_t *list = &md->md_blocks[txg & TXG_MASK];
	redact_block_phys_t *furthest_visited =
	    &md->md_furthest[txg & TXG_MASK];
	objset_t *mos = tx->tx_pool->dp_meta_objset;
	redaction_list_t *rl = md->md_redaction_list;
	int bufsize = redact_sync_bufsize;
	redact_block_phys_t *buf = kmem_alloc(bufsize * sizeof (*buf),
	    KM_SLEEP);
	int index = 0;

	dmu_buf_will_dirty(rl->rl_dbuf, tx);

	for (struct redact_block_list_node *rbln = list_remove_head(list);
	    rbln != NULL; rbln = list_remove_head(list)) {
		ASSERT3U(rbln->block.rbp_object, <=,
		    furthest_visited->rbp_object);
		ASSERT(rbln->block.rbp_object < furthest_visited->rbp_object ||
		    rbln->block.rbp_blkid <= furthest_visited->rbp_blkid);
		buf[index] = rbln->block;
		index++;
		if (index == bufsize) {
			dmu_write(mos, rl->rl_object,
			    rl->rl_phys->rlp_num_entries * sizeof (*buf),
			    bufsize * sizeof (*buf), buf, tx);
			rl->rl_phys->rlp_num_entries += bufsize;
			index = 0;
		}
		kmem_free(rbln, sizeof (*rbln));
	}
	if (index > 0) {
		dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
		    sizeof (*buf), index * sizeof (*buf), buf, tx);
		rl->rl_phys->rlp_num_entries += index;
	}
	kmem_free(buf, bufsize * sizeof (*buf));

	md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;
	rl->rl_phys->rlp_last_object = furthest_visited->rbp_object;
	rl->rl_phys->rlp_last_blkid = furthest_visited->rbp_blkid;
}

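/*
 * Move the pending redaction blocks onto the open txg's list, record how far
 * the traversal has gotten, and make sure a redaction_list_update_sync
 * synctask is registered for that txg.
 */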
static void
commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
    uint64_t blkid)
{
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);
	if (!md->md_synctask_txg[txg & TXG_MASK]) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    redaction_list_update_sync, md, tx);
		md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;
		md->md_latest_synctask_txg = txg;
	}
	md->md_furthest[txg & TXG_MASK].rbp_object = object;
	md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;
	list_move_tail(&md->md_blocks[txg & TXG_MASK],
	    &md->md_redact_block_pending);
	dmu_tx_commit(tx);
	md->md_last_time = gethrtime();
}

/*
 * We want to store the list of blocks that we're redacting in the bookmark's
 * redaction list.  However, this list is stored in the MOS, which means it can
 * only be written to in syncing context.  To get around this, we create a
 * synctask that will write to the mos for us.  We tell it what to write by
 * maintaining a linked list for each transaction group; every time we decide
 * to redact a block, we append it to the transaction group that is currently
 * in open context.  We also update some progress information that the synctask
 * will store to enable resumable redacted sends.
 */
static void
update_redaction_list(struct merge_data *md, objset_t *os,
    uint64_t object, uint64_t blkid, uint64_t endblkid, uint32_t blksz)
{
	boolean_t enqueue = B_FALSE;
	redact_block_phys_t cur = {0};
	uint64_t count = endblkid - blkid + 1;
	while (count > REDACT_BLOCK_MAX_COUNT) {
		update_redaction_list(md, os, object, blkid,
		    blkid + REDACT_BLOCK_MAX_COUNT - 1, blksz);
		blkid += REDACT_BLOCK_MAX_COUNT;
		count -= REDACT_BLOCK_MAX_COUNT;
	}
	redact_block_phys_t *coalesce = &md->md_coalesce_block;
	boolean_t new;
	if (coalesce->rbp_size_count == 0) {
		new = B_TRUE;
		enqueue = B_FALSE;
	} else {
		uint64_t old_count = redact_block_get_count(coalesce);
		if (coalesce->rbp_object == object &&
		    coalesce->rbp_blkid + old_count == blkid &&
		    old_count + count <= REDACT_BLOCK_MAX_COUNT) {
			ASSERT3U(redact_block_get_size(coalesce), ==, blksz);
			redact_block_set_count(coalesce, old_count + count);
			new = B_FALSE;
			enqueue = B_FALSE;
		} else {
			new = B_TRUE;
			enqueue = B_TRUE;
		}
	}

	if (new) {
		cur = *coalesce;
		coalesce->rbp_blkid = blkid;
		coalesce->rbp_object = object;

		redact_block_set_count(coalesce, count);
		redact_block_set_size(coalesce, blksz);
	}

	if (enqueue && redact_block_get_size(&cur) != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = cur;
		list_insert_tail(&md->md_redact_block_pending, rbln);
	}

	if (gethrtime() > md->md_last_time +
	    redaction_list_update_interval_ns) {
		commit_rl_updates(os, md, object, blkid);
	}
}

/*
 * This thread merges all the redaction records provided by the worker threads,
 * and determines which blocks are redacted by all the snapshots.  The algorithm
 * for doing so is similar to performing a merge in mergesort with n sub-lists
 * instead of 2, with some added complexity due to the fact that the entries are
 * ranges, not just single blocks.  This algorithm relies on the fact that the
 * queues are sorted, which is ensured by the fact that traverse_dataset
 * traverses the dataset in a consistent order.  We pull one entry off the front
 * of the queues of each secure dataset traversal thread.  Then we repeat the
 * following: each record represents a range of blocks modified by one of the
 * redaction snapshots, and each block in that range may need to be redacted in
 * the send stream.  Find the record with the latest start of its range, and the
 * record with the earliest end of its range.  If the last start is before the
 * first end, then we know that the blocks in the range [last_start, first_end]
 * are covered by all of the ranges at the front of the queues, which means
 * every thread redacts that whole range.  For example, let's say the ranges on
 * each queue look like this:
 *
 * Block Id   1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |    [====================]
 * Thread 2 |       [========]
 * Thread 3 |             [=================]
 *
 * Thread 3 has the last start (5), and Thread 2 has the first end (6).  All
 * three threads modified the range [5,6], so that data should not be sent over
 * the wire.  After we've determined whether or not to redact anything, we take
 * the record with the first end.  We discard that record, and pull a new one
 * off the front of the queue it came from.  In the above example, we would
 * discard Thread 2's record, and pull a new one.  Let's say the next record we
 * pulled from Thread 2 covered range [10,11].  The new layout would look like
 * this:
 *
 * Block Id   1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |    [====================]
 * Thread 2 |                            [==]
 * Thread 3 |             [=================]
 *
 * When we compare the last start (10, from Thread 2) and the first end (9, from
 * Thread 1), we see that the last start is greater than the first end.
 * Therefore, we do not redact anything from these records.  We'll iterate by
 * replacing the record from Thread 1.
 *
 * We iterate by replacing the record with the lowest end because we know
 * that the record with the lowest end has helped us as much as it can.  All the
 * ranges before it that we will ever redact have been redacted.  In addition,
 * by replacing the one with the lowest end, we guarantee we catch all ranges
 * that need to be redacted.  For example, if in the case above we had replaced
 * the record from Thread 1 instead, we might have ended up with the following:
 *
 * Block Id   1  2  3  4  5  6  7  8  9 10 11 12
 * Thread 1 |                               [==]
 * Thread 2 |       [========]
 * Thread 3 |             [=================]
 *
 * If the next record from Thread 2 had been [8,10], for example, we should have
 * redacted part of that range, but because we updated Thread 1's record, we
 * missed it.
 *
 * We implement this algorithm by using two trees.  The first sorts the
 * redaction records by their start_zb, and the second sorts them by their
 * end_zb.  We use these to find the record with the last start and the record
 * with the first end.  We create a record with that start and end, and send it
 * on.  The overall runtime of this implementation is O(n log m), where n is the
 * total number of redaction records from all the different redaction snapshots,
 * and m is the number of redaction snapshots.
 *
 * If we redact with respect to zero snapshots, we create a single redaction
 * record covering the whole dataset: it starts at the first redactable object,
 * and its end object and blkid are UINT64_MAX.  This will result in us
 * redacting every block.
 */
static int
perform_thread_merge(bqueue_t *q, uint32_t num_threads,
    struct redact_thread_arg *thread_args, boolean_t *cancel)
{
	struct redact_node *redact_nodes = NULL;
	avl_tree_t start_tree, end_tree;
	struct redact_record *record;
	struct redact_record *current_record = NULL;
	int err = 0;
	struct merge_data md = { {0} };
	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));

	/*
	 * If we're redacting with respect to zero snapshots, then no data is
	 * permitted to be sent.  We enqueue a record that redacts all blocks,
	 * and an eos marker.
	 */
	if (num_threads == 0) {
		record = kmem_zalloc(sizeof (struct redact_record),
		    KM_SLEEP);
		/* We can't redact object 0, so don't try. */
		record->start_object = 1;
		record->start_blkid = 0;
		record->end_object = record->end_blkid = UINT64_MAX;
		bqueue_enqueue(q, record, sizeof (*record));
		return (0);
	}
	redact_nodes = kmem_zalloc(num_threads *
	    sizeof (*redact_nodes), KM_SLEEP);

	avl_create(&start_tree, redact_node_compare_start,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_start));
	avl_create(&end_tree, redact_node_compare_end,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_end));

	for (int i = 0; i < num_threads; i++) {
		struct redact_node *node = &redact_nodes[i];
		struct redact_thread_arg *targ = &thread_args[i];
		node->record = bqueue_dequeue(&targ->q);
		node->rt_arg = targ;
		node->thread_num = i;
		avl_add(&start_tree, node);
		avl_add(&end_tree, node);
	}

	/*
	 * Once the first record in the end tree has returned EOS, every record
	 * must be an EOS record, so we should stop.
	 */
	while (err == 0 && !((struct redact_node *)avl_first(&end_tree))->
	    record->eos_marker) {
		if (*cancel) {
			err = EINTR;
			break;
		}
		struct redact_node *last_start = avl_last(&start_tree);
		struct redact_node *first_end = avl_first(&end_tree);

		/*
		 * If the last start record is before the first end record,
		 * then we have blocks that are redacted by all threads.
		 * Therefore, we should redact them.  Copy the record, and send
		 * it to the main thread.
		 */
		if (redact_record_before(last_start->record,
		    first_end->record)) {
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);
			*record = *first_end->record;
			record->start_object = last_start->record->start_object;
			record->start_blkid = last_start->record->start_blkid;
			record_merge_enqueue(q, &current_record,
			    record);
		}
		err = update_avl_trees(&start_tree, &end_tree, first_end);
	}

	/*
	 * We're done; if we were cancelled, we need to cancel our workers and
	 * clear out their queues.  Either way, we need to remove every thread's
	 * redact_node struct from the avl trees.
	 */
	for (int i = 0; i < num_threads; i++) {
		if (err != 0) {
			thread_args[i].cancel = B_TRUE;
			while (!redact_nodes[i].record->eos_marker) {
				(void) update_avl_trees(&start_tree, &end_tree,
				    &redact_nodes[i]);
			}
		}
		avl_remove(&start_tree, &redact_nodes[i]);
		avl_remove(&end_tree, &redact_nodes[i]);
		kmem_free(redact_nodes[i].record,
		    sizeof (struct redact_record));
		bqueue_destroy(&thread_args[i].q);
	}

	avl_destroy(&start_tree);
	avl_destroy(&end_tree);
	kmem_free(redact_nodes, num_threads * sizeof (*redact_nodes));
	if (current_record != NULL)
		bqueue_enqueue(q, current_record, sizeof (*current_record));
	return (err);
}

struct redact_merge_thread_arg {
	bqueue_t q;
	spa_t *spa;
	int numsnaps;
	struct redact_thread_arg *thr_args;
	boolean_t cancel;
	int error_code;
};

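/*
 * Merge thread entry point; merges the redaction records produced by the
 * traversal threads, then terminates the output queue with an eos record.
 */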
static void
redact_merge_thread(void *arg)
{
	struct redact_merge_thread_arg *rmta = arg;
	rmta->error_code = perform_thread_merge(&rmta->q,
	    rmta->numsnaps, rmta->thr_args, &rmta->cancel);
	struct redact_record *rec = kmem_zalloc(sizeof (*rec), KM_SLEEP);
	rec->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&rmta->q, rec, 1);
	thread_exit();
}

/*
 * Find the next object in or after the redaction range passed in, and hold
 * its dnode with the provided tag.  Also update *object to contain the new
 * object number.
 */
static int
hold_next_object(objset_t *os, struct redact_record *rec, void *tag,
    uint64_t *object, dnode_t **dn)
{
	int err = 0;
	if (*dn != NULL)
		dnode_rele(*dn, tag);
	*dn = NULL;
	if (*object < rec->start_object) {
		*object = rec->start_object - 1;
	}
	err = dmu_object_next(os, object, B_FALSE, 0);
	if (err != 0)
		return (err);

	err = dnode_hold(os, *object, tag, dn);
	while (err == 0 && (*object < rec->start_object ||
	    DMU_OT_IS_METADATA((*dn)->dn_type))) {
		dnode_rele(*dn, tag);
		*dn = NULL;
		err = dmu_object_next(os, object, B_FALSE, 0);
		if (err != 0)
			break;
		err = dnode_hold(os, *object, tag, dn);
	}
	return (err);
}

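/*
 * Take the merged redaction records from the merge thread's queue, walk the
 * objects each record covers, and pass the resulting block ranges to
 * update_redaction_list().  When we're done (or if we hit an error), drain
 * the queue and sync out any remaining redaction list updates.
 */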
static int
perform_redaction(objset_t *os, redaction_list_t *rl,
    struct redact_merge_thread_arg *rmta)
{
	int err = 0;
	bqueue_t *q = &rmta->q;
	struct redact_record *rec = NULL;
	struct merge_data md = { {0} };

	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));
	md.md_redaction_list = rl;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&md.md_blocks[i],
		    sizeof (struct redact_block_list_node),
		    offsetof(struct redact_block_list_node, node));
	}
	dnode_t *dn = NULL;
	uint64_t prev_obj = 0;
	for (rec = bqueue_dequeue(q); !rec->eos_marker && err == 0;
	    rec = get_next_redact_record(q, rec)) {
		ASSERT3U(rec->start_object, !=, 0);
		uint64_t object;
		if (prev_obj != rec->start_object) {
			object = rec->start_object - 1;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		} else {
			object = prev_obj;
		}
		while (err == 0 && object <= rec->end_object) {
			if (issig(JUSTLOOKING) && issig(FORREAL)) {
				err = EINTR;
				break;
			}
			/*
			 * Part of the current object is contained somewhere in
			 * the range covered by rec.
			 */
			uint64_t startblkid;
			uint64_t endblkid;
			uint64_t maxblkid = dn->dn_phys->dn_maxblkid;

			if (rec->start_object < object)
				startblkid = 0;
			else if (rec->start_blkid > maxblkid)
				break;
			else
				startblkid = rec->start_blkid;

			if (rec->end_object > object || rec->end_blkid >
			    maxblkid) {
				endblkid = maxblkid;
			} else {
				endblkid = rec->end_blkid;
			}
			update_redaction_list(&md, os, object, startblkid,
			    endblkid, dn->dn_datablksz);

			if (object == rec->end_object)
				break;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		}
		if (err == ESRCH)
			err = 0;
		if (dn != NULL)
			prev_obj = object;
	}
	if (err == 0 && dn != NULL)
		dnode_rele(dn, FTAG);

	if (err == ESRCH)
		err = 0;
	rmta->cancel = B_TRUE;
	while (!rec->eos_marker)
		rec = get_next_redact_record(q, rec);
	kmem_free(rec, sizeof (*rec));

	/*
	 * There may be a block that's being coalesced, sync that out before we
	 * return.
	 */
	if (err == 0 && md.md_coalesce_block.rbp_size_count != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = md.md_coalesce_block;
		list_insert_tail(&md.md_redact_block_pending, rbln);
	}
	commit_rl_updates(os, &md, UINT64_MAX, UINT64_MAX);

	/*
	 * Wait for all the redaction info to sync out before we return, so that
	 * anyone who attempts to resume this redaction will have all the data
	 * they need.
	 */
	dsl_pool_t *dp = spa_get_dsl(os->os_spa);
	if (md.md_latest_synctask_txg != 0)
		txg_wait_synced(dp, md.md_latest_synctask_txg);
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&md.md_blocks[i]);
	return (err);
}

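/*
 * Return true if the given snapshot guid appears in the provided array of
 * redaction snapshot guids.
 */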
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

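/*
 * Create (or resume) a redaction bookmark named redactbook on the snapshot
 * snapname, redacting with respect to the snapshots in redactnvl.  One
 * traversal thread is started per redaction snapshot, plus a merge thread;
 * this thread then performs the redaction itself.
 */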
int
dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
    const char *redactbook)
{
	int err = 0;
	dsl_pool_t *dp = NULL;
	dsl_dataset_t *ds = NULL;
	int numsnaps = 0;
	objset_t *os;
	struct redact_thread_arg *args = NULL;
	redaction_list_t *new_rl = NULL;
	char *newredactbook;

	if ((err = dsl_pool_hold(snapname, FTAG, &dp)) != 0)
		return (err);

	newredactbook = kmem_zalloc(sizeof (char) * ZFS_MAX_DATASET_NAME_LEN,
	    KM_SLEEP);

	if ((err = dsl_dataset_hold_flags(dp, snapname, DS_HOLD_FLAG_DECRYPT,
	    FTAG, &ds)) != 0) {
		goto out;
	}
	dsl_dataset_long_hold(ds, FTAG);
	if (!ds->ds_is_snapshot || dmu_objset_from_ds(ds, &os) != 0) {
		err = EINVAL;
		goto out;
	}
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_REDACTED_DATASETS)) {
		err = EALREADY;
		goto out;
	}

	numsnaps = fnvlist_num_pairs(redactnvl);
	if (numsnaps > 0)
		args = kmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);

	nvpair_t *pair = NULL;
	for (int i = 0; i < numsnaps; i++) {
		pair = nvlist_next_nvpair(redactnvl, pair);
		const char *name = nvpair_name(pair);
		struct redact_thread_arg *rta = &args[i];
		err = dsl_dataset_hold_flags(dp, name, DS_HOLD_FLAG_DECRYPT,
		    FTAG, &rta->ds);
		if (err != 0)
			break;
		/*
		 * We want to do the long hold before we can get any other
		 * errors, because the cleanup code will release the long
		 * hold if rta->ds is filled in.
		 */
		dsl_dataset_long_hold(rta->ds, FTAG);

		err = dmu_objset_from_ds(rta->ds, &rta->os);
		if (err != 0)
			break;
		if (!dsl_dataset_is_before(rta->ds, ds, 0)) {
			err = EINVAL;
			break;
		}
		if (dsl_dataset_feature_is_active(rta->ds,
		    SPA_FEATURE_REDACTED_DATASETS)) {
			err = EALREADY;
			break;
		}
	}
	if (err != 0)
		goto out;
	VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);
	boolean_t resuming = B_FALSE;
	zfs_bookmark_phys_t bookmark;

	(void) strlcpy(newredactbook, snapname, ZFS_MAX_DATASET_NAME_LEN);
	char *c = strchr(newredactbook, '@');
	ASSERT3P(c, !=, NULL);
	int n = snprintf(c, ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook),
	    "#%s", redactbook);
	if (n >= ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook)) {
		err = ENAMETOOLONG;
		goto out;
	}
	err = dsl_bookmark_lookup(dp, newredactbook, NULL, &bookmark);
	if (err == 0) {
		resuming = B_TRUE;
		if (bookmark.zbm_redaction_obj == 0) {
			err = EEXIST;
			goto out;
		}
		err = dsl_redaction_list_hold_obj(dp,
		    bookmark.zbm_redaction_obj, FTAG, &new_rl);
		if (err != 0) {
			err = EIO;
			goto out;
		}
		dsl_redaction_list_long_hold(dp, new_rl, FTAG);
		if (new_rl->rl_phys->rlp_num_snaps != numsnaps) {
			err = ESRCH;
			goto out;
		}
		for (int i = 0; i < numsnaps; i++) {
			struct redact_thread_arg *rta = &args[i];
			if (!redact_snaps_contains(new_rl->rl_phys->rlp_snaps,
			    new_rl->rl_phys->rlp_num_snaps,
			    dsl_dataset_phys(rta->ds)->ds_guid)) {
				err = ESRCH;
				goto out;
			}
		}
		if (new_rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
		    new_rl->rl_phys->rlp_last_object == UINT64_MAX) {
			err = EEXIST;
			goto out;
		}
		dsl_pool_rele(dp, FTAG);
		dp = NULL;
	} else {
		uint64_t *guids = NULL;
		if (numsnaps > 0) {
			guids = kmem_zalloc(numsnaps * sizeof (uint64_t),
			    KM_SLEEP);
		}
		for (int i = 0; i < numsnaps; i++) {
			struct redact_thread_arg *rta = &args[i];
			guids[i] = dsl_dataset_phys(rta->ds)->ds_guid;
		}

		dsl_pool_rele(dp, FTAG);
		dp = NULL;
		err = dsl_bookmark_create_redacted(newredactbook, snapname,
		    numsnaps, guids, FTAG, &new_rl);
		kmem_free(guids, numsnaps * sizeof (uint64_t));
		if (err != 0) {
			goto out;
		}
	}

	for (int i = 0; i < numsnaps; i++) {
		struct redact_thread_arg *rta = &args[i];
		(void) bqueue_init(&rta->q, zfs_redact_queue_ff,
		    zfs_redact_queue_length,
		    offsetof(struct redact_record, ln));
		if (resuming) {
			rta->resume.zb_blkid =
			    new_rl->rl_phys->rlp_last_blkid;
			rta->resume.zb_object =
			    new_rl->rl_phys->rlp_last_object;
		}
		rta->txg = dsl_dataset_phys(ds)->ds_creation_txg;
		(void) thread_create(NULL, 0, redact_traverse_thread, rta,
		    0, curproc, TS_RUN, minclsyspri);
	}

	struct redact_merge_thread_arg *rmta;
	rmta = kmem_zalloc(sizeof (struct redact_merge_thread_arg), KM_SLEEP);

	(void) bqueue_init(&rmta->q, zfs_redact_queue_ff,
	    zfs_redact_queue_length, offsetof(struct redact_record, ln));
	rmta->numsnaps = numsnaps;
	rmta->spa = os->os_spa;
	rmta->thr_args = args;
	(void) thread_create(NULL, 0, redact_merge_thread, rmta, 0, curproc,
	    TS_RUN, minclsyspri);
	err = perform_redaction(os, new_rl, rmta);
	bqueue_destroy(&rmta->q);
	kmem_free(rmta, sizeof (struct redact_merge_thread_arg));
out:
	kmem_free(newredactbook, sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);

	if (new_rl != NULL) {
		dsl_redaction_list_long_rele(new_rl, FTAG);
		dsl_redaction_list_rele(new_rl, FTAG);
	}
	for (int i = 0; i < numsnaps; i++) {
		struct redact_thread_arg *rta = &args[i];
		/*
		 * rta->ds may be NULL if we got an error while filling
		 * it in.
		 */
		if (rta->ds != NULL) {
			dsl_dataset_long_rele(rta->ds, FTAG);
			dsl_dataset_rele_flags(rta->ds,
			    DS_HOLD_FLAG_DECRYPT, FTAG);
		}
	}

	if (args != NULL)
		kmem_free(args, numsnaps * sizeof (*args));
	if (dp != NULL)
		dsl_pool_rele(dp, FTAG);
	if (ds != NULL) {
		dsl_dataset_long_rele(ds, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}
	return (SET_ERROR(err));
}