1 /*
2  * CDDL HEADER START
3  *
4  * This file and its contents are supplied under the terms of the
5  * Common Development and Distribution License ("CDDL"), version 1.0.
6  * You may only use this file in accordance with the terms of version
7  * 1.0 of the CDDL.
8  *
9  * A full copy of the text of the CDDL should have accompanied this
10  * source.  A copy of the CDDL is also available via the Internet at
11  * http://www.illumos.org/license/CDDL.
12  *
13  * CDDL HEADER END
14  */
15 
16 /*
17  * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
18  * Copyright 2017 Nexenta Systems, Inc.
19  * Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
20  */
21 
22 #include <sys/zfs_context.h>
23 #include <sys/dsl_dataset.h>
24 #include <sys/dsl_dir.h>
25 #include <sys/dsl_prop.h>
26 #include <sys/dsl_synctask.h>
27 #include <sys/dsl_destroy.h>
28 #include <sys/dmu_impl.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/arc.h>
31 #include <sys/zap.h>
32 #include <sys/zfeature.h>
33 #include <sys/spa.h>
34 #include <sys/dsl_bookmark.h>
35 #include <zfs_namecheck.h>
36 #include <sys/dmu_send.h>
37 
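/*
 * Split the full bookmark name "fs#bmark" at the '#': hold the dataset "fs"
 * in *dsp and point *shortnamep at the short name within fullname.  The
 * caller must release the dataset with dsl_dataset_rele(*dsp, tag).
 */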
38 static int
39 dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
40     dsl_dataset_t **dsp, void *tag, char **shortnamep)
41 {
42 	char buf[ZFS_MAX_DATASET_NAME_LEN];
43 	char *hashp;
44 
45 	if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
46 		return (SET_ERROR(ENAMETOOLONG));
47 	hashp = strchr(fullname, '#');
48 	if (hashp == NULL)
49 		return (SET_ERROR(EINVAL));
50 
51 	*shortnamep = hashp + 1;
52 	if (zfs_component_namecheck(*shortnamep, NULL, NULL))
53 		return (SET_ERROR(EINVAL));
54 	(void) strlcpy(buf, fullname, hashp - fullname + 1);
55 	return (dsl_dataset_hold(dp, buf, tag, dsp));
56 }
57 
58 /*
59  * When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
60  * to be zeroed.
61  *
62  * Returns ESRCH if bookmark is not found.
63  * Note, we need to use the ZAP rather than the AVL to look up bookmarks
64  * by name, because only the ZAP honors the case-sensitivity setting.
65  */
66 int
67 dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
68     zfs_bookmark_phys_t *bmark_phys)
69 {
70 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
71 	uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
72 	matchtype_t mt = 0;
73 	int err;
74 
75 	if (bmark_zapobj == 0)
76 		return (SET_ERROR(ESRCH));
77 
78 	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
79 		mt = MT_NORMALIZE;
80 
81 	/*
82 	 * Zero out the bookmark in case the one stored on disk
83 	 * is in an older, shorter format.
84 	 */
85 	memset(bmark_phys, 0, sizeof (*bmark_phys));
86 
87 	err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
88 	    sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
89 	    NULL);
90 
91 	return (err == ENOENT ? SET_ERROR(ESRCH) : err);
92 }
93 
94 /*
95  * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
96  * does not represent an earlier point in later_ds's timeline.  However,
97  * bmp will still be filled in if we return EXDEV.
98  *
99  * Returns ENOENT if the dataset containing the bookmark does not exist.
100  * Returns ESRCH if the dataset exists but the bookmark was not found in it.
101  */
102 int
103 dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
104     dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
105 {
106 	char *shortname;
107 	dsl_dataset_t *ds;
108 	int error;
109 
110 	error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
111 	if (error != 0)
112 		return (error);
113 
114 	error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
115 	if (error == 0 && later_ds != NULL) {
116 		if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
117 			error = SET_ERROR(EXDEV);
118 	}
119 	dsl_dataset_rele(ds, FTAG);
120 	return (error);
121 }
122 
123 /*
124  * Validates that
125  * - bmark is a full dataset path of a bookmark (bookmark_namecheck)
126  * - source is a full path of a snapshot or bookmark
127  *   ({bookmark,snapshot}_namecheck)
128  *
129  * Returns 0 if valid, -1 otherwise.
130  */
131 static int
132 dsl_bookmark_create_nvl_validate_pair(const char *bmark, const char *source)
133 {
134 	if (bookmark_namecheck(bmark, NULL, NULL) != 0)
135 		return (-1);
136 
137 	int is_bmark, is_snap;
138 	is_bmark = bookmark_namecheck(source, NULL, NULL) == 0;
139 	is_snap = snapshot_namecheck(source, NULL, NULL) == 0;
140 	if (!is_bmark && !is_snap)
141 		return (-1);
142 
143 	return (0);
144 }
145 
146 /*
147  * Check that the given nvlist corresponds to the following schema:
148  *  { newbookmark -> source, ... }
149  * where
150  * - each pair passes dsl_bookmark_create_nvl_validate_pair
151  * - all newbookmarks are in the same pool
152  * - all newbookmarks have unique names
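 *
 * For example (names are illustrative only), a request that creates one
 * bookmark from a snapshot and another from an existing bookmark could be:
 *   { "pool/fs#monday"  -> "pool/fs@monday",
 *     "pool/fs#tuesday" -> "pool/fs#monday" }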
153  *
154  * Note that this function only validates the above schema.  Callers must
155  * ensure that the bookmarks can be created, e.g. that the sources exist.
156  *
157  * Returns 0 if the nvlist adheres to above schema.
158  * Returns -1 if it doesn't.
159  */
160 int
161 dsl_bookmark_create_nvl_validate(nvlist_t *bmarks)
162 {
163 	char *first = NULL;
164 	size_t first_len = 0;
165 
166 	for (nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
167 	    pair != NULL; pair = nvlist_next_nvpair(bmarks, pair)) {
168 
169 		char *bmark = nvpair_name(pair);
170 		char *source;
171 
172 		/* list structure: values must be snapshots XOR bookmarks */
173 		if (nvpair_value_string(pair, &source) != 0)
174 			return (-1);
175 		if (dsl_bookmark_create_nvl_validate_pair(bmark, source) != 0)
176 			return (-1);
177 
178 		/* same pool check */
179 		if (first == NULL) {
180 			char *cp = strpbrk(bmark, "/#");
181 			if (cp == NULL)
182 				return (-1);
183 			first = bmark;
184 			first_len = cp - bmark;
185 		}
186 		if (strncmp(first, bmark, first_len) != 0)
187 			return (-1);
188 		switch (*(bmark + first_len)) {
189 		case '/': /* fallthrough */
190 		case '#':
191 			break;
192 		default:
193 			return (-1);
194 		}
195 
196 		/* unique newbookmark names; todo: O(n^2) */
197 		for (nvpair_t *pair2 = nvlist_next_nvpair(bmarks, pair);
198 		    pair2 != NULL; pair2 = nvlist_next_nvpair(bmarks, pair2)) {
199 			if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
200 				return (-1);
201 		}
202 
203 	}
204 	return (0);
205 }
206 
207 /*
208  * Expects that newbm and source have been validated using
209  * dsl_bookmark_create_nvl_validate_pair
210  */
211 static int
212 dsl_bookmark_create_check_impl(dsl_pool_t *dp,
213     const char *newbm, const char *source)
214 {
215 	ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm, source));
216 	/* defer source namecheck until we know it's a snapshot or bookmark */
217 
218 	int error;
219 	dsl_dataset_t *newbm_ds;
220 	char *newbm_short;
221 	zfs_bookmark_phys_t bmark_phys;
222 
223 	error = dsl_bookmark_hold_ds(dp, newbm, &newbm_ds, FTAG, &newbm_short);
224 	if (error != 0)
225 		return (error);
226 
227 	/* Verify that the new bookmark does not already exist */
228 	error = dsl_bookmark_lookup_impl(newbm_ds, newbm_short, &bmark_phys);
229 	switch (error) {
230 	case ESRCH:
231 		/* happy path: new bmark doesn't exist, proceed after switch */
232 		error = 0;
233 		break;
234 	case 0:
235 		error = SET_ERROR(EEXIST);
236 		goto eholdnewbmds;
237 	default:
238 		/* dsl_bookmark_lookup_impl already did SET_ERROR */
239 		goto eholdnewbmds;
240 	}
241 
242 	/* error is retval of the following if-cascade */
243 	if (strchr(source, '@') != NULL) {
244 		dsl_dataset_t *source_snap_ds;
245 		ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
246 		error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
247 		if (error == 0) {
248 			VERIFY(source_snap_ds->ds_is_snapshot);
249 			/*
250 			 * Verify that source snapshot is an earlier point in
251 			 * newbm_ds's timeline (source may be newbm_ds's origin)
252 			 */
253 			if (!dsl_dataset_is_before(newbm_ds, source_snap_ds, 0))
254 				error = SET_ERROR(
255 				    ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
256 			dsl_dataset_rele(source_snap_ds, FTAG);
257 		}
258 	} else if (strchr(source, '#') != NULL) {
259 		zfs_bookmark_phys_t source_phys;
260 		ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
261 		/*
262 		 * Source must exist and be an earlier point in newbm_ds's
263 		 * timeline (newbm_ds's origin may be a snap of source's ds)
264 		 */
265 		error = dsl_bookmark_lookup(dp, source, newbm_ds, &source_phys);
266 		switch (error) {
267 		case 0:
268 			break; /* happy path */
269 		case EXDEV:
270 			error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
271 			break;
272 		default:
273 			/* dsl_bookmark_lookup already did SET_ERROR */
274 			break;
275 		}
276 	} else {
277 		/*
278 		 * dsl_bookmark_create_nvl_validate validates that source is
279 		 * either a snapshot or a bookmark
280 		 */
281 		panic("unreachable code: %s", source);
282 	}
283 
284 eholdnewbmds:
285 	dsl_dataset_rele(newbm_ds, FTAG);
286 	return (error);
287 }
288 
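/*
 * Check function for the bookmark creation sync task.  Validates the
 * { newbookmark -> source } nvlist, then runs dsl_bookmark_create_check_impl
 * on each pair, recording per-bookmark errors in dbca->dbca_errors (if the
 * caller supplied it).
 */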
289 int
290 dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
291 {
292 	dsl_bookmark_create_arg_t *dbca = arg;
293 	int rv = 0;
294 	int schema_err = 0;
295 	ASSERT3P(dbca, !=, NULL);
296 	ASSERT3P(dbca->dbca_bmarks, !=, NULL);
297 	/* dbca->dbca_errors is allowed to be NULL */
298 
299 	dsl_pool_t *dp = dmu_tx_pool(tx);
300 
301 	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
302 		return (SET_ERROR(ENOTSUP));
303 
304 	if (dsl_bookmark_create_nvl_validate(dbca->dbca_bmarks) != 0)
305 		rv = schema_err = SET_ERROR(EINVAL);
306 
307 	for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
308 	    pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
309 		char *new = nvpair_name(pair);
310 
311 		int error = schema_err;
312 		if (error == 0) {
313 			char *source = fnvpair_value_string(pair);
314 			error = dsl_bookmark_create_check_impl(dp, new, source);
315 			if (error != 0)
316 				error = SET_ERROR(error);
317 		}
318 
319 		if (error != 0) {
320 			rv = error;
321 			if (dbca->dbca_errors != NULL)
322 				fnvlist_add_int32(dbca->dbca_errors,
323 				    new, error);
324 		}
325 	}
326 
327 	return (rv);
328 }
329 
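/*
 * Allocate an in-memory bookmark node for the given short name.  The caller
 * is responsible for filling in dbn_phys and inserting the node into the
 * dataset's ds_bookmarks AVL tree (e.g. via dsl_bookmark_node_add()).
 */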
330 static dsl_bookmark_node_t *
331 dsl_bookmark_node_alloc(char *shortname)
332 {
333 	dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
334 	dbn->dbn_name = spa_strdup(shortname);
335 	dbn->dbn_dirty = B_FALSE;
336 	mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
337 	return (dbn);
338 }
339 
340 /*
341  * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
342  */
343 static void
344 dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
345 {
346 	spa_t *spa = dsl_dataset_get_spa(snap);
347 	objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
348 	dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
349 	zbm->zbm_guid = dsp->ds_guid;
350 	zbm->zbm_creation_txg = dsp->ds_creation_txg;
351 	zbm->zbm_creation_time = dsp->ds_creation_time;
352 	zbm->zbm_redaction_obj = 0;
353 
354 	/*
355 	 * If the dataset is encrypted, create a larger bookmark to
356 	 * accommodate the IVset guid. The IVset guid was added
357 	 * after the encryption feature to prevent a problem with
358 	 * raw sends. If we encounter an encrypted dataset without
359 	 * an IVset guid we fall back to a normal bookmark.
360 	 */
361 	if (snap->ds_dir->dd_crypto_obj != 0 &&
362 	    spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
363 		(void) zap_lookup(mos, snap->ds_object,
364 		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
365 		    &zbm->zbm_ivset_guid);
366 	}
367 
368 	if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
369 		zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
370 		zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
371 		zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
372 		zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
373 
374 		dsl_dataset_t *nextds;
375 		VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
376 		    dsp->ds_next_snap_obj, FTAG, &nextds));
377 		dsl_deadlist_space(&nextds->ds_deadlist,
378 		    &zbm->zbm_referenced_freed_before_next_snap,
379 		    &zbm->zbm_compressed_freed_before_next_snap,
380 		    &zbm->zbm_uncompressed_freed_before_next_snap);
381 		dsl_dataset_rele(nextds, FTAG);
382 	} else {
383 		memset(&zbm->zbm_flags, 0,
384 		    sizeof (zfs_bookmark_phys_t) -
385 		    offsetof(zfs_bookmark_phys_t, zbm_flags));
386 	}
387 }
388 
389 /*
390  * Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
391  * SPA feature counters.
392  */
393 void
394 dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
395     dmu_tx_t *tx)
396 {
397 	dsl_pool_t *dp = dmu_tx_pool(tx);
398 	objset_t *mos = dp->dp_meta_objset;
399 
400 	if (hds->ds_bookmarks_obj == 0) {
401 		hds->ds_bookmarks_obj = zap_create_norm(mos,
402 		    U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
403 		    tx);
404 		spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
405 
406 		dsl_dataset_zapify(hds, tx);
407 		VERIFY0(zap_add(mos, hds->ds_object,
408 		    DS_FIELD_BOOKMARK_NAMES,
409 		    sizeof (hds->ds_bookmarks_obj), 1,
410 		    &hds->ds_bookmarks_obj, tx));
411 	}
412 
413 	avl_add(&hds->ds_bookmarks, dbn);
414 
415 	/*
416 	 * To maintain backwards compatibility with software that doesn't
417 	 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
418 	 * possible bookmark size.
419 	 */
420 	uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
421 	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
422 	    (dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
423 	    ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
424 		bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
425 		spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
426 	}
427 
428 	zfs_bookmark_phys_t zero_phys = { 0 };
429 	ASSERT0(memcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
430 	    &zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
431 
432 	VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
433 	    sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
434 	    &dbn->dbn_phys, tx));
435 }
436 
437 /*
438  * If redaction_list is non-null, we create a redacted bookmark and redaction
439  * list, and store its object number in the bookmark's zbm_redaction_obj.
440  */
441 static void
442 dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
443     dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps, void *tag,
444     redaction_list_t **redaction_list)
445 {
446 	dsl_pool_t *dp = dmu_tx_pool(tx);
447 	objset_t *mos = dp->dp_meta_objset;
448 	dsl_dataset_t *snapds, *bmark_fs;
449 	char *shortname;
450 	boolean_t bookmark_redacted;
451 	uint64_t *dsredactsnaps;
452 	uint64_t dsnumsnaps;
453 
454 	VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
455 	VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
456 	    &shortname));
457 
458 	dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
459 	dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
460 
461 	bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
462 	    SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
463 	if (redaction_list != NULL || bookmark_redacted) {
464 		redaction_list_t *local_rl;
465 		if (bookmark_redacted) {
466 			redact_snaps = dsredactsnaps;
467 			num_redact_snaps = dsnumsnaps;
468 		}
469 		dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
470 		    DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
471 		    DMU_OTN_UINT64_METADATA, sizeof (redaction_list_phys_t) +
472 		    num_redact_snaps * sizeof (uint64_t), tx);
473 		spa_feature_incr(dp->dp_spa,
474 		    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
475 
476 		VERIFY0(dsl_redaction_list_hold_obj(dp,
477 		    dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
478 		dsl_redaction_list_long_hold(dp, local_rl, tag);
479 
480 		ASSERT3U(local_rl->rl_dbuf->db_size, >=,
481 		    sizeof (redaction_list_phys_t) + num_redact_snaps *
482 		    sizeof (uint64_t));
483 		dmu_buf_will_dirty(local_rl->rl_dbuf, tx);
484 		memcpy(local_rl->rl_phys->rlp_snaps, redact_snaps,
485 		    sizeof (uint64_t) * num_redact_snaps);
486 		local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
487 		if (bookmark_redacted) {
488 			ASSERT3P(redaction_list, ==, NULL);
489 			local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
490 			local_rl->rl_phys->rlp_last_object = UINT64_MAX;
491 			dsl_redaction_list_long_rele(local_rl, tag);
492 			dsl_redaction_list_rele(local_rl, tag);
493 		} else {
494 			*redaction_list = local_rl;
495 		}
496 	}
497 
498 	if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
499 		spa_feature_incr(dp->dp_spa,
500 		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
501 	}
502 
503 	dsl_bookmark_node_add(bmark_fs, dbn, tx);
504 
505 	spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
506 	    "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
507 	    shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
508 	    (longlong_t)snapds->ds_object,
509 	    (longlong_t)dbn->dbn_phys.zbm_redaction_obj);
510 
511 	dsl_dataset_rele(bmark_fs, FTAG);
512 	dsl_dataset_rele(snapds, FTAG);
513 }
514 
515 
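/*
 * Create the new bookmark new_name as a copy of the existing bookmark
 * source_name, i.e. the source passed to `zfs bookmark` was itself a
 * bookmark.  See the caveat below about copying redaction bookmarks.
 */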
516 static void
517 dsl_bookmark_create_sync_impl_book(
518     const char *new_name, const char *source_name, dmu_tx_t *tx)
519 {
520 	dsl_pool_t *dp = dmu_tx_pool(tx);
521 	dsl_dataset_t *bmark_fs_source, *bmark_fs_new;
522 	char *source_shortname, *new_shortname;
523 	zfs_bookmark_phys_t source_phys;
524 
525 	VERIFY0(dsl_bookmark_hold_ds(dp, source_name, &bmark_fs_source, FTAG,
526 	    &source_shortname));
527 	VERIFY0(dsl_bookmark_hold_ds(dp, new_name, &bmark_fs_new, FTAG,
528 	    &new_shortname));
529 
530 	/*
531 	 * create a copy of the source bookmark by copying most of its members
532 	 *
533 	 * Caveat: bookmarking a redaction bookmark yields a normal bookmark
534 	 * -----------------------------------------------------------------
535 	 * Reasoning:
536 	 * - The zbm_redaction_obj would be referred to by both source and new
537 	 *   bookmark, but would be destroyed once either source or new is
538 	 *   destroyed, resulting in use-after-free of the referred object.
539 	 * - User expectation when issuing the `zfs bookmark` command is that
540 	 *   a normal bookmark of the source is created
541 	 *
542 	 * Design Alternatives For Full Redaction Bookmark Copying:
543 	 * - reference-count the redaction object => would require on-disk
544 	 *   format change for existing redaction objects
545 	 * - Copy the redaction object => cannot be done in syncing context
546 	 *   because the redaction object might be too large
547 	 */
548 
549 	VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source, source_shortname,
550 	    &source_phys));
551 	dsl_bookmark_node_t *new_dbn = dsl_bookmark_node_alloc(new_shortname);
552 
553 	memcpy(&new_dbn->dbn_phys, &source_phys, sizeof (source_phys));
554 	new_dbn->dbn_phys.zbm_redaction_obj = 0;
555 
556 	/* update feature counters */
557 	if (new_dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
558 		spa_feature_incr(dp->dp_spa,
559 		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
560 	}
561 	/* no need for redaction bookmark counter; nulled zbm_redaction_obj */
562 	/* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
563 
564 	/*
565 	 * write new bookmark
566 	 *
567 	 * Note that dsl_bookmark_lookup_impl guarantees that, if source is a
568 	 * v1 bookmark, the v2-only fields are zeroed.
569 	 * And dsl_bookmark_node_add writes back a v1-sized bookmark if
570 	 * v2 bookmarks are disabled and/or v2-only fields are zeroed.
571 	 * => bookmark copying works on pre-bookmark-v2 pools
572 	 */
573 	dsl_bookmark_node_add(bmark_fs_new, new_dbn, tx);
574 
575 	spa_history_log_internal_ds(bmark_fs_source, "bookmark", tx,
576 	    "name=%s creation_txg=%llu source_guid=%llu",
577 	    new_shortname, (longlong_t)new_dbn->dbn_phys.zbm_creation_txg,
578 	    (longlong_t)source_phys.zbm_guid);
579 
580 	dsl_dataset_rele(bmark_fs_source, FTAG);
581 	dsl_dataset_rele(bmark_fs_new, FTAG);
582 }
583 
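/*
 * Sync function for the bookmark creation sync task.  Dispatches each
 * { newbookmark -> source } pair to the snapshot-sourced or bookmark-sourced
 * implementation, depending on whether the source contains '@' or '#'.
 */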
584 void
585 dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
586 {
587 	dsl_bookmark_create_arg_t *dbca = arg;
588 
589 	ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
590 	    SPA_FEATURE_BOOKMARKS));
591 
592 	for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
593 	    pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
594 
595 		char *new = nvpair_name(pair);
596 		char *source = fnvpair_value_string(pair);
597 
598 		if (strchr(source, '@') != NULL) {
599 			dsl_bookmark_create_sync_impl_snap(new, source, tx,
600 			    0, NULL, NULL, NULL);
601 		} else if (strchr(source, '#') != NULL) {
602 			dsl_bookmark_create_sync_impl_book(new, source, tx);
603 		} else {
604 			panic("unreachable code");
605 		}
606 
607 	}
608 }
609 
610 /*
611  * The bookmarks must all be in the same pool.
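 * If non-NULL, the errors nvlist receives a per-bookmark errno for each
 * bookmark that could not be created.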
612  */
613 int
614 dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
615 {
616 	nvpair_t *pair;
617 	dsl_bookmark_create_arg_t dbca;
618 
619 	pair = nvlist_next_nvpair(bmarks, NULL);
620 	if (pair == NULL)
621 		return (0);
622 
623 	dbca.dbca_bmarks = bmarks;
624 	dbca.dbca_errors = errors;
625 
626 	return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
627 	    dsl_bookmark_create_sync, &dbca,
628 	    fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
629 }
630 
631 static int
632 dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
633 {
634 	dsl_bookmark_create_redacted_arg_t *dbcra = arg;
635 	dsl_pool_t *dp = dmu_tx_pool(tx);
636 	int rv = 0;
637 
638 	if (!spa_feature_is_enabled(dp->dp_spa,
639 	    SPA_FEATURE_REDACTION_BOOKMARKS))
640 		return (SET_ERROR(ENOTSUP));
641 	/*
642 	 * If the list of redact snaps will not fit in the bonus buffer with
643 	 * the furthest reached object and offset, fail.
644 	 */
645 	if (dbcra->dbcra_numsnaps > (dmu_bonus_max() -
646 	    sizeof (redaction_list_phys_t)) / sizeof (uint64_t))
647 		return (SET_ERROR(E2BIG));
648 
649 	if (dsl_bookmark_create_nvl_validate_pair(
650 	    dbcra->dbcra_bmark, dbcra->dbcra_snap) != 0)
651 		return (SET_ERROR(EINVAL));
652 
653 	rv = dsl_bookmark_create_check_impl(dp,
654 	    dbcra->dbcra_bmark, dbcra->dbcra_snap);
655 	return (rv);
656 }
657 
658 static void
659 dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
660 {
661 	dsl_bookmark_create_redacted_arg_t *dbcra = arg;
662 	dsl_bookmark_create_sync_impl_snap(dbcra->dbcra_bmark,
663 	    dbcra->dbcra_snap, tx, dbcra->dbcra_numsnaps, dbcra->dbcra_snaps,
664 	    dbcra->dbcra_tag, dbcra->dbcra_rl);
665 }
666 
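/*
 * Create a redacted bookmark of the given snapshot, recording numsnaps
 * redaction snapshot guids in an associated redaction list.  On success the
 * new redaction list is returned in *rl, held on behalf of `tag`.
 */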
667 int
668 dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
669     uint64_t numsnaps, uint64_t *snapguids, void *tag, redaction_list_t **rl)
670 {
671 	dsl_bookmark_create_redacted_arg_t dbcra;
672 
673 	dbcra.dbcra_bmark = bookmark;
674 	dbcra.dbcra_snap = snapshot;
675 	dbcra.dbcra_rl = rl;
676 	dbcra.dbcra_numsnaps = numsnaps;
677 	dbcra.dbcra_snaps = snapguids;
678 	dbcra.dbcra_tag = tag;
679 
680 	return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
681 	    dsl_bookmark_create_redacted_sync, &dbcra, 5,
682 	    ZFS_SPACE_CHECK_NORMAL));
683 }
684 
685 /*
686  * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
687  * If 'props' is NULL, retrieves all properties.
688  */
689 static void
690 dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
691     nvlist_t *props, nvlist_t *out_props)
692 {
693 	ASSERT3P(dp, !=, NULL);
694 	ASSERT3P(bmark_phys, !=, NULL);
695 	ASSERT3P(out_props, !=, NULL);
696 	ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
697 
698 	if (props == NULL || nvlist_exists(props,
699 	    zfs_prop_to_name(ZFS_PROP_GUID))) {
700 		dsl_prop_nvlist_add_uint64(out_props,
701 		    ZFS_PROP_GUID, bmark_phys->zbm_guid);
702 	}
703 	if (props == NULL || nvlist_exists(props,
704 	    zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
705 		dsl_prop_nvlist_add_uint64(out_props,
706 		    ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
707 	}
708 	if (props == NULL || nvlist_exists(props,
709 	    zfs_prop_to_name(ZFS_PROP_CREATION))) {
710 		dsl_prop_nvlist_add_uint64(out_props,
711 		    ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
712 	}
713 	if (props == NULL || nvlist_exists(props,
714 	    zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
715 		dsl_prop_nvlist_add_uint64(out_props,
716 		    ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
717 	}
718 	if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
719 		if (props == NULL || nvlist_exists(props,
720 		    zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
721 			dsl_prop_nvlist_add_uint64(out_props,
722 			    ZFS_PROP_REFERENCED,
723 			    bmark_phys->zbm_referenced_bytes_refd);
724 		}
725 		if (props == NULL || nvlist_exists(props,
726 		    zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
727 			dsl_prop_nvlist_add_uint64(out_props,
728 			    ZFS_PROP_LOGICALREFERENCED,
729 			    bmark_phys->zbm_uncompressed_bytes_refd);
730 		}
731 		if (props == NULL || nvlist_exists(props,
732 		    zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
733 			uint64_t ratio =
734 			    bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
735 			    bmark_phys->zbm_uncompressed_bytes_refd * 100 /
736 			    bmark_phys->zbm_compressed_bytes_refd;
737 			dsl_prop_nvlist_add_uint64(out_props,
738 			    ZFS_PROP_REFRATIO, ratio);
739 		}
740 	}
741 
742 	if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
743 	    nvlist_exists(props, "redact_complete")) &&
744 	    bmark_phys->zbm_redaction_obj != 0) {
745 		redaction_list_t *rl;
746 		int err = dsl_redaction_list_hold_obj(dp,
747 		    bmark_phys->zbm_redaction_obj, FTAG, &rl);
748 		if (err == 0) {
749 			if (nvlist_exists(props, "redact_snaps")) {
750 				nvlist_t *nvl;
751 				nvl = fnvlist_alloc();
752 				fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
753 				    rl->rl_phys->rlp_snaps,
754 				    rl->rl_phys->rlp_num_snaps);
755 				fnvlist_add_nvlist(out_props, "redact_snaps",
756 				    nvl);
757 				nvlist_free(nvl);
758 			}
759 			if (nvlist_exists(props, "redact_complete")) {
760 				nvlist_t *nvl;
761 				nvl = fnvlist_alloc();
762 				fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
763 				    rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
764 				    rl->rl_phys->rlp_last_object == UINT64_MAX);
765 				fnvlist_add_nvlist(out_props, "redact_complete",
766 				    nvl);
767 				nvlist_free(nvl);
768 			}
769 			dsl_redaction_list_rele(rl, FTAG);
770 		}
771 	}
772 }
773 
774 int
775 dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
776 {
777 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
778 
779 	ASSERT(dsl_pool_config_held(dp));
780 
781 	if (dsl_dataset_is_snapshot(ds))
782 		return (SET_ERROR(EINVAL));
783 
784 	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
785 	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
786 		nvlist_t *out_props = fnvlist_alloc();
787 
788 		dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
789 
790 		fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
791 		fnvlist_free(out_props);
792 	}
793 	return (0);
794 }
795 
796 /*
797  * Comparison func for ds_bookmarks AVL tree.  We sort the bookmarks by
798  * their TXG, then by their FBN-ness.  The "FBN-ness" component ensures
799  * that all bookmarks at the same TXG that have HAS_FBN set are adjacent,
800  * which dsl_bookmark_destroy_sync_impl() depends on.  Note that there may be
801  * multiple bookmarks at the same TXG (with the same FBN-ness).  In this
802  * case we differentiate them by an arbitrary metric (in this case,
803  * their names).
804  */
805 static int
806 dsl_bookmark_compare(const void *l, const void *r)
807 {
808 	const dsl_bookmark_node_t *ldbn = l;
809 	const dsl_bookmark_node_t *rdbn = r;
810 
811 	int64_t cmp = TREE_CMP(ldbn->dbn_phys.zbm_creation_txg,
812 	    rdbn->dbn_phys.zbm_creation_txg);
813 	if (likely(cmp))
814 		return (cmp);
815 	cmp = TREE_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
816 	    (rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
817 	if (likely(cmp))
818 		return (cmp);
819 	cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
820 	return (TREE_ISIGN(cmp));
821 }
822 
823 /*
824  * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
825  */
826 int
827 dsl_bookmark_init_ds(dsl_dataset_t *ds)
828 {
829 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
830 	objset_t *mos = dp->dp_meta_objset;
831 
832 	ASSERT(!ds->ds_is_snapshot);
833 
834 	avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
835 	    sizeof (dsl_bookmark_node_t),
836 	    offsetof(dsl_bookmark_node_t, dbn_node));
837 
838 	if (!dsl_dataset_is_zapified(ds))
839 		return (0);
840 
841 	int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
842 	    sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
843 	if (zaperr == ENOENT)
844 		return (0);
845 	if (zaperr != 0)
846 		return (zaperr);
847 
848 	if (ds->ds_bookmarks_obj == 0)
849 		return (0);
850 
851 	int err = 0;
852 	zap_cursor_t zc;
853 	zap_attribute_t attr;
854 
855 	for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
856 	    (err = zap_cursor_retrieve(&zc, &attr)) == 0;
857 	    zap_cursor_advance(&zc)) {
858 		dsl_bookmark_node_t *dbn =
859 		    dsl_bookmark_node_alloc(attr.za_name);
860 
861 		err = dsl_bookmark_lookup_impl(ds,
862 		    dbn->dbn_name, &dbn->dbn_phys);
863 		ASSERT3U(err, !=, ENOENT);
864 		if (err != 0) {
865 			kmem_free(dbn, sizeof (*dbn));
866 			break;
867 		}
868 		avl_add(&ds->ds_bookmarks, dbn);
869 	}
870 	zap_cursor_fini(&zc);
871 	if (err == ENOENT)
872 		err = 0;
873 	return (err);
874 }
875 
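/*
 * Tear down the in-memory bookmark state built by dsl_bookmark_init_ds(),
 * freeing every node in the ds_bookmarks AVL tree.
 */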
876 void
877 dsl_bookmark_fini_ds(dsl_dataset_t *ds)
878 {
879 	void *cookie = NULL;
880 	dsl_bookmark_node_t *dbn;
881 
882 	if (ds->ds_is_snapshot)
883 		return;
884 
885 	while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
886 		spa_strfree(dbn->dbn_name);
887 		mutex_destroy(&dbn->dbn_lock);
888 		kmem_free(dbn, sizeof (*dbn));
889 	}
890 	avl_destroy(&ds->ds_bookmarks);
891 }
892 
893 /*
894  * Retrieve the bookmarks that exist in the specified dataset, and the
895  * requested properties of each bookmark.
896  *
897  * The "props" nvlist specifies which properties are requested.
898  * See lzc_get_bookmarks() for the list of valid properties.
899  */
900 int
901 dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
902 {
903 	dsl_pool_t *dp;
904 	dsl_dataset_t *ds;
905 	int err;
906 
907 	err = dsl_pool_hold(dsname, FTAG, &dp);
908 	if (err != 0)
909 		return (err);
910 	err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
911 	if (err != 0) {
912 		dsl_pool_rele(dp, FTAG);
913 		return (err);
914 	}
915 
916 	err = dsl_get_bookmarks_impl(ds, props, outnvl);
917 
918 	dsl_dataset_rele(ds, FTAG);
919 	dsl_pool_rele(dp, FTAG);
920 	return (err);
921 }
922 
923 /*
924  * Retrieve all properties for a single bookmark in the given dataset.
925  */
926 int
927 dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
928 {
929 	dsl_pool_t *dp;
930 	dsl_dataset_t *ds;
931 	zfs_bookmark_phys_t bmark_phys = { 0 };
932 	int err;
933 
934 	err = dsl_pool_hold(dsname, FTAG, &dp);
935 	if (err != 0)
936 		return (err);
937 	err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
938 	if (err != 0) {
939 		dsl_pool_rele(dp, FTAG);
940 		return (err);
941 	}
942 
943 	err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
944 	if (err != 0)
945 		goto out;
946 
947 	dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
948 out:
949 	dsl_dataset_rele(ds, FTAG);
950 	dsl_pool_rele(dp, FTAG);
951 	return (err);
952 }
953 
954 typedef struct dsl_bookmark_destroy_arg {
955 	nvlist_t *dbda_bmarks;
956 	nvlist_t *dbda_success;
957 	nvlist_t *dbda_errors;
958 } dsl_bookmark_destroy_arg_t;
959 
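/*
 * Remove the bookmark `name` from dataset `ds`: adjust the SPA feature
 * counters, free any associated redaction list object, drop the deadlist
 * key if nothing else needs it, and delete both the in-memory AVL node and
 * the on-disk ZAP entry.
 */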
960 static void
961 dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
962     dmu_tx_t *tx)
963 {
964 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
965 	uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
966 	matchtype_t mt = 0;
967 	uint64_t int_size, num_ints;
968 	/*
969 	 * 'search' must be zeroed so that dbn_flags (which is used in
970 	 * dsl_bookmark_compare()) will be zeroed even if the on-disk
971 	 * (in ZAP) bookmark is shorter than offsetof(dbn_flags).
972 	 */
973 	dsl_bookmark_node_t search = { 0 };
974 	char realname[ZFS_MAX_DATASET_NAME_LEN];
975 
976 	/*
977 	 * Find the real name of this bookmark, which may be different
978 	 * from the given name if the dataset is case-insensitive.  Then
979 	 * use the real name to find the node in the ds_bookmarks AVL tree.
980 	 */
981 
982 	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
983 		mt = MT_NORMALIZE;
984 
985 	VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
986 
987 	ASSERT3U(int_size, ==, sizeof (uint64_t));
988 
989 	if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
990 		spa_feature_decr(dmu_objset_spa(mos),
991 		    SPA_FEATURE_BOOKMARK_V2, tx);
992 	}
993 	VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
994 	    num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
995 
996 	search.dbn_name = realname;
997 	dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
998 	ASSERT(dbn != NULL);
999 
1000 	if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
1001 		/*
1002 		 * If this bookmark HAS_FBN, and it is before the most
1003 		 * recent snapshot, then its TXG is a key in the head's
1004 		 * deadlist (and all clones' heads' deadlists).  If this is
1005 		 * the last thing keeping the key (i.e. there are no more
1006 		 * bookmarks with HAS_FBN at this TXG, and there is no
1007 		 * snapshot at this TXG), then remove the key.
1008 		 *
1009 		 * Note that this algorithm depends on ds_bookmarks being
1010 		 * sorted such that all bookmarks at the same TXG with
1011 		 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
1012 		 * at the same TXG in between them).  If this were not
1013 		 * the case, we would need to examine *all* bookmarks
1014 		 * at this TXG, rather than just the adjacent ones.
1015 		 */
1016 
1017 		dsl_bookmark_node_t *dbn_prev =
1018 		    AVL_PREV(&ds->ds_bookmarks, dbn);
1019 		dsl_bookmark_node_t *dbn_next =
1020 		    AVL_NEXT(&ds->ds_bookmarks, dbn);
1021 
1022 		boolean_t more_bookmarks_at_this_txg =
1023 		    (dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
1024 		    dbn->dbn_phys.zbm_creation_txg &&
1025 		    (dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
1026 		    (dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
1027 		    dbn->dbn_phys.zbm_creation_txg &&
1028 		    (dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
1029 
1030 		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
1031 		    !more_bookmarks_at_this_txg &&
1032 		    dbn->dbn_phys.zbm_creation_txg <
1033 		    dsl_dataset_phys(ds)->ds_prev_snap_txg) {
1034 			dsl_dir_remove_clones_key(ds->ds_dir,
1035 			    dbn->dbn_phys.zbm_creation_txg, tx);
1036 			dsl_deadlist_remove_key(&ds->ds_deadlist,
1037 			    dbn->dbn_phys.zbm_creation_txg, tx);
1038 		}
1039 
1040 		spa_feature_decr(dmu_objset_spa(mos),
1041 		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
1042 	}
1043 
1044 	if (dbn->dbn_phys.zbm_redaction_obj != 0) {
1045 		VERIFY0(dmu_object_free(mos,
1046 		    dbn->dbn_phys.zbm_redaction_obj, tx));
1047 		spa_feature_decr(dmu_objset_spa(mos),
1048 		    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
1049 	}
1050 
1051 	avl_remove(&ds->ds_bookmarks, dbn);
1052 	spa_strfree(dbn->dbn_name);
1053 	mutex_destroy(&dbn->dbn_lock);
1054 	kmem_free(dbn, sizeof (*dbn));
1055 
1056 	VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
1057 }
1058 
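/*
 * Check function for the bookmark destroy sync task.  Bookmarks that no
 * longer exist are silently skipped; bookmarks whose redaction list is
 * still long-held fail with EBUSY.
 */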
1059 static int
1060 dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
1061 {
1062 	dsl_bookmark_destroy_arg_t *dbda = arg;
1063 	dsl_pool_t *dp = dmu_tx_pool(tx);
1064 	int rv = 0;
1065 
1066 	ASSERT(nvlist_empty(dbda->dbda_success));
1067 	ASSERT(nvlist_empty(dbda->dbda_errors));
1068 
1069 	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
1070 		return (0);
1071 
1072 	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
1073 	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
1074 		const char *fullname = nvpair_name(pair);
1075 		dsl_dataset_t *ds;
1076 		zfs_bookmark_phys_t bm;
1077 		int error;
1078 		char *shortname;
1079 
1080 		error = dsl_bookmark_hold_ds(dp, fullname, &ds,
1081 		    FTAG, &shortname);
1082 		if (error == ENOENT) {
1083 			/* ignore it; the bookmark is "already destroyed" */
1084 			continue;
1085 		}
1086 		if (error == 0) {
1087 			error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
1088 			dsl_dataset_rele(ds, FTAG);
1089 			if (error == ESRCH) {
1090 				/*
1091 				 * ignore it; the bookmark is
1092 				 * "already destroyed"
1093 				 */
1094 				continue;
1095 			}
1096 			if (error == 0 && bm.zbm_redaction_obj != 0) {
1097 				redaction_list_t *rl = NULL;
1098 				error = dsl_redaction_list_hold_obj(tx->tx_pool,
1099 				    bm.zbm_redaction_obj, FTAG, &rl);
1100 				if (error == ENOENT) {
1101 					error = 0;
1102 				} else if (error == 0 &&
1103 				    dsl_redaction_list_long_held(rl)) {
1104 					error = SET_ERROR(EBUSY);
1105 				}
1106 				if (rl != NULL) {
1107 					dsl_redaction_list_rele(rl, FTAG);
1108 				}
1109 			}
1110 		}
1111 		if (error == 0) {
1112 			if (dmu_tx_is_syncing(tx)) {
1113 				fnvlist_add_boolean(dbda->dbda_success,
1114 				    fullname);
1115 			}
1116 		} else {
1117 			fnvlist_add_int32(dbda->dbda_errors, fullname, error);
1118 			rv = error;
1119 		}
1120 	}
1121 	return (rv);
1122 }
1123 
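/*
 * Sync function for the bookmark destroy sync task.  Destroys each bookmark
 * that passed the check, and once a dataset has no bookmarks left, frees its
 * bookmarks ZAP object and decrements the SPA_FEATURE_BOOKMARKS counter.
 */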
1124 static void
1125 dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
1126 {
1127 	dsl_bookmark_destroy_arg_t *dbda = arg;
1128 	dsl_pool_t *dp = dmu_tx_pool(tx);
1129 	objset_t *mos = dp->dp_meta_objset;
1130 
1131 	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
1132 	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
1133 		dsl_dataset_t *ds;
1134 		char *shortname;
1135 		uint64_t zap_cnt;
1136 
1137 		VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
1138 		    &ds, FTAG, &shortname));
1139 		dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
1140 
1141 		/*
1142 		 * If all of this dataset's bookmarks have been destroyed,
1143 		 * free the zap object and decrement the feature's use count.
1144 		 */
1145 		VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
1146 		if (zap_cnt == 0) {
1147 			dmu_buf_will_dirty(ds->ds_dbuf, tx);
1148 			VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
1149 			ds->ds_bookmarks_obj = 0;
1150 			spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
1151 			VERIFY0(zap_remove(mos, ds->ds_object,
1152 			    DS_FIELD_BOOKMARK_NAMES, tx));
1153 		}
1154 
1155 		spa_history_log_internal_ds(ds, "remove bookmark", tx,
1156 		    "name=%s", shortname);
1157 
1158 		dsl_dataset_rele(ds, FTAG);
1159 	}
1160 }
1161 
1162 /*
1163  * The bookmarks must all be in the same pool.
1164  */
1165 int
1166 dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
1167 {
1168 	int rv;
1169 	dsl_bookmark_destroy_arg_t dbda;
1170 	nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
1171 	if (pair == NULL)
1172 		return (0);
1173 
1174 	dbda.dbda_bmarks = bmarks;
1175 	dbda.dbda_errors = errors;
1176 	dbda.dbda_success = fnvlist_alloc();
1177 
1178 	rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
1179 	    dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
1180 	    ZFS_SPACE_CHECK_RESERVED);
1181 	fnvlist_free(dbda.dbda_success);
1182 	return (rv);
1183 }
1184 
1185 /* Return B_TRUE if there are any long holds on this redaction list. */
1186 boolean_t
1187 dsl_redaction_list_long_held(redaction_list_t *rl)
1188 {
1189 	return (!zfs_refcount_is_zero(&rl->rl_longholds));
1190 }
1191 
1192 void
1193 dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl, void *tag)
1194 {
1195 	ASSERT(dsl_pool_config_held(dp));
1196 	(void) zfs_refcount_add(&rl->rl_longholds, tag);
1197 }
1198 
1199 void
1200 dsl_redaction_list_long_rele(redaction_list_t *rl, void *tag)
1201 {
1202 	(void) zfs_refcount_remove(&rl->rl_longholds, tag);
1203 }
1204 
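/*
 * Dbuf user eviction callback: free the in-memory redaction_list_t once the
 * underlying bonus buffer is evicted.
 */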
1205 static void
1206 redaction_list_evict_sync(void *rlu)
1207 {
1208 	redaction_list_t *rl = rlu;
1209 	zfs_refcount_destroy(&rl->rl_longholds);
1210 
1211 	kmem_free(rl, sizeof (redaction_list_t));
1212 }
1213 
1214 void
1215 dsl_redaction_list_rele(redaction_list_t *rl, void *tag)
1216 {
1217 	dmu_buf_rele(rl->rl_dbuf, tag);
1218 }
1219 
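/*
 * Hold the redaction list stored in the bonus buffer of object `rlobj`,
 * attaching (or reusing) a redaction_list_t as the dbuf user.  Release the
 * hold with dsl_redaction_list_rele().
 */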
1220 int
1221 dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, void *tag,
1222     redaction_list_t **rlp)
1223 {
1224 	objset_t *mos = dp->dp_meta_objset;
1225 	dmu_buf_t *dbuf;
1226 	redaction_list_t *rl;
1227 	int err;
1228 
1229 	ASSERT(dsl_pool_config_held(dp));
1230 
1231 	err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
1232 	if (err != 0)
1233 		return (err);
1234 
1235 	rl = dmu_buf_get_user(dbuf);
1236 	if (rl == NULL) {
1237 		redaction_list_t *winner = NULL;
1238 
1239 		rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
1240 		rl->rl_dbuf = dbuf;
1241 		rl->rl_object = rlobj;
1242 		rl->rl_phys = dbuf->db_data;
1243 		rl->rl_mos = dp->dp_meta_objset;
1244 		zfs_refcount_create(&rl->rl_longholds);
1245 		dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
1246 		    &rl->rl_dbuf);
1247 		if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
1248 			kmem_free(rl, sizeof (*rl));
1249 			rl = winner;
1250 		}
1251 	}
1252 	*rlp = rl;
1253 	return (0);
1254 }
1255 
1256 /*
1257  * Snapshot ds is being destroyed.
1258  *
1259  * Adjust the "freed_before_next" of any bookmarks between this snap
1260  * and the previous snapshot, because their "next snapshot" is changing.
1261  *
1262  * If there are any bookmarks with HAS_FBN at this snapshot, remove
1263  * their SNAPSHOT_EXISTS flag (note: there can be at most one snapshot of
1264  * each filesystem at a given txg), and return B_TRUE.  In this case
1265  * the caller can not remove the key in the deadlist at this TXG, because
1266  * the HAS_FBN bookmarks require the key be there.
1267  *
1268  * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1269  * snapshot's TXG.  In this case the caller can remove the key in the
1270  * deadlist at this TXG.
1271  */
1272 boolean_t
1273 dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1274 {
1275 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1276 
1277 	dsl_dataset_t *head, *next;
1278 	VERIFY0(dsl_dataset_hold_obj(dp,
1279 	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
1280 	VERIFY0(dsl_dataset_hold_obj(dp,
1281 	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
1282 
1283 	/*
1284 	 * Find the first bookmark that HAS_FBN at or after the
1285 	 * previous snapshot.
1286 	 */
1287 	dsl_bookmark_node_t search = { 0 };
1288 	avl_index_t idx;
1289 	search.dbn_phys.zbm_creation_txg =
1290 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1291 	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1292 	/*
1293 	 * The empty-string name can't be in the AVL, and it compares
1294 	 * before any entries with this TXG.
1295 	 */
1296 	search.dbn_name = "";
1297 	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1298 	dsl_bookmark_node_t *dbn =
1299 	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1300 
1301 	/*
1302 	 * Iterate over all bookmarks that are at or after the previous
1303 	 * snapshot, and before this (being deleted) snapshot.  Adjust
1304 	 * their FBN based on their new next snapshot.
1305 	 */
1306 	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
1307 	    dsl_dataset_phys(ds)->ds_creation_txg;
1308 	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1309 		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
1310 			continue;
1311 		/*
1312 		 * Increase our FBN by the amount of space that was live
1313 		 * (referenced) at the time of this bookmark (i.e.
1314 		 * birth <= zbm_creation_txg), and killed between this
1315 		 * (being deleted) snapshot and the next snapshot (i.e.
1316 		 * on the next snapshot's deadlist).  (Space killed before
1317 		 * this is already on our FBN.)
1318 		 */
1319 		uint64_t referenced, compressed, uncompressed;
1320 		dsl_deadlist_space_range(&next->ds_deadlist,
1321 		    0, dbn->dbn_phys.zbm_creation_txg,
1322 		    &referenced, &compressed, &uncompressed);
1323 		dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1324 		    referenced;
1325 		dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1326 		    compressed;
1327 		dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1328 		    uncompressed;
1329 		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1330 		    dbn->dbn_name, sizeof (uint64_t),
1331 		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1332 		    &dbn->dbn_phys, tx));
1333 	}
1334 	dsl_dataset_rele(next, FTAG);
1335 
1336 	/*
1337 	 * There may be several bookmarks at this txg (the TXG of the
1338 	 * snapshot being deleted).  We need to clear the SNAPSHOT_EXISTS
1339 	 * flag on all of them, and return TRUE if there is at least 1
1340 	 * bookmark here with HAS_FBN (thus preventing the deadlist
1341 	 * key from being removed).
1342 	 */
1343 	boolean_t rv = B_FALSE;
1344 	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1345 	    dsl_dataset_phys(ds)->ds_creation_txg;
1346 	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1347 		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1348 			ASSERT(!(dbn->dbn_phys.zbm_flags &
1349 			    ZBM_FLAG_SNAPSHOT_EXISTS));
1350 			continue;
1351 		}
1352 		ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
1353 		dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
1354 		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1355 		    dbn->dbn_name, sizeof (uint64_t),
1356 		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1357 		    &dbn->dbn_phys, tx));
1358 		rv = B_TRUE;
1359 	}
1360 	dsl_dataset_rele(head, FTAG);
1361 	return (rv);
1362 }
1363 
1364 /*
1365  * A snapshot is being created of this (head) dataset.
1366  *
1367  * We don't keep keys in the deadlist for the most recent snapshot, or any
1368  * bookmarks at or after it, because there can't be any blocks on the
1369  * deadlist in this range.  Now that the most recent snapshot is after
1370  * all bookmarks, we need to add these keys.  Note that the caller always
1371  * adds a key at the previous snapshot, so we only add keys for bookmarks
1372  * after that.
1373  */
1374 void
1375 dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1376 {
1377 	uint64_t last_key_added = UINT64_MAX;
1378 	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1379 	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
1380 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1381 	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1382 		uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
1383 		ASSERT3U(creation_txg, <=, last_key_added);
1384 		/*
1385 		 * Note, there may be multiple bookmarks at this TXG,
1386 		 * and we only want to add the key for this TXG once.
1387 		 * The ds_bookmarks AVL is sorted by TXG, so we will visit
1388 		 * these bookmarks in sequence.
1389 		 */
1390 		if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
1391 		    creation_txg != last_key_added) {
1392 			dsl_deadlist_add_key(&ds->ds_deadlist,
1393 			    creation_txg, tx);
1394 			last_key_added = creation_txg;
1395 		}
1396 	}
1397 }
1398 
1399 /*
1400  * The next snapshot of the origin dataset has changed, due to
1401  * promote or clone swap.  If there are any bookmarks at this dataset,
1402  * we need to update their zbm_*_freed_before_next_snap to reflect this.
1403  * The head dataset has the relevant bookmarks in ds_bookmarks.
1404  */
1405 void
1406 dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
1407     dmu_tx_t *tx)
1408 {
1409 	dsl_pool_t *dp = dmu_tx_pool(tx);
1410 
1411 	/*
1412 	 * Find the first bookmark that HAS_FBN at the origin snapshot.
1413 	 */
1414 	dsl_bookmark_node_t search = { 0 };
1415 	avl_index_t idx;
1416 	search.dbn_phys.zbm_creation_txg =
1417 	    dsl_dataset_phys(origin)->ds_creation_txg;
1418 	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1419 	/*
1420 	 * The empty-string name can't be in the AVL, and it compares
1421 	 * before any entries with this TXG.
1422 	 */
1423 	search.dbn_name = "";
1424 	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1425 	dsl_bookmark_node_t *dbn =
1426 	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1427 
1428 	/*
1429 	 * Iterate over all bookmarks that are at the origin txg.
1430 	 * Adjust their FBN based on their new next snapshot.
1431 	 */
1432 	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1433 	    dsl_dataset_phys(origin)->ds_creation_txg &&
1434 	    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1435 	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1436 
1437 		/*
1438 		 * Bookmark is at the origin, therefore its
1439 		 * "next dataset" is changing, so we need
1440 		 * to reset its FBN by recomputing it in
1441 		 * dsl_bookmark_set_phys().
1442 		 */
1443 		ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
1444 		    dsl_dataset_phys(origin)->ds_guid);
1445 		ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
1446 		    dsl_dataset_phys(origin)->ds_referenced_bytes);
1447 		ASSERT(dbn->dbn_phys.zbm_flags &
1448 		    ZBM_FLAG_SNAPSHOT_EXISTS);
1449 		/*
1450 		 * Save and restore the zbm_redaction_obj, which
1451 		 * is zeroed by dsl_bookmark_set_phys().
1452 		 */
1453 		uint64_t redaction_obj =
1454 		    dbn->dbn_phys.zbm_redaction_obj;
1455 		dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
1456 		dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
1457 
1458 		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1459 		    dbn->dbn_name, sizeof (uint64_t),
1460 		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1461 		    &dbn->dbn_phys, tx));
1462 	}
1463 }
1464 
1465 /*
1466  * This block is no longer referenced by this (head) dataset.
1467  *
1468  * Adjust the FBN of any bookmarks that reference this block, whose "next"
1469  * is the head dataset.
1470  */
1471 void
1472 dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
1473 {
1474 	(void) tx;
1475 
1476 	/*
1477 	 * Iterate over bookmarks whose "next" is the head dataset.
1478 	 */
1479 	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1480 	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1481 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1482 	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1483 		/*
1484 		 * If the block was live (referenced) at the time of this
1485 		 * bookmark, add its space to the bookmark's FBN.
1486 		 */
1487 		if (bp->blk_birth <= dbn->dbn_phys.zbm_creation_txg &&
1488 		    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1489 			mutex_enter(&dbn->dbn_lock);
1490 			dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1491 			    bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
1492 			dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1493 			    BP_GET_PSIZE(bp);
1494 			dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1495 			    BP_GET_UCSIZE(bp);
1496 			/*
1497 			 * Changing the ZAP object here would be too
1498 			 * expensive.  Also, we may be called from the zio
1499 			 * interrupt thread, which can't block on i/o.
1500 			 * Therefore, we mark this bookmark as dirty and
1501 			 * modify the ZAP once per txg, in
1502 			 * dsl_bookmark_sync_done().
1503 			 */
1504 			dbn->dbn_dirty = B_TRUE;
1505 			mutex_exit(&dbn->dbn_lock);
1506 		}
1507 	}
1508 }
1509 
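/*
 * Write out the phys of any bookmarks that dsl_bookmark_block_killed()
 * marked dirty during this txg, so the ZAP is only updated once per txg.
 */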
1510 void
1511 dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
1512 {
1513 	dsl_pool_t *dp = dmu_tx_pool(tx);
1514 
1515 	if (dsl_dataset_is_snapshot(ds))
1516 		return;
1517 
1518 	/*
1519 	 * We only dirty bookmarks that are at or after the most recent
1520 	 * snapshot.  We can't create snapshots between
1521 	 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
1522 	 * don't need to look at any bookmarks before ds_prev_snap_txg.
1523 	 */
1524 	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1525 	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1526 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1527 	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1528 		if (dbn->dbn_dirty) {
1529 			/*
1530 			 * We only dirty nodes with HAS_FBN, therefore
1531 			 * we can always use the current bookmark struct size.
1532 			 */
1533 			ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1534 			VERIFY0(zap_update(dp->dp_meta_objset,
1535 			    ds->ds_bookmarks_obj,
1536 			    dbn->dbn_name, sizeof (uint64_t),
1537 			    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1538 			    &dbn->dbn_phys, tx));
1539 			dbn->dbn_dirty = B_FALSE;
1540 		}
1541 	}
1542 #ifdef ZFS_DEBUG
1543 	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
1544 	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
1545 		ASSERT(!dbn->dbn_dirty);
1546 	}
1547 #endif
1548 }
1549 
1550 /*
1551  * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1552  */
1553 uint64_t
1554 dsl_bookmark_latest_txg(dsl_dataset_t *ds)
1555 {
1556 	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1557 	dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1558 	if (dbn == NULL)
1559 		return (0);
1560 	return (dbn->dbn_phys.zbm_creation_txg);
1561 }
1562 
1563 /*
1564  * Compare the redact_block_phys_t to the bookmark. If the last block in the
1565  * redact_block_phys_t is before the bookmark, return -1.  If the first block in
1566  * the redact_block_phys_t is after the bookmark, return 1.  Otherwise, the
1567  * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1568  */
1569 static int
1570 redact_block_zb_compare(redact_block_phys_t *first,
1571     zbookmark_phys_t *second)
1572 {
1573 	/*
1574 	 * If the block_phys is for a previous object, or the last block in the
1575 	 * block_phys is strictly before the block in the bookmark, the
1576 	 * block_phys is earlier.
1577 	 */
1578 	if (first->rbp_object < second->zb_object ||
1579 	    (first->rbp_object == second->zb_object &&
1580 	    first->rbp_blkid + (redact_block_get_count(first) - 1) <
1581 	    second->zb_blkid)) {
1582 		return (-1);
1583 	}
1584 
1585 	/*
1586 	 * If the bookmark is for a previous object, or the block in the
1587 	 * bookmark is strictly before the first block in the block_phys, the
1588 	 * bookmark is earlier.
1589 	 */
1590 	if (first->rbp_object > second->zb_object ||
1591 	    (first->rbp_object == second->zb_object &&
1592 	    first->rbp_blkid > second->zb_blkid)) {
1593 		return (1);
1594 	}
1595 
1596 	return (0);
1597 }
1598 
1599 /*
1600  * Traverse the redaction list in the provided object, and call the callback for
1601  * each entry we find. Don't call the callback for any records before resume.
1602  */
1603 int
1604 dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
1605     rl_traverse_callback_t cb, void *arg)
1606 {
1607 	objset_t *mos = rl->rl_mos;
1608 	int err = 0;
1609 
1610 	if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
1611 	    rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
1612 		/*
1613 		 * When we finish a send, we update the last object and offset
1614 		 * to UINT64_MAX.  If a send fails partway through, the last
1615 		 * object and offset will have some other value, indicating how
1616 		 * far the send got. The redaction list must be complete before
1617 		 * it can be traversed, so return EINVAL if the last object and
1618 		 * blkid are not set to UINT64_MAX.
1619 		 */
1620 		return (SET_ERROR(EINVAL));
1621 	}
1622 
1623 	/*
1624 	 * This allows us to skip the binary search and resume checking logic
1625 	 * below, if we're not resuming a redacted send.
1626 	 */
1627 	if (ZB_IS_ZERO(resume))
1628 		resume = NULL;
1629 
1630 	/*
1631 	 * Binary search for the point to resume from.
1632 	 */
1633 	uint64_t maxidx = rl->rl_phys->rlp_num_entries - 1;
1634 	uint64_t minidx = 0;
1635 	while (resume != NULL && maxidx > minidx) {
1636 		redact_block_phys_t rbp = { 0 };
1637 		ASSERT3U(maxidx, >, minidx);
1638 		uint64_t mididx = minidx + ((maxidx - minidx) / 2);
1639 		err = dmu_read(mos, rl->rl_object, mididx * sizeof (rbp),
1640 		    sizeof (rbp), &rbp, DMU_READ_NO_PREFETCH);
1641 		if (err != 0)
1642 			break;
1643 
1644 		int cmp = redact_block_zb_compare(&rbp, resume);
1645 
1646 		if (cmp == 0) {
1647 			minidx = mididx;
1648 			break;
1649 		} else if (cmp > 0) {
1650 			maxidx =
1651 			    (mididx == minidx ? minidx : mididx - 1);
1652 		} else {
1653 			minidx = mididx + 1;
1654 		}
1655 	}
1656 
1657 	unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
1658 	redact_block_phys_t *buf = zio_data_buf_alloc(bufsize);
1659 
1660 	unsigned int entries_per_buf = bufsize / sizeof (redact_block_phys_t);
1661 	uint64_t start_block = minidx / entries_per_buf;
1662 	err = dmu_read(mos, rl->rl_object, start_block * bufsize, bufsize, buf,
1663 	    DMU_READ_PREFETCH);
1664 
1665 	for (uint64_t curidx = minidx;
1666 	    err == 0 && curidx < rl->rl_phys->rlp_num_entries;
1667 	    curidx++) {
1668 		/*
1669 		 * We read in the redaction list one block at a time.  Once we
1670 		 * finish with all the entries in a given block, we read in a
1671 		 * new one.  The predictive prefetcher will take care of any
1672 		 * prefetching, and this code shouldn't be the bottleneck, so we
1673 		 * don't need to do manual prefetching.
1674 		 */
1675 		if (curidx % entries_per_buf == 0) {
1676 			err = dmu_read(mos, rl->rl_object, curidx *
1677 			    sizeof (*buf), bufsize, buf,
1678 			    DMU_READ_PREFETCH);
1679 			if (err != 0)
1680 				break;
1681 		}
1682 		redact_block_phys_t *rb = &buf[curidx % entries_per_buf];
1683 		/*
1684 		 * If resume is non-null, we should either not send the data, or
1685 		 * null out resume so we don't have to keep doing these
1686 		 * comparisons.
1687 		 */
1688 		if (resume != NULL) {
1689 			/*
1690 			 * It is possible that after the binary search we got
1691 			 * a record before the resume point. There are two cases
1692 			 * where this can occur. If the record is the last
1693 			 * redaction record, and the resume point is after the
1694 			 * end of the redacted data, curidx will be the last
1695 			 * redaction record. In that case, the loop will end
1696 			 * after this iteration. The second case is if the
1697 			 * resume point is between two redaction records, the
1698 			 * binary search can return either the record before
1699 			 * or after the resume point. In that case, the next
1700 			 * iteration will be greater than the resume point.
1701 			 */
1702 			if (redact_block_zb_compare(rb, resume) < 0) {
1703 				ASSERT3U(curidx, ==, minidx);
1704 				continue;
1705 			} else {
1706 				/*
1707 				 * If the place to resume is in the middle of
1708 				 * the range described by this
1709 				 * redact_block_phys, then modify the
1710 				 * redact_block_phys in memory so we generate
1711 				 * the right records.
1712 				 */
1713 				if (resume->zb_object == rb->rbp_object &&
1714 				    resume->zb_blkid > rb->rbp_blkid) {
1715 					uint64_t diff = resume->zb_blkid -
1716 					    rb->rbp_blkid;
1717 					rb->rbp_blkid = resume->zb_blkid;
1718 					redact_block_set_count(rb,
1719 					    redact_block_get_count(rb) - diff);
1720 				}
1721 				resume = NULL;
1722 			}
1723 		}
1724 
1725 		if (cb(rb, arg) != 0) {
1726 			err = EINTR;
1727 			break;
1728 		}
1729 	}
1730 
1731 	zio_data_buf_free(buf, bufsize);
1732 	return (err);
1733 }
1734