1 /*
2  * CDDL HEADER START
3  *
4  * This file and its contents are supplied under the terms of the
5  * Common Development and Distribution License ("CDDL"), version 1.0.
6  * You may only use this file in accordance with the terms of version
7  * 1.0 of the CDDL.
8  *
9  * A full copy of the text of the CDDL should have accompanied this
10  * source.  A copy of the CDDL is also available via the Internet at
11  * http://www.illumos.org/license/CDDL.
12  *
13  * CDDL HEADER END
14  */
15 
16 /*
17  * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
18  * Copyright 2017 Nexenta Systems, Inc.
19  * Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
20  */
21 
22 #include <sys/zfs_context.h>
23 #include <sys/dsl_dataset.h>
24 #include <sys/dsl_dir.h>
25 #include <sys/dsl_prop.h>
26 #include <sys/dsl_synctask.h>
27 #include <sys/dsl_destroy.h>
28 #include <sys/dmu_impl.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/arc.h>
31 #include <sys/zap.h>
32 #include <sys/zfeature.h>
33 #include <sys/spa.h>
34 #include <sys/dsl_bookmark.h>
35 #include <zfs_namecheck.h>
36 #include <sys/dmu_send.h>
37 
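/*
 * Given a full bookmark name of the form <fs>#<shortname>, hold the dataset
 * that contains the bookmark and return a pointer (into fullname) to the
 * short name.  The caller must release the dataset hold with
 * dsl_dataset_rele().
 */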
38 static int
39 dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
40     dsl_dataset_t **dsp, void *tag, char **shortnamep)
41 {
42 	char buf[ZFS_MAX_DATASET_NAME_LEN];
43 	char *hashp;
44 
45 	if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
46 		return (SET_ERROR(ENAMETOOLONG));
47 	hashp = strchr(fullname, '#');
48 	if (hashp == NULL)
49 		return (SET_ERROR(EINVAL));
50 
51 	*shortnamep = hashp + 1;
52 	if (zfs_component_namecheck(*shortnamep, NULL, NULL))
53 		return (SET_ERROR(EINVAL));
54 	(void) strlcpy(buf, fullname, hashp - fullname + 1);
55 	return (dsl_dataset_hold(dp, buf, tag, dsp));
56 }
57 
58 /*
59  * When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
60  * to be zeroed.
61  *
62  * Returns ESRCH if bookmark is not found.
63  * Note, we need to use the ZAP rather than the AVL to look up bookmarks
64  * by name, because only the ZAP honors the casesensitivity setting.
65  */
66 int
67 dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
68     zfs_bookmark_phys_t *bmark_phys)
69 {
70 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
71 	uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
72 	matchtype_t mt = 0;
73 	int err;
74 
75 	if (bmark_zapobj == 0)
76 		return (SET_ERROR(ESRCH));
77 
78 	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
79 		mt = MT_NORMALIZE;
80 
81 	/*
82 	 * Zero out the bookmark in case the one stored on disk
83 	 * is in an older, shorter format.
84 	 */
85 	bzero(bmark_phys, sizeof (*bmark_phys));
86 
87 	err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
88 	    sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
89 	    NULL);
90 
91 	return (err == ENOENT ? SET_ERROR(ESRCH) : err);
92 }
93 
94 /*
95  * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
 * does not represent an earlier point in later_ds's timeline.  However,
97  * bmp will still be filled in if we return EXDEV.
98  *
99  * Returns ENOENT if the dataset containing the bookmark does not exist.
100  * Returns ESRCH if the dataset exists but the bookmark was not found in it.
101  */
102 int
103 dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
104     dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
105 {
106 	char *shortname;
107 	dsl_dataset_t *ds;
108 	int error;
109 
110 	error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
111 	if (error != 0)
112 		return (error);
113 
114 	error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
115 	if (error == 0 && later_ds != NULL) {
116 		if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
117 			error = SET_ERROR(EXDEV);
118 	}
119 	dsl_dataset_rele(ds, FTAG);
120 	return (error);
121 }
122 
123 /*
124  * Validates that
125  * - bmark is a full dataset path of a bookmark (bookmark_namecheck)
126  * - source is a full path of a snapshot or bookmark
127  *   ({bookmark,snapshot}_namecheck)
128  *
129  * Returns 0 if valid, -1 otherwise.
130  */
131 static int
132 dsl_bookmark_create_nvl_validate_pair(const char *bmark, const char *source)
133 {
134 	if (bookmark_namecheck(bmark, NULL, NULL) != 0)
135 		return (-1);
136 
137 	int is_bmark, is_snap;
138 	is_bmark = bookmark_namecheck(source, NULL, NULL) == 0;
139 	is_snap = snapshot_namecheck(source, NULL, NULL) == 0;
140 	if (!is_bmark && !is_snap)
141 		return (-1);
142 
143 	return (0);
144 }
145 
146 /*
147  * Check that the given nvlist corresponds to the following schema:
148  *  { newbookmark -> source, ... }
149  * where
150  * - each pair passes dsl_bookmark_create_nvl_validate_pair
151  * - all newbookmarks are in the same pool
152  * - all newbookmarks have unique names
153  *
 * Note that this function only validates the above schema. Callers must
 * ensure that the bookmarks can be created, e.g. that sources exist.
 *
 * Returns 0 if the nvlist adheres to the above schema.
158  * Returns -1 if it doesn't.
159  */
160 int
161 dsl_bookmark_create_nvl_validate(nvlist_t *bmarks)
162 {
163 	char *first;
164 	size_t first_len;
165 
166 	first = NULL;
167 	for (nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
168 	    pair != NULL; pair = nvlist_next_nvpair(bmarks, pair)) {
169 
170 		char *bmark = nvpair_name(pair);
171 		char *source;
172 
173 		/* list structure: values must be snapshots XOR bookmarks */
174 		if (nvpair_value_string(pair, &source) != 0)
175 			return (-1);
176 		if (dsl_bookmark_create_nvl_validate_pair(bmark, source) != 0)
177 			return (-1);
178 
179 		/* same pool check */
180 		if (first == NULL) {
181 			char *cp = strpbrk(bmark, "/#");
182 			if (cp == NULL)
183 				return (-1);
184 			first = bmark;
185 			first_len = cp - bmark;
186 		}
187 		if (strncmp(first, bmark, first_len) != 0)
188 			return (-1);
189 		switch (*(bmark + first_len)) {
		case '/': /* fallthrough */
		case '#':
			break;
		default:
			return (-1);
195 		}
196 
197 		/* unique newbookmark names; todo: O(n^2) */
198 		for (nvpair_t *pair2 = nvlist_next_nvpair(bmarks, pair);
199 		    pair2 != NULL; pair2 = nvlist_next_nvpair(bmarks, pair2)) {
200 			if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
201 				return (-1);
202 		}
203 
204 	}
205 	return (0);
206 }
207 
208 /*
209  * expects that newbm and source have been validated using
210  * dsl_bookmark_create_nvl_validate_pair
211  */
212 static int
213 dsl_bookmark_create_check_impl(dsl_pool_t *dp,
214     const char *newbm, const char *source)
215 {
216 	ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm, source));
217 	/* defer source namecheck until we know it's a snapshot or bookmark */
218 
219 	int error;
220 	dsl_dataset_t *newbm_ds;
221 	char *newbm_short;
222 	zfs_bookmark_phys_t bmark_phys;
223 
224 	error = dsl_bookmark_hold_ds(dp, newbm, &newbm_ds, FTAG, &newbm_short);
225 	if (error != 0)
226 		return (error);
227 
228 	/* Verify that the new bookmark does not already exist */
229 	error = dsl_bookmark_lookup_impl(newbm_ds, newbm_short, &bmark_phys);
230 	switch (error) {
231 	case ESRCH:
232 		/* happy path: new bmark doesn't exist, proceed after switch */
233 		error = 0;
234 		break;
235 	case 0:
236 		error = SET_ERROR(EEXIST);
237 		goto eholdnewbmds;
238 	default:
		/* dsl_bookmark_lookup_impl already did SET_ERROR */
240 		goto eholdnewbmds;
241 	}
242 
243 	/* error is retval of the following if-cascade */
244 	if (strchr(source, '@') != NULL) {
245 		dsl_dataset_t *source_snap_ds;
246 		ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
247 		error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
248 		if (error == 0) {
249 			VERIFY(source_snap_ds->ds_is_snapshot);
250 			/*
251 			 * Verify that source snapshot is an earlier point in
252 			 * newbm_ds's timeline (source may be newbm_ds's origin)
253 			 */
254 			if (!dsl_dataset_is_before(newbm_ds, source_snap_ds, 0))
255 				error = SET_ERROR(
256 				    ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
257 			dsl_dataset_rele(source_snap_ds, FTAG);
258 		}
259 	} else if (strchr(source, '#') != NULL) {
260 		zfs_bookmark_phys_t source_phys;
261 		ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
262 		/*
		 * Source must exist and be an earlier point in newbm_ds's
264 		 * timeline (newbm_ds's origin may be a snap of source's ds)
265 		 */
266 		error = dsl_bookmark_lookup(dp, source, newbm_ds, &source_phys);
267 		switch (error) {
268 		case 0:
269 			break; /* happy path */
270 		case EXDEV:
271 			error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
272 			break;
273 		default:
			/* dsl_bookmark_lookup already did SET_ERROR */
275 			break;
276 		}
277 	} else {
278 		/*
279 		 * dsl_bookmark_create_nvl_validate validates that source is
280 		 * either snapshot or bookmark
281 		 */
282 		panic("unreachable code: %s", source);
283 	}
284 
285 eholdnewbmds:
286 	dsl_dataset_rele(newbm_ds, FTAG);
287 	return (error);
288 }
289 
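/*
 * Check function for the bookmark creation sync task.  Validates the
 * requested { new bookmark -> source } nvlist and, if dbca->dbca_errors is
 * non-NULL, records a per-bookmark error code for each pair that fails.
 */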
290 int
291 dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
292 {
293 	dsl_bookmark_create_arg_t *dbca = arg;
294 	int rv = 0;
295 	int schema_err = 0;
296 	ASSERT3P(dbca, !=, NULL);
297 	ASSERT3P(dbca->dbca_bmarks, !=, NULL);
298 	/* dbca->dbca_errors is allowed to be NULL */
299 
300 	dsl_pool_t *dp = dmu_tx_pool(tx);
301 
302 	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
303 		return (SET_ERROR(ENOTSUP));
304 
305 	if (dsl_bookmark_create_nvl_validate(dbca->dbca_bmarks) != 0)
306 		rv = schema_err = SET_ERROR(EINVAL);
307 
308 	for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
309 	    pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
310 		char *new = nvpair_name(pair);
311 
312 		int error = schema_err;
313 		if (error == 0) {
314 			char *source = fnvpair_value_string(pair);
315 			error = dsl_bookmark_create_check_impl(dp, new, source);
316 			if (error != 0)
317 				error = SET_ERROR(error);
318 		}
319 
320 		if (error != 0) {
321 			rv = error;
322 			if (dbca->dbca_errors != NULL)
323 				fnvlist_add_int32(dbca->dbca_errors,
324 				    new, error);
325 		}
326 	}
327 
328 	return (rv);
329 }
330 
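/*
 * Allocate an in-core bookmark node for the given short name.  The physical
 * portion (dbn_phys) is left uninitialized; callers fill it in, e.g. via
 * dsl_bookmark_set_phys() or dsl_bookmark_lookup_impl().
 */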
331 static dsl_bookmark_node_t *
332 dsl_bookmark_node_alloc(char *shortname)
333 {
334 	dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
335 	dbn->dbn_name = spa_strdup(shortname);
336 	dbn->dbn_dirty = B_FALSE;
337 	mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
338 	return (dbn);
339 }
340 
341 /*
342  * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
343  */
344 static void
345 dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
346 {
347 	spa_t *spa = dsl_dataset_get_spa(snap);
348 	objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
349 	dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
350 	zbm->zbm_guid = dsp->ds_guid;
351 	zbm->zbm_creation_txg = dsp->ds_creation_txg;
352 	zbm->zbm_creation_time = dsp->ds_creation_time;
353 	zbm->zbm_redaction_obj = 0;
354 
355 	/*
	 * If the dataset is encrypted, create a larger bookmark to
357 	 * accommodate the IVset guid. The IVset guid was added
358 	 * after the encryption feature to prevent a problem with
359 	 * raw sends. If we encounter an encrypted dataset without
360 	 * an IVset guid we fall back to a normal bookmark.
361 	 */
362 	if (snap->ds_dir->dd_crypto_obj != 0 &&
363 	    spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
364 		(void) zap_lookup(mos, snap->ds_object,
365 		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
366 		    &zbm->zbm_ivset_guid);
367 	}
368 
369 	if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
370 		zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
371 		zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
372 		zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
373 		zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
374 
375 		dsl_dataset_t *nextds;
376 		VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
377 		    dsp->ds_next_snap_obj, FTAG, &nextds));
378 		dsl_deadlist_space(&nextds->ds_deadlist,
379 		    &zbm->zbm_referenced_freed_before_next_snap,
380 		    &zbm->zbm_compressed_freed_before_next_snap,
381 		    &zbm->zbm_uncompressed_freed_before_next_snap);
382 		dsl_dataset_rele(nextds, FTAG);
383 	} else {
384 		bzero(&zbm->zbm_flags,
385 		    sizeof (zfs_bookmark_phys_t) -
386 		    offsetof(zfs_bookmark_phys_t, zbm_flags));
387 	}
388 }
389 
390 /*
391  * Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
392  * SPA feature counters.
393  */
394 void
395 dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
396     dmu_tx_t *tx)
397 {
398 	dsl_pool_t *dp = dmu_tx_pool(tx);
399 	objset_t *mos = dp->dp_meta_objset;
400 
401 	if (hds->ds_bookmarks_obj == 0) {
402 		hds->ds_bookmarks_obj = zap_create_norm(mos,
403 		    U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
404 		    tx);
405 		spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
406 
407 		dsl_dataset_zapify(hds, tx);
408 		VERIFY0(zap_add(mos, hds->ds_object,
409 		    DS_FIELD_BOOKMARK_NAMES,
410 		    sizeof (hds->ds_bookmarks_obj), 1,
411 		    &hds->ds_bookmarks_obj, tx));
412 	}
413 
414 	avl_add(&hds->ds_bookmarks, dbn);
415 
416 	/*
417 	 * To maintain backwards compatibility with software that doesn't
418 	 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
419 	 * possible bookmark size.
420 	 */
421 	uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
422 	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
423 	    (dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
424 	    ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
425 		bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
426 		spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
427 	}
428 
429 	__attribute__((unused)) zfs_bookmark_phys_t zero_phys = { 0 };
430 	ASSERT0(bcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
431 	    &zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
432 
433 	VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
434 	    sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
435 	    &dbn->dbn_phys, tx));
436 }
437 
438 /*
439  * If redaction_list is non-null, we create a redacted bookmark and redaction
440  * list, and store the object number of the redaction list in redact_obj.
441  */
442 static void
443 dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
444     dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps, void *tag,
445     redaction_list_t **redaction_list)
446 {
447 	dsl_pool_t *dp = dmu_tx_pool(tx);
448 	objset_t *mos = dp->dp_meta_objset;
449 	dsl_dataset_t *snapds, *bmark_fs;
450 	char *shortname;
451 	boolean_t bookmark_redacted;
452 	uint64_t *dsredactsnaps;
453 	uint64_t dsnumsnaps;
454 
455 	VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
456 	VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
457 	    &shortname));
458 
459 	dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
460 	dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
461 
462 	bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
463 	    SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
464 	if (redaction_list != NULL || bookmark_redacted) {
465 		redaction_list_t *local_rl;
466 		if (bookmark_redacted) {
467 			redact_snaps = dsredactsnaps;
468 			num_redact_snaps = dsnumsnaps;
469 		}
470 		dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
471 		    DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
472 		    DMU_OTN_UINT64_METADATA, sizeof (redaction_list_phys_t) +
473 		    num_redact_snaps * sizeof (uint64_t), tx);
474 		spa_feature_incr(dp->dp_spa,
475 		    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
476 
477 		VERIFY0(dsl_redaction_list_hold_obj(dp,
478 		    dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
479 		dsl_redaction_list_long_hold(dp, local_rl, tag);
480 
481 		ASSERT3U((local_rl)->rl_dbuf->db_size, >=,
482 		    sizeof (redaction_list_phys_t) + num_redact_snaps *
483 		    sizeof (uint64_t));
484 		dmu_buf_will_dirty(local_rl->rl_dbuf, tx);
485 		bcopy(redact_snaps, local_rl->rl_phys->rlp_snaps,
486 		    sizeof (uint64_t) * num_redact_snaps);
487 		local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
488 		if (bookmark_redacted) {
489 			ASSERT3P(redaction_list, ==, NULL);
490 			local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
491 			local_rl->rl_phys->rlp_last_object = UINT64_MAX;
492 			dsl_redaction_list_long_rele(local_rl, tag);
493 			dsl_redaction_list_rele(local_rl, tag);
494 		} else {
495 			*redaction_list = local_rl;
496 		}
497 	}
498 
499 	if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
500 		spa_feature_incr(dp->dp_spa,
501 		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
502 	}
503 
504 	dsl_bookmark_node_add(bmark_fs, dbn, tx);
505 
506 	spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
507 	    "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
508 	    shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
509 	    (longlong_t)snapds->ds_object,
510 	    (longlong_t)dbn->dbn_phys.zbm_redaction_obj);
511 
512 	dsl_dataset_rele(bmark_fs, FTAG);
513 	dsl_dataset_rele(snapds, FTAG);
514 }
515 
516 
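/*
 * Create the new bookmark `new_name` as a copy of the existing bookmark
 * `source_name`.  Note the caveat described below: copying a redaction
 * bookmark yields a normal (non-redaction) bookmark.
 */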
517 static void
518 dsl_bookmark_create_sync_impl_book(
519     const char *new_name, const char *source_name, dmu_tx_t *tx)
520 {
521 	dsl_pool_t *dp = dmu_tx_pool(tx);
522 	dsl_dataset_t *bmark_fs_source, *bmark_fs_new;
523 	char *source_shortname, *new_shortname;
524 	zfs_bookmark_phys_t source_phys;
525 
526 	VERIFY0(dsl_bookmark_hold_ds(dp, source_name, &bmark_fs_source, FTAG,
527 	    &source_shortname));
528 	VERIFY0(dsl_bookmark_hold_ds(dp, new_name, &bmark_fs_new, FTAG,
529 	    &new_shortname));
530 
531 	/*
532 	 * create a copy of the source bookmark by copying most of its members
533 	 *
534 	 * Caveat: bookmarking a redaction bookmark yields a normal bookmark
535 	 * -----------------------------------------------------------------
536 	 * Reasoning:
537 	 * - The zbm_redaction_obj would be referred to by both source and new
538 	 *   bookmark, but would be destroyed once either source or new is
 *   destroyed, resulting in use-after-free of the referred object.
540 	 * - User expectation when issuing the `zfs bookmark` command is that
541 	 *   a normal bookmark of the source is created
542 	 *
543 	 * Design Alternatives For Full Redaction Bookmark Copying:
544 	 * - reference-count the redaction object => would require on-disk
545 	 *   format change for existing redaction objects
546 	 * - Copy the redaction object => cannot be done in syncing context
547 	 *   because the redaction object might be too large
548 	 */
549 
550 	VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source, source_shortname,
551 	    &source_phys));
552 	dsl_bookmark_node_t *new_dbn = dsl_bookmark_node_alloc(new_shortname);
553 
554 	memcpy(&new_dbn->dbn_phys, &source_phys, sizeof (source_phys));
555 	new_dbn->dbn_phys.zbm_redaction_obj = 0;
556 
557 	/* update feature counters */
558 	if (new_dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
559 		spa_feature_incr(dp->dp_spa,
560 		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
561 	}
562 	/* no need for redaction bookmark counter; nulled zbm_redaction_obj */
563 	/* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
564 
565 	/*
566 	 * write new bookmark
567 	 *
568 	 * Note that dsl_bookmark_lookup_impl guarantees that, if source is a
569 	 * v1 bookmark, the v2-only fields are zeroed.
570 	 * And dsl_bookmark_node_add writes back a v1-sized bookmark if
571 	 * v2 bookmarks are disabled and/or v2-only fields are zeroed.
572 	 * => bookmark copying works on pre-bookmark-v2 pools
573 	 */
574 	dsl_bookmark_node_add(bmark_fs_new, new_dbn, tx);
575 
576 	spa_history_log_internal_ds(bmark_fs_source, "bookmark", tx,
577 	    "name=%s creation_txg=%llu source_guid=%llu",
578 	    new_shortname, (longlong_t)new_dbn->dbn_phys.zbm_creation_txg,
579 	    (longlong_t)source_phys.zbm_guid);
580 
581 	dsl_dataset_rele(bmark_fs_source, FTAG);
582 	dsl_dataset_rele(bmark_fs_new, FTAG);
583 }
584 
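/*
 * Sync function for the bookmark creation sync task.  Dispatches each
 * { new bookmark -> source } pair to the snapshot-sourced or
 * bookmark-sourced implementation, based on whether the source name
 * contains '@' or '#'.
 */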
585 void
586 dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
587 {
588 	dsl_bookmark_create_arg_t *dbca = arg;
589 
590 	ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
591 	    SPA_FEATURE_BOOKMARKS));
592 
593 	for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
594 	    pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
595 
596 		char *new = nvpair_name(pair);
597 		char *source = fnvpair_value_string(pair);
598 
599 		if (strchr(source, '@') != NULL) {
600 			dsl_bookmark_create_sync_impl_snap(new, source, tx,
601 			    0, NULL, NULL, NULL);
602 		} else if (strchr(source, '#') != NULL) {
603 			dsl_bookmark_create_sync_impl_book(new, source, tx);
604 		} else {
605 			panic("unreachable code");
606 		}
607 
608 	}
609 }
610 
611 /*
612  * The bookmarks must all be in the same pool.
613  */
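/*
 * A minimal usage sketch (the dataset and bookmark names below are
 * illustrative only):
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	fnvlist_add_string(bmarks, "pool/fs#newbmark", "pool/fs@snap");
 *	error = dsl_bookmark_create(bmarks, NULL);
 *	fnvlist_free(bmarks);
 */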
614 int
615 dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
616 {
617 	nvpair_t *pair;
618 	dsl_bookmark_create_arg_t dbca;
619 
620 	pair = nvlist_next_nvpair(bmarks, NULL);
621 	if (pair == NULL)
622 		return (0);
623 
624 	dbca.dbca_bmarks = bmarks;
625 	dbca.dbca_errors = errors;
626 
627 	return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
628 	    dsl_bookmark_create_sync, &dbca,
629 	    fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
630 }
631 
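/*
 * Check function for creating a redaction bookmark.  In addition to the
 * normal bookmark creation checks, this verifies that the redaction
 * feature is enabled and that the list of redaction snapshots fits in the
 * redaction list object's bonus buffer.
 */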
632 static int
633 dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
634 {
635 	dsl_bookmark_create_redacted_arg_t *dbcra = arg;
636 	dsl_pool_t *dp = dmu_tx_pool(tx);
637 	int rv = 0;
638 
639 	if (!spa_feature_is_enabled(dp->dp_spa,
640 	    SPA_FEATURE_REDACTION_BOOKMARKS))
641 		return (SET_ERROR(ENOTSUP));
642 	/*
643 	 * If the list of redact snaps will not fit in the bonus buffer with
644 	 * the furthest reached object and offset, fail.
645 	 */
646 	if (dbcra->dbcra_numsnaps > (dmu_bonus_max() -
647 	    sizeof (redaction_list_phys_t)) / sizeof (uint64_t))
648 		return (SET_ERROR(E2BIG));
649 
650 	if (dsl_bookmark_create_nvl_validate_pair(
651 	    dbcra->dbcra_bmark, dbcra->dbcra_snap) != 0)
652 		return (SET_ERROR(EINVAL));
653 
654 	rv = dsl_bookmark_create_check_impl(dp,
655 	    dbcra->dbcra_bmark, dbcra->dbcra_snap);
656 	return (rv);
657 }
658 
659 static void
660 dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
661 {
662 	dsl_bookmark_create_redacted_arg_t *dbcra = arg;
663 	dsl_bookmark_create_sync_impl_snap(dbcra->dbcra_bmark,
664 	    dbcra->dbcra_snap, tx, dbcra->dbcra_numsnaps, dbcra->dbcra_snaps,
665 	    dbcra->dbcra_tag, dbcra->dbcra_rl);
666 }
667 
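/*
 * Create a redaction bookmark of the given snapshot, with the redaction
 * snapshots identified by snapguids.  On success, *rl is set to the
 * (long-held) redaction list; the caller is responsible for releasing it.
 */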
668 int
669 dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
670     uint64_t numsnaps, uint64_t *snapguids, void *tag, redaction_list_t **rl)
671 {
672 	dsl_bookmark_create_redacted_arg_t dbcra;
673 
674 	dbcra.dbcra_bmark = bookmark;
675 	dbcra.dbcra_snap = snapshot;
676 	dbcra.dbcra_rl = rl;
677 	dbcra.dbcra_numsnaps = numsnaps;
678 	dbcra.dbcra_snaps = snapguids;
679 	dbcra.dbcra_tag = tag;
680 
681 	return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
682 	    dsl_bookmark_create_redacted_sync, &dbcra, 5,
683 	    ZFS_SPACE_CHECK_NORMAL));
684 }
685 
686 /*
687  * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
688  * If 'props' is NULL, retrieves all properties.
689  */
690 static void
691 dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
692     nvlist_t *props, nvlist_t *out_props)
693 {
694 	ASSERT3P(dp, !=, NULL);
695 	ASSERT3P(bmark_phys, !=, NULL);
696 	ASSERT3P(out_props, !=, NULL);
697 	ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
698 
699 	if (props == NULL || nvlist_exists(props,
700 	    zfs_prop_to_name(ZFS_PROP_GUID))) {
701 		dsl_prop_nvlist_add_uint64(out_props,
702 		    ZFS_PROP_GUID, bmark_phys->zbm_guid);
703 	}
704 	if (props == NULL || nvlist_exists(props,
705 	    zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
706 		dsl_prop_nvlist_add_uint64(out_props,
707 		    ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
708 	}
709 	if (props == NULL || nvlist_exists(props,
710 	    zfs_prop_to_name(ZFS_PROP_CREATION))) {
711 		dsl_prop_nvlist_add_uint64(out_props,
712 		    ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
713 	}
714 	if (props == NULL || nvlist_exists(props,
715 	    zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
716 		dsl_prop_nvlist_add_uint64(out_props,
717 		    ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
718 	}
719 	if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
720 		if (props == NULL || nvlist_exists(props,
721 		    zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
722 			dsl_prop_nvlist_add_uint64(out_props,
723 			    ZFS_PROP_REFERENCED,
724 			    bmark_phys->zbm_referenced_bytes_refd);
725 		}
726 		if (props == NULL || nvlist_exists(props,
727 		    zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
728 			dsl_prop_nvlist_add_uint64(out_props,
729 			    ZFS_PROP_LOGICALREFERENCED,
730 			    bmark_phys->zbm_uncompressed_bytes_refd);
731 		}
732 		if (props == NULL || nvlist_exists(props,
733 		    zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
734 			uint64_t ratio =
735 			    bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
736 			    bmark_phys->zbm_uncompressed_bytes_refd * 100 /
737 			    bmark_phys->zbm_compressed_bytes_refd;
738 			dsl_prop_nvlist_add_uint64(out_props,
739 			    ZFS_PROP_REFRATIO, ratio);
740 		}
741 	}
742 
743 	if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
744 	    nvlist_exists(props, "redact_complete")) &&
745 	    bmark_phys->zbm_redaction_obj != 0) {
746 		redaction_list_t *rl;
747 		int err = dsl_redaction_list_hold_obj(dp,
748 		    bmark_phys->zbm_redaction_obj, FTAG, &rl);
749 		if (err == 0) {
750 			if (nvlist_exists(props, "redact_snaps")) {
751 				nvlist_t *nvl;
752 				nvl = fnvlist_alloc();
753 				fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
754 				    rl->rl_phys->rlp_snaps,
755 				    rl->rl_phys->rlp_num_snaps);
756 				fnvlist_add_nvlist(out_props, "redact_snaps",
757 				    nvl);
758 				nvlist_free(nvl);
759 			}
760 			if (nvlist_exists(props, "redact_complete")) {
761 				nvlist_t *nvl;
762 				nvl = fnvlist_alloc();
763 				fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
764 				    rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
765 				    rl->rl_phys->rlp_last_object == UINT64_MAX);
766 				fnvlist_add_nvlist(out_props, "redact_complete",
767 				    nvl);
768 				nvlist_free(nvl);
769 			}
770 			dsl_redaction_list_rele(rl, FTAG);
771 		}
772 	}
773 }
774 
775 int
776 dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
777 {
778 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
779 
780 	ASSERT(dsl_pool_config_held(dp));
781 
782 	if (dsl_dataset_is_snapshot(ds))
783 		return (SET_ERROR(EINVAL));
784 
785 	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
786 	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
787 		nvlist_t *out_props = fnvlist_alloc();
788 
789 		dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
790 
791 		fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
792 		fnvlist_free(out_props);
793 	}
794 	return (0);
795 }
796 
797 /*
798  * Comparison func for ds_bookmarks AVL tree.  We sort the bookmarks by
799  * their TXG, then by their FBN-ness.  The "FBN-ness" component ensures
 * that all bookmarks with HAS_FBN at the same TXG are adjacent, which
801  * dsl_bookmark_destroy_sync_impl() depends on.  Note that there may be
802  * multiple bookmarks at the same TXG (with the same FBN-ness).  In this
803  * case we differentiate them by an arbitrary metric (in this case,
804  * their names).
805  */
806 static int
807 dsl_bookmark_compare(const void *l, const void *r)
808 {
809 	const dsl_bookmark_node_t *ldbn = l;
810 	const dsl_bookmark_node_t *rdbn = r;
811 
812 	int64_t cmp = TREE_CMP(ldbn->dbn_phys.zbm_creation_txg,
813 	    rdbn->dbn_phys.zbm_creation_txg);
814 	if (likely(cmp))
815 		return (cmp);
816 	cmp = TREE_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
817 	    (rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
818 	if (likely(cmp))
819 		return (cmp);
820 	cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
821 	return (TREE_ISIGN(cmp));
822 }
823 
824 /*
825  * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
826  */
827 int
828 dsl_bookmark_init_ds(dsl_dataset_t *ds)
829 {
830 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
831 	objset_t *mos = dp->dp_meta_objset;
832 
833 	ASSERT(!ds->ds_is_snapshot);
834 
835 	avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
836 	    sizeof (dsl_bookmark_node_t),
837 	    offsetof(dsl_bookmark_node_t, dbn_node));
838 
839 	if (!dsl_dataset_is_zapified(ds))
840 		return (0);
841 
842 	int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
843 	    sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
844 	if (zaperr == ENOENT)
845 		return (0);
846 	if (zaperr != 0)
847 		return (zaperr);
848 
849 	if (ds->ds_bookmarks_obj == 0)
850 		return (0);
851 
852 	int err = 0;
853 	zap_cursor_t zc;
854 	zap_attribute_t attr;
855 
856 	for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
857 	    (err = zap_cursor_retrieve(&zc, &attr)) == 0;
858 	    zap_cursor_advance(&zc)) {
859 		dsl_bookmark_node_t *dbn =
860 		    dsl_bookmark_node_alloc(attr.za_name);
861 
862 		err = dsl_bookmark_lookup_impl(ds,
863 		    dbn->dbn_name, &dbn->dbn_phys);
864 		ASSERT3U(err, !=, ENOENT);
865 		if (err != 0) {
866 			kmem_free(dbn, sizeof (*dbn));
867 			break;
868 		}
869 		avl_add(&ds->ds_bookmarks, dbn);
870 	}
871 	zap_cursor_fini(&zc);
872 	if (err == ENOENT)
873 		err = 0;
874 	return (err);
875 }
876 
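/*
 * Tear down the in-core ds_bookmarks AVL tree built by
 * dsl_bookmark_init_ds().
 */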
877 void
878 dsl_bookmark_fini_ds(dsl_dataset_t *ds)
879 {
880 	void *cookie = NULL;
881 	dsl_bookmark_node_t *dbn;
882 
883 	if (ds->ds_is_snapshot)
884 		return;
885 
886 	while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
887 		spa_strfree(dbn->dbn_name);
888 		mutex_destroy(&dbn->dbn_lock);
889 		kmem_free(dbn, sizeof (*dbn));
890 	}
891 	avl_destroy(&ds->ds_bookmarks);
892 }
893 
894 /*
895  * Retrieve the bookmarks that exist in the specified dataset, and the
896  * requested properties of each bookmark.
897  *
898  * The "props" nvlist specifies which properties are requested.
899  * See lzc_get_bookmarks() for the list of valid properties.
900  */
901 int
902 dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
903 {
904 	dsl_pool_t *dp;
905 	dsl_dataset_t *ds;
906 	int err;
907 
908 	err = dsl_pool_hold(dsname, FTAG, &dp);
909 	if (err != 0)
910 		return (err);
911 	err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
912 	if (err != 0) {
913 		dsl_pool_rele(dp, FTAG);
914 		return (err);
915 	}
916 
917 	err = dsl_get_bookmarks_impl(ds, props, outnvl);
918 
919 	dsl_dataset_rele(ds, FTAG);
920 	dsl_pool_rele(dp, FTAG);
921 	return (err);
922 }
923 
924 /*
925  * Retrieve all properties for a single bookmark in the given dataset.
926  */
927 int
928 dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
929 {
930 	dsl_pool_t *dp;
931 	dsl_dataset_t *ds;
932 	zfs_bookmark_phys_t bmark_phys = { 0 };
933 	int err;
934 
935 	err = dsl_pool_hold(dsname, FTAG, &dp);
936 	if (err != 0)
937 		return (err);
938 	err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
939 	if (err != 0) {
940 		dsl_pool_rele(dp, FTAG);
941 		return (err);
942 	}
943 
944 	err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
945 	if (err != 0)
946 		goto out;
947 
948 	dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
949 out:
950 	dsl_dataset_rele(ds, FTAG);
951 	dsl_pool_rele(dp, FTAG);
952 	return (err);
953 }
954 
955 typedef struct dsl_bookmark_destroy_arg {
956 	nvlist_t *dbda_bmarks;
957 	nvlist_t *dbda_success;
958 	nvlist_t *dbda_errors;
959 } dsl_bookmark_destroy_arg_t;
960 
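/*
 * Destroy a single bookmark of the given (head) dataset: adjust deadlist
 * keys and feature counters as needed, free any redaction list object,
 * remove the in-core AVL node, and delete the ZAP entry.
 */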
961 static void
962 dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
963     dmu_tx_t *tx)
964 {
965 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
966 	uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
967 	matchtype_t mt = 0;
968 	uint64_t int_size, num_ints;
969 	/*
	 * 'search' must be zeroed so that zbm_flags (which is used in
	 * dsl_bookmark_compare()) will be zeroed even if the on-disk
	 * (in ZAP) bookmark is shorter than offsetof(zbm_flags).
973 	 */
974 	dsl_bookmark_node_t search = { 0 };
975 	char realname[ZFS_MAX_DATASET_NAME_LEN];
976 
977 	/*
978 	 * Find the real name of this bookmark, which may be different
979 	 * from the given name if the dataset is case-insensitive.  Then
980 	 * use the real name to find the node in the ds_bookmarks AVL tree.
981 	 */
982 
983 	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
984 		mt = MT_NORMALIZE;
985 
986 	VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
987 
988 	ASSERT3U(int_size, ==, sizeof (uint64_t));
989 
990 	if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
991 		spa_feature_decr(dmu_objset_spa(mos),
992 		    SPA_FEATURE_BOOKMARK_V2, tx);
993 	}
994 	VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
995 	    num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
996 
997 	search.dbn_name = realname;
998 	dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
999 	ASSERT(dbn != NULL);
1000 
1001 	if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
1002 		/*
1003 		 * If this bookmark HAS_FBN, and it is before the most
1004 		 * recent snapshot, then its TXG is a key in the head's
1005 		 * deadlist (and all clones' heads' deadlists).  If this is
1006 		 * the last thing keeping the key (i.e. there are no more
1007 		 * bookmarks with HAS_FBN at this TXG, and there is no
1008 		 * snapshot at this TXG), then remove the key.
1009 		 *
1010 		 * Note that this algorithm depends on ds_bookmarks being
1011 		 * sorted such that all bookmarks at the same TXG with
1012 		 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
1013 		 * at the same TXG in between them).  If this were not
1014 		 * the case, we would need to examine *all* bookmarks
1015 		 * at this TXG, rather than just the adjacent ones.
1016 		 */
1017 
1018 		dsl_bookmark_node_t *dbn_prev =
1019 		    AVL_PREV(&ds->ds_bookmarks, dbn);
1020 		dsl_bookmark_node_t *dbn_next =
1021 		    AVL_NEXT(&ds->ds_bookmarks, dbn);
1022 
1023 		boolean_t more_bookmarks_at_this_txg =
1024 		    (dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
1025 		    dbn->dbn_phys.zbm_creation_txg &&
1026 		    (dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
1027 		    (dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
1028 		    dbn->dbn_phys.zbm_creation_txg &&
1029 		    (dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
1030 
1031 		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
1032 		    !more_bookmarks_at_this_txg &&
1033 		    dbn->dbn_phys.zbm_creation_txg <
1034 		    dsl_dataset_phys(ds)->ds_prev_snap_txg) {
1035 			dsl_dir_remove_clones_key(ds->ds_dir,
1036 			    dbn->dbn_phys.zbm_creation_txg, tx);
1037 			dsl_deadlist_remove_key(&ds->ds_deadlist,
1038 			    dbn->dbn_phys.zbm_creation_txg, tx);
1039 		}
1040 
1041 		spa_feature_decr(dmu_objset_spa(mos),
1042 		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
1043 	}
1044 
1045 	if (dbn->dbn_phys.zbm_redaction_obj != 0) {
1046 		VERIFY0(dmu_object_free(mos,
1047 		    dbn->dbn_phys.zbm_redaction_obj, tx));
1048 		spa_feature_decr(dmu_objset_spa(mos),
1049 		    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
1050 	}
1051 
1052 	avl_remove(&ds->ds_bookmarks, dbn);
1053 	spa_strfree(dbn->dbn_name);
1054 	mutex_destroy(&dbn->dbn_lock);
1055 	kmem_free(dbn, sizeof (*dbn));
1056 
1057 	VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
1058 }
1059 
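/*
 * Check function for the bookmark destroy sync task.  Bookmarks (or their
 * datasets) that no longer exist are treated as already destroyed and
 * ignored; a bookmark whose redaction list is long-held (e.g. by an
 * in-progress redacted send) fails with EBUSY.
 */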
1060 static int
1061 dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
1062 {
1063 	dsl_bookmark_destroy_arg_t *dbda = arg;
1064 	dsl_pool_t *dp = dmu_tx_pool(tx);
1065 	int rv = 0;
1066 
1067 	ASSERT(nvlist_empty(dbda->dbda_success));
1068 	ASSERT(nvlist_empty(dbda->dbda_errors));
1069 
1070 	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
1071 		return (0);
1072 
1073 	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
1074 	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
1075 		const char *fullname = nvpair_name(pair);
1076 		dsl_dataset_t *ds;
1077 		zfs_bookmark_phys_t bm;
1078 		int error;
1079 		char *shortname;
1080 
1081 		error = dsl_bookmark_hold_ds(dp, fullname, &ds,
1082 		    FTAG, &shortname);
1083 		if (error == ENOENT) {
1084 			/* ignore it; the bookmark is "already destroyed" */
1085 			continue;
1086 		}
1087 		if (error == 0) {
1088 			error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
1089 			dsl_dataset_rele(ds, FTAG);
1090 			if (error == ESRCH) {
1091 				/*
1092 				 * ignore it; the bookmark is
1093 				 * "already destroyed"
1094 				 */
1095 				continue;
1096 			}
1097 			if (error == 0 && bm.zbm_redaction_obj != 0) {
1098 				redaction_list_t *rl = NULL;
1099 				error = dsl_redaction_list_hold_obj(tx->tx_pool,
1100 				    bm.zbm_redaction_obj, FTAG, &rl);
1101 				if (error == ENOENT) {
1102 					error = 0;
1103 				} else if (error == 0 &&
1104 				    dsl_redaction_list_long_held(rl)) {
1105 					error = SET_ERROR(EBUSY);
1106 				}
1107 				if (rl != NULL) {
1108 					dsl_redaction_list_rele(rl, FTAG);
1109 				}
1110 			}
1111 		}
1112 		if (error == 0) {
1113 			if (dmu_tx_is_syncing(tx)) {
1114 				fnvlist_add_boolean(dbda->dbda_success,
1115 				    fullname);
1116 			}
1117 		} else {
1118 			fnvlist_add_int32(dbda->dbda_errors, fullname, error);
1119 			rv = error;
1120 		}
1121 	}
1122 	return (rv);
1123 }
1124 
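/*
 * Sync function for the bookmark destroy sync task.  Destroys each bookmark
 * that passed the check, and frees a dataset's bookmark ZAP object (and
 * decrements the feature refcount) once its last bookmark is gone.
 */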
1125 static void
1126 dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
1127 {
1128 	dsl_bookmark_destroy_arg_t *dbda = arg;
1129 	dsl_pool_t *dp = dmu_tx_pool(tx);
1130 	objset_t *mos = dp->dp_meta_objset;
1131 
1132 	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
1133 	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
1134 		dsl_dataset_t *ds;
1135 		char *shortname;
1136 		uint64_t zap_cnt;
1137 
1138 		VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
1139 		    &ds, FTAG, &shortname));
1140 		dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
1141 
1142 		/*
1143 		 * If all of this dataset's bookmarks have been destroyed,
1144 		 * free the zap object and decrement the feature's use count.
1145 		 */
1146 		VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
1147 		if (zap_cnt == 0) {
1148 			dmu_buf_will_dirty(ds->ds_dbuf, tx);
1149 			VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
1150 			ds->ds_bookmarks_obj = 0;
1151 			spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
1152 			VERIFY0(zap_remove(mos, ds->ds_object,
1153 			    DS_FIELD_BOOKMARK_NAMES, tx));
1154 		}
1155 
1156 		spa_history_log_internal_ds(ds, "remove bookmark", tx,
1157 		    "name=%s", shortname);
1158 
1159 		dsl_dataset_rele(ds, FTAG);
1160 	}
1161 }
1162 
1163 /*
1164  * The bookmarks must all be in the same pool.
1165  */
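/*
 * A minimal usage sketch (illustrative names; only the nvpair names are
 * used, the values are ignored):
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errors = fnvlist_alloc();
 *	fnvlist_add_boolean(bmarks, "pool/fs#oldbmark");
 *	error = dsl_bookmark_destroy(bmarks, errors);
 *	fnvlist_free(errors);
 *	fnvlist_free(bmarks);
 */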
1166 int
1167 dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
1168 {
1169 	int rv;
1170 	dsl_bookmark_destroy_arg_t dbda;
1171 	nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
1172 	if (pair == NULL)
1173 		return (0);
1174 
1175 	dbda.dbda_bmarks = bmarks;
1176 	dbda.dbda_errors = errors;
1177 	dbda.dbda_success = fnvlist_alloc();
1178 
1179 	rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
1180 	    dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
1181 	    ZFS_SPACE_CHECK_RESERVED);
1182 	fnvlist_free(dbda.dbda_success);
1183 	return (rv);
1184 }
1185 
/* Return B_TRUE if there are any long holds on this redaction list. */
1187 boolean_t
1188 dsl_redaction_list_long_held(redaction_list_t *rl)
1189 {
1190 	return (!zfs_refcount_is_zero(&rl->rl_longholds));
1191 }
1192 
1193 void
1194 dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl, void *tag)
1195 {
1196 	ASSERT(dsl_pool_config_held(dp));
1197 	(void) zfs_refcount_add(&rl->rl_longholds, tag);
1198 }
1199 
1200 void
1201 dsl_redaction_list_long_rele(redaction_list_t *rl, void *tag)
1202 {
1203 	(void) zfs_refcount_remove(&rl->rl_longholds, tag);
1204 }
1205 
1206 /* ARGSUSED */
1207 static void
1208 redaction_list_evict_sync(void *rlu)
1209 {
1210 	redaction_list_t *rl = rlu;
1211 	zfs_refcount_destroy(&rl->rl_longholds);
1212 
1213 	kmem_free(rl, sizeof (redaction_list_t));
1214 }
1215 
1216 void
1217 dsl_redaction_list_rele(redaction_list_t *rl, void *tag)
1218 {
1219 	dmu_buf_rele(rl->rl_dbuf, tag);
1220 }
1221 
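/*
 * Hold the redaction list stored in the object `rlobj`, instantiating (or
 * reusing) its in-core redaction_list_t.  Release the hold with
 * dsl_redaction_list_rele().
 */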
1222 int
1223 dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, void *tag,
1224     redaction_list_t **rlp)
1225 {
1226 	objset_t *mos = dp->dp_meta_objset;
1227 	dmu_buf_t *dbuf;
1228 	redaction_list_t *rl;
1229 	int err;
1230 
1231 	ASSERT(dsl_pool_config_held(dp));
1232 
1233 	err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
1234 	if (err != 0)
1235 		return (err);
1236 
1237 	rl = dmu_buf_get_user(dbuf);
1238 	if (rl == NULL) {
1239 		redaction_list_t *winner = NULL;
1240 
1241 		rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
1242 		rl->rl_dbuf = dbuf;
1243 		rl->rl_object = rlobj;
1244 		rl->rl_phys = dbuf->db_data;
1245 		rl->rl_mos = dp->dp_meta_objset;
1246 		zfs_refcount_create(&rl->rl_longholds);
1247 		dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
1248 		    &rl->rl_dbuf);
1249 		if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
1250 			kmem_free(rl, sizeof (*rl));
1251 			rl = winner;
1252 		}
1253 	}
1254 	*rlp = rl;
1255 	return (0);
1256 }
1257 
1258 /*
1259  * Snapshot ds is being destroyed.
1260  *
1261  * Adjust the "freed_before_next" of any bookmarks between this snap
1262  * and the previous snapshot, because their "next snapshot" is changing.
1263  *
1264  * If there are any bookmarks with HAS_FBN at this snapshot, remove
1265  * their HAS_SNAP flag (note: there can be at most one snapshot of
1266  * each filesystem at a given txg), and return B_TRUE.  In this case
1267  * the caller can not remove the key in the deadlist at this TXG, because
1268  * the HAS_FBN bookmarks require the key be there.
1269  *
1270  * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1271  * snapshot's TXG.  In this case the caller can remove the key in the
1272  * deadlist at this TXG.
1273  */
1274 boolean_t
1275 dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1276 {
1277 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1278 
1279 	dsl_dataset_t *head, *next;
1280 	VERIFY0(dsl_dataset_hold_obj(dp,
1281 	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
1282 	VERIFY0(dsl_dataset_hold_obj(dp,
1283 	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
1284 
1285 	/*
1286 	 * Find the first bookmark that HAS_FBN at or after the
1287 	 * previous snapshot.
1288 	 */
1289 	dsl_bookmark_node_t search = { 0 };
1290 	avl_index_t idx;
1291 	search.dbn_phys.zbm_creation_txg =
1292 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1293 	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1294 	/*
1295 	 * The empty-string name can't be in the AVL, and it compares
1296 	 * before any entries with this TXG.
1297 	 */
1298 	search.dbn_name = "";
1299 	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1300 	dsl_bookmark_node_t *dbn =
1301 	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1302 
1303 	/*
1304 	 * Iterate over all bookmarks that are at or after the previous
1305 	 * snapshot, and before this (being deleted) snapshot.  Adjust
1306 	 * their FBN based on their new next snapshot.
1307 	 */
1308 	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
1309 	    dsl_dataset_phys(ds)->ds_creation_txg;
1310 	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1311 		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
1312 			continue;
1313 		/*
1314 		 * Increase our FBN by the amount of space that was live
1315 		 * (referenced) at the time of this bookmark (i.e.
1316 		 * birth <= zbm_creation_txg), and killed between this
1317 		 * (being deleted) snapshot and the next snapshot (i.e.
1318 		 * on the next snapshot's deadlist).  (Space killed before
		 * this is already on our FBN.)
1320 		 */
1321 		uint64_t referenced, compressed, uncompressed;
1322 		dsl_deadlist_space_range(&next->ds_deadlist,
1323 		    0, dbn->dbn_phys.zbm_creation_txg,
1324 		    &referenced, &compressed, &uncompressed);
1325 		dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1326 		    referenced;
1327 		dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1328 		    compressed;
1329 		dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1330 		    uncompressed;
1331 		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1332 		    dbn->dbn_name, sizeof (uint64_t),
1333 		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1334 		    &dbn->dbn_phys, tx));
1335 	}
1336 	dsl_dataset_rele(next, FTAG);
1337 
1338 	/*
1339 	 * There may be several bookmarks at this txg (the TXG of the
1340 	 * snapshot being deleted).  We need to clear the SNAPSHOT_EXISTS
1341 	 * flag on all of them, and return TRUE if there is at least 1
1342 	 * bookmark here with HAS_FBN (thus preventing the deadlist
1343 	 * key from being removed).
1344 	 */
1345 	boolean_t rv = B_FALSE;
1346 	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1347 	    dsl_dataset_phys(ds)->ds_creation_txg;
1348 	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1349 		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1350 			ASSERT(!(dbn->dbn_phys.zbm_flags &
1351 			    ZBM_FLAG_SNAPSHOT_EXISTS));
1352 			continue;
1353 		}
1354 		ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
1355 		dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
1356 		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1357 		    dbn->dbn_name, sizeof (uint64_t),
1358 		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1359 		    &dbn->dbn_phys, tx));
1360 		rv = B_TRUE;
1361 	}
1362 	dsl_dataset_rele(head, FTAG);
1363 	return (rv);
1364 }
1365 
1366 /*
1367  * A snapshot is being created of this (head) dataset.
1368  *
1369  * We don't keep keys in the deadlist for the most recent snapshot, or any
1370  * bookmarks at or after it, because there can't be any blocks on the
1371  * deadlist in this range.  Now that the most recent snapshot is after
1372  * all bookmarks, we need to add these keys.  Note that the caller always
1373  * adds a key at the previous snapshot, so we only add keys for bookmarks
1374  * after that.
1375  */
1376 void
1377 dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1378 {
1379 	uint64_t last_key_added = UINT64_MAX;
1380 	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1381 	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
1382 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1383 	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1384 		uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
1385 		ASSERT3U(creation_txg, <=, last_key_added);
1386 		/*
1387 		 * Note, there may be multiple bookmarks at this TXG,
1388 		 * and we only want to add the key for this TXG once.
1389 		 * The ds_bookmarks AVL is sorted by TXG, so we will visit
1390 		 * these bookmarks in sequence.
1391 		 */
1392 		if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
1393 		    creation_txg != last_key_added) {
1394 			dsl_deadlist_add_key(&ds->ds_deadlist,
1395 			    creation_txg, tx);
1396 			last_key_added = creation_txg;
1397 		}
1398 	}
1399 }
1400 
1401 /*
1402  * The next snapshot of the origin dataset has changed, due to
1403  * promote or clone swap.  If there are any bookmarks at this dataset,
1404  * we need to update their zbm_*_freed_before_next_snap to reflect this.
1405  * The head dataset has the relevant bookmarks in ds_bookmarks.
1406  */
1407 void
1408 dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
1409     dmu_tx_t *tx)
1410 {
1411 	dsl_pool_t *dp = dmu_tx_pool(tx);
1412 
1413 	/*
1414 	 * Find the first bookmark that HAS_FBN at the origin snapshot.
1415 	 */
1416 	dsl_bookmark_node_t search = { 0 };
1417 	avl_index_t idx;
1418 	search.dbn_phys.zbm_creation_txg =
1419 	    dsl_dataset_phys(origin)->ds_creation_txg;
1420 	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1421 	/*
1422 	 * The empty-string name can't be in the AVL, and it compares
1423 	 * before any entries with this TXG.
1424 	 */
1425 	search.dbn_name = "";
1426 	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1427 	dsl_bookmark_node_t *dbn =
1428 	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1429 
1430 	/*
1431 	 * Iterate over all bookmarks that are at the origin txg.
1432 	 * Adjust their FBN based on their new next snapshot.
1433 	 */
1434 	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1435 	    dsl_dataset_phys(origin)->ds_creation_txg &&
1436 	    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1437 	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1438 
1439 		/*
1440 		 * Bookmark is at the origin, therefore its
1441 		 * "next dataset" is changing, so we need
1442 		 * to reset its FBN by recomputing it in
1443 		 * dsl_bookmark_set_phys().
1444 		 */
1445 		ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
1446 		    dsl_dataset_phys(origin)->ds_guid);
1447 		ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
1448 		    dsl_dataset_phys(origin)->ds_referenced_bytes);
1449 		ASSERT(dbn->dbn_phys.zbm_flags &
1450 		    ZBM_FLAG_SNAPSHOT_EXISTS);
1451 		/*
1452 		 * Save and restore the zbm_redaction_obj, which
1453 		 * is zeroed by dsl_bookmark_set_phys().
1454 		 */
1455 		uint64_t redaction_obj =
1456 		    dbn->dbn_phys.zbm_redaction_obj;
1457 		dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
1458 		dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
1459 
1460 		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1461 		    dbn->dbn_name, sizeof (uint64_t),
1462 		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1463 		    &dbn->dbn_phys, tx));
1464 	}
1465 }
1466 
1467 /*
1468  * This block is no longer referenced by this (head) dataset.
1469  *
1470  * Adjust the FBN of any bookmarks that reference this block, whose "next"
1471  * is the head dataset.
1472  */
1473 /* ARGSUSED */
1474 void
1475 dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
1476 {
1477 	/*
1478 	 * Iterate over bookmarks whose "next" is the head dataset.
1479 	 */
1480 	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1481 	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1482 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1483 	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1484 		/*
1485 		 * If the block was live (referenced) at the time of this
1486 		 * bookmark, add its space to the bookmark's FBN.
1487 		 */
1488 		if (bp->blk_birth <= dbn->dbn_phys.zbm_creation_txg &&
1489 		    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1490 			mutex_enter(&dbn->dbn_lock);
1491 			dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1492 			    bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
1493 			dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1494 			    BP_GET_PSIZE(bp);
1495 			dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1496 			    BP_GET_UCSIZE(bp);
1497 			/*
1498 			 * Changing the ZAP object here would be too
1499 			 * expensive.  Also, we may be called from the zio
1500 			 * interrupt thread, which can't block on i/o.
1501 			 * Therefore, we mark this bookmark as dirty and
1502 			 * modify the ZAP once per txg, in
1503 			 * dsl_bookmark_sync_done().
1504 			 */
1505 			dbn->dbn_dirty = B_TRUE;
1506 			mutex_exit(&dbn->dbn_lock);
1507 		}
1508 	}
1509 }
1510 
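/*
 * Write out, once per txg, the FBN updates that dsl_bookmark_block_killed()
 * accumulated in memory for this dataset's dirty bookmarks (see the comment
 * in dsl_bookmark_block_killed()).
 */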
1511 void
1512 dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
1513 {
1514 	dsl_pool_t *dp = dmu_tx_pool(tx);
1515 
1516 	if (dsl_dataset_is_snapshot(ds))
1517 		return;
1518 
1519 	/*
1520 	 * We only dirty bookmarks that are at or after the most recent
1521 	 * snapshot.  We can't create snapshots between
1522 	 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
1523 	 * don't need to look at any bookmarks before ds_prev_snap_txg.
1524 	 */
1525 	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1526 	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1527 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1528 	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1529 		if (dbn->dbn_dirty) {
1530 			/*
1531 			 * We only dirty nodes with HAS_FBN, therefore
1532 			 * we can always use the current bookmark struct size.
1533 			 */
1534 			ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1535 			VERIFY0(zap_update(dp->dp_meta_objset,
1536 			    ds->ds_bookmarks_obj,
1537 			    dbn->dbn_name, sizeof (uint64_t),
1538 			    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1539 			    &dbn->dbn_phys, tx));
1540 			dbn->dbn_dirty = B_FALSE;
1541 		}
1542 	}
1543 #ifdef ZFS_DEBUG
1544 	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
1545 	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
1546 		ASSERT(!dbn->dbn_dirty);
1547 	}
1548 #endif
1549 }
1550 
1551 /*
1552  * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1553  */
1554 uint64_t
1555 dsl_bookmark_latest_txg(dsl_dataset_t *ds)
1556 {
1557 	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1558 	dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1559 	if (dbn == NULL)
1560 		return (0);
1561 	return (dbn->dbn_phys.zbm_creation_txg);
1562 }
1563 
1564 static inline unsigned int
1565 redact_block_buf_num_entries(unsigned int size)
1566 {
1567 	return (size / sizeof (redact_block_phys_t));
1568 }
1569 
1570 /*
1571  * This function calculates the offset of the last entry in the array of
1572  * redact_block_phys_t.  If we're reading the redaction list into buffers of
 * size bufsize, then for all but the last buffer, the last valid entry in the
 * buffer is simply the last entry in the buffer.  However, the last buffer may
 * be only partially filled.  Thus, we check to see if we're looking at the
1576  * last buffer in the redaction list, and if so, we return the total number of
1577  * entries modulo the number of entries per buffer.  Otherwise, we return the
1578  * number of entries per buffer minus one.
1579  */
1580 static inline unsigned int
1581 last_entry(redaction_list_t *rl, unsigned int bufsize, uint64_t bufid)
1582 {
1583 	if (bufid == (rl->rl_phys->rlp_num_entries - 1) /
1584 	    redact_block_buf_num_entries(bufsize)) {
1585 		return ((rl->rl_phys->rlp_num_entries - 1) %
1586 		    redact_block_buf_num_entries(bufsize));
1587 	}
1588 	return (redact_block_buf_num_entries(bufsize) - 1);
1589 }
1590 
1591 /*
1592  * Compare the redact_block_phys_t to the bookmark. If the last block in the
1593  * redact_block_phys_t is before the bookmark, return -1.  If the first block in
1594  * the redact_block_phys_t is after the bookmark, return 1.  Otherwise, the
1595  * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1596  */
1597 static int
1598 redact_block_zb_compare(redact_block_phys_t *first,
1599     zbookmark_phys_t *second)
1600 {
1601 	/*
1602 	 * If the block_phys is for a previous object, or the last block in the
1603 	 * block_phys is strictly before the block in the bookmark, the
1604 	 * block_phys is earlier.
1605 	 */
1606 	if (first->rbp_object < second->zb_object ||
1607 	    (first->rbp_object == second->zb_object &&
1608 	    first->rbp_blkid + (redact_block_get_count(first) - 1) <
1609 	    second->zb_blkid)) {
1610 		return (-1);
1611 	}
1612 
1613 	/*
1614 	 * If the bookmark is for a previous object, or the block in the
1615 	 * bookmark is strictly before the first block in the block_phys, the
1616 	 * bookmark is earlier.
1617 	 */
1618 	if (first->rbp_object > second->zb_object ||
1619 	    (first->rbp_object == second->zb_object &&
1620 	    first->rbp_blkid > second->zb_blkid)) {
1621 		return (1);
1622 	}
1623 
1624 	return (0);
1625 }
1626 
1627 /*
1628  * Traverse the redaction list in the provided object, and call the callback for
1629  * each entry we find. Don't call the callback for any records before resume.
1630  */
1631 int
1632 dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
1633     rl_traverse_callback_t cb, void *arg)
1634 {
1635 	objset_t *mos = rl->rl_mos;
1636 	redact_block_phys_t *buf;
1637 	unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
1638 	int err = 0;
1639 
1640 	if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
1641 	    rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
1642 		/*
1643 		 * When we finish a send, we update the last object and offset
1644 		 * to UINT64_MAX.  If a send fails partway through, the last
1645 		 * object and offset will have some other value, indicating how
1646 		 * far the send got. The redaction list must be complete before
1647 		 * it can be traversed, so return EINVAL if the last object and
1648 		 * blkid are not set to UINT64_MAX.
1649 		 */
1650 		return (SET_ERROR(EINVAL));
1651 	}
1652 
1653 	/*
1654 	 * Binary search for the point to resume from.  The goal is to minimize
1655 	 * the number of disk reads we have to perform.
1656 	 */
1657 	buf = zio_data_buf_alloc(bufsize);
1658 	uint64_t maxbufid = (rl->rl_phys->rlp_num_entries - 1) /
1659 	    redact_block_buf_num_entries(bufsize);
1660 	uint64_t minbufid = 0;
1661 	while (resume != NULL && maxbufid - minbufid >= 1) {
1662 		ASSERT3U(maxbufid, >, minbufid);
1663 		uint64_t midbufid = minbufid + ((maxbufid - minbufid) / 2);
1664 		err = dmu_read(mos, rl->rl_object, midbufid * bufsize, bufsize,
1665 		    buf, DMU_READ_NO_PREFETCH);
1666 		if (err != 0)
1667 			break;
1668 
1669 		int cmp0 = redact_block_zb_compare(&buf[0], resume);
1670 		int cmpn = redact_block_zb_compare(
1671 		    &buf[last_entry(rl, bufsize, maxbufid)], resume);
1672 
1673 		/*
1674 		 * If the first block is before or equal to the resume point,
1675 		 * and the last one is equal or after, then the resume point is
1676 		 * in this buf, and we should start here.
1677 		 */
1678 		if (cmp0 <= 0 && cmpn >= 0)
1679 			break;
1680 
1681 		if (cmp0 > 0)
1682 			maxbufid = midbufid - 1;
1683 		else if (cmpn < 0)
1684 			minbufid = midbufid + 1;
1685 		else
1686 			panic("No progress in binary search for resume point");
1687 	}
1688 
1689 	for (uint64_t curidx = minbufid * redact_block_buf_num_entries(bufsize);
1690 	    err == 0 && curidx < rl->rl_phys->rlp_num_entries;
1691 	    curidx++) {
1692 		/*
1693 		 * We read in the redaction list one block at a time.  Once we
1694 		 * finish with all the entries in a given block, we read in a
1695 		 * new one.  The predictive prefetcher will take care of any
1696 		 * prefetching, and this code shouldn't be the bottleneck, so we
1697 		 * don't need to do manual prefetching.
1698 		 */
1699 		if (curidx % redact_block_buf_num_entries(bufsize) == 0) {
1700 			err = dmu_read(mos, rl->rl_object, curidx *
1701 			    sizeof (*buf), bufsize, buf,
1702 			    DMU_READ_PREFETCH);
1703 			if (err != 0)
1704 				break;
1705 		}
1706 		redact_block_phys_t *rb = &buf[curidx %
1707 		    redact_block_buf_num_entries(bufsize)];
1708 		/*
1709 		 * If resume is non-null, we should either not send the data, or
1710 		 * null out resume so we don't have to keep doing these
1711 		 * comparisons.
1712 		 */
1713 		if (resume != NULL) {
1714 			if (redact_block_zb_compare(rb, resume) < 0) {
1715 				continue;
1716 			} else {
1717 				/*
1718 				 * If the place to resume is in the middle of
1719 				 * the range described by this
1720 				 * redact_block_phys, then modify the
1721 				 * redact_block_phys in memory so we generate
1722 				 * the right records.
1723 				 */
1724 				if (resume->zb_object == rb->rbp_object &&
1725 				    resume->zb_blkid > rb->rbp_blkid) {
1726 					uint64_t diff = resume->zb_blkid -
1727 					    rb->rbp_blkid;
1728 					rb->rbp_blkid = resume->zb_blkid;
1729 					redact_block_set_count(rb,
1730 					    redact_block_get_count(rb) - diff);
1731 				}
1732 				resume = NULL;
1733 			}
1734 		}
1735 
1736 		if (cb(rb, arg) != 0)
1737 			break;
1738 	}
1739 
1740 	zio_data_buf_free(buf, bufsize);
1741 	return (err);
1742 }
1743