/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 RackTop Systems.
 */

/*
 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
 * It has the following characteristics:
 *
 *  - Thread Safe.  libzfs_core is accessible concurrently from multiple
 *  threads.  This is accomplished primarily by avoiding global data
 *  (e.g. caching).  Since it's thread-safe, there is no reason for a
 *  process to have multiple libzfs "instances".  Therefore, we store
 *  our few pieces of data (e.g. the file descriptor) in global
 *  variables.  The fd is reference-counted so that the libzfs_core
 *  library can be "initialized" multiple times (e.g. by different
 *  consumers within the same process).
 *
 *  - Committed Interface.  The libzfs_core interface will be committed,
 *  therefore consumers can compile against it and be confident that
 *  their code will continue to work on future releases of this code.
 *  Currently, the interface is Evolving (not Committed), but we intend
 *  to commit to it once it is more complete and we determine that it
 *  meets the needs of all consumers.
 *
 *  - Programmatic Error Handling.  libzfs_core communicates errors with
 *  defined error numbers, and doesn't print anything to stdout/stderr.
 *
 *  - Thin Layer.  libzfs_core is a thin layer, marshaling arguments
 *  to/from the kernel ioctls.  There is generally a 1:1 correspondence
 *  between libzfs_core functions and ioctls to /dev/zfs.
 *
 *  - Clear Atomicity.  Because libzfs_core functions are generally 1:1
 *  with kernel ioctls, and kernel ioctls are generally atomic, each
 *  libzfs_core function is atomic.  For example, creating multiple
 *  snapshots with a single call to lzc_snapshot() is atomic -- it
 *  can't fail with only some of the requested snapshots created, even
 *  in the event of power loss or system crash.
 *
 *  - Continued libzfs Support.  Some higher-level operations (e.g.
 *  support for "zfs send -R") are too complicated to fit the scope of
 *  libzfs_core.  This functionality will continue to live in libzfs.
 *  Where appropriate, libzfs will use the underlying atomic operations
 *  of libzfs_core.  For example, libzfs may implement "zfs send -R |
 *  zfs receive" by using individual "send one snapshot", rename,
 *  destroy, and "receive one snapshot" operations in libzfs_core.
 *  /sbin/zfs and /sbin/zpool will link with both libzfs and
 *  libzfs_core.  Other consumers should aim to use only libzfs_core,
 *  since that will be the supported, stable interface going forward.
 */
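
/*
 * Illustrative sketch of a minimal consumer (not part of the library).
 * The dataset name "tank/fs" is a placeholder and <stdio.h> is assumed
 * for the output; error handling is reduced to the bare minimum.
 *
 *	#include <libzfs_core.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int error = libzfs_core_init();
 *		if (error != 0) {
 *			(void) fprintf(stderr, "init failed: %d\n", error);
 *			return (1);
 *		}
 *		(void) printf("tank/fs %s\n",
 *		    lzc_exists("tank/fs") ? "exists" : "does not exist");
 *		libzfs_core_fini();
 *		return (0);
 *	}
 */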

#include <libzfs_core.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/nvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>

static int g_fd = -1;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_refcount;

int
libzfs_core_init(void)
{
	(void) pthread_mutex_lock(&g_lock);
	if (g_refcount == 0) {
		g_fd = open("/dev/zfs", O_RDWR);
		if (g_fd < 0) {
			(void) pthread_mutex_unlock(&g_lock);
			return (errno);
		}
	}
	g_refcount++;
	(void) pthread_mutex_unlock(&g_lock);
	return (0);
}

void
libzfs_core_fini(void)
{
	(void) pthread_mutex_lock(&g_lock);
	ASSERT3S(g_refcount, >, 0);

	if (g_refcount > 0)
		g_refcount--;

	if (g_refcount == 0 && g_fd != -1) {
		(void) close(g_fd);
		g_fd = -1;
	}
	(void) pthread_mutex_unlock(&g_lock);
}

static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
	size_t size;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		if (ioc == ZFS_IOC_CHANNEL_PROGRAM) {
			zc.zc_nvlist_dst_size = fnvlist_lookup_uint64(source,
			    ZCP_ARG_MEMLIMIT);
		} else {
			zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		}
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
		if (zc.zc_nvlist_dst == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	while (ioctl(g_fd, ioc, &zc) != 0) {
		/*
		 * If the ioctl fails with ENOMEM, retry it after
		 * increasing the size of the destination nvlist.
		 *
		 * Channel programs that exit with ENOMEM exceeded their
		 * Lua memory sandbox limit; they should not be retried.
		 */
		if (errno == ENOMEM && resultp != NULL &&
		    ioc != ZFS_IOC_CHANNEL_PROGRAM) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
			if (zc.zc_nvlist_dst == NULL) {
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}

out:
	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

int
lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

int
lzc_clone(const char *fsname, const char *origin,
    nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_string(args, "origin", origin);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

int
lzc_promote(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	/*
	 * The promote ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
	if (ioctl(g_fd, ZFS_IOC_PROMOTE, &zc) != 0) {
		int error = errno;
		if (error == EEXIST && snapnamebuf != NULL)
			(void) strlcpy(snapnamebuf, zc.zc_string, snapnamelen);
		return (error);
	}
	return (0);
}

int
lzc_remap(const char *fsname)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	error = lzc_ioctl(ZFS_IOC_REMAP, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

int
lzc_rename(const char *source, const char *target)
{
	zfs_cmd_t zc = { 0 };
	int error;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, source, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
	error = ioctl(g_fd, ZFS_IOC_RENAME, &zc);
	if (error != 0)
		error = errno;
	return (error);
}

int
lzc_destroy(const char *fsname)
{
	int error;

	nvlist_t *args = fnvlist_alloc();
	error = lzc_ioctl(ZFS_IOC_DESTROY, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

/*
 * Creates snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be created.
 * They must all be in the same pool.
 *
 * The props nvlist is properties to set.  Currently only user properties
 * are supported.  { user:prop_name -> string value }
 *
 * The returned results nvlist will have an entry for each snapshot that
 * failed.  The value will be the (int32) error code.
 *
 * The return value will be 0 if all snapshots were created, otherwise it will
 * be the errno of an (unspecified) snapshot that failed.
 */
int
lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	*errlist = NULL;

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);

	error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
	nvlist_free(args);

	return (error);
}
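
/*
 * Illustrative sketch of calling lzc_snapshot() (not part of the library).
 * The snapshot names and the "user:note" property are placeholders, and
 * <stdio.h>/<libnvpair.h> are assumed.  On partial failure the errlist
 * maps each failed snapshot to its errno.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	nvpair_t *pair;
 *	int error;
 *
 *	fnvlist_add_boolean(snaps, "tank/fs@today");
 *	fnvlist_add_boolean(snaps, "tank/vol@today");
 *	fnvlist_add_string(props, "user:note", "nightly");
 *
 *	error = lzc_snapshot(snaps, props, &errlist);
 *	if (error != 0 && errlist != NULL) {
 *		for (pair = nvlist_next_nvpair(errlist, NULL);
 *		    pair != NULL;
 *		    pair = nvlist_next_nvpair(errlist, pair)) {
 *			(void) fprintf(stderr, "%s: error %d\n",
 *			    nvpair_name(pair),
 *			    fnvpair_value_int32(pair));
 *		}
 *	}
 *	fnvlist_free(snaps);
 *	fnvlist_free(props);
 *	nvlist_free(errlist);
 */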

/*
 * Destroys snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be destroyed.
 * They must all be in the same pool.
 *
 * Snapshots that do not exist will be silently ignored.
 *
 * If 'defer' is not set, and a snapshot has user holds or clones, the
 * destroy operation will fail and none of the snapshots will be
 * destroyed.
 *
 * If 'defer' is set, and a snapshot has user holds or clones, it will be
 * marked for deferred destruction, and will be destroyed when the last hold
 * or clone is removed/destroyed.
 *
 * The return value will be 0 if all snapshots were destroyed (or marked for
 * later destruction if 'defer' is set) or didn't exist to begin with.
 *
 * Otherwise the return value will be the errno of an (unspecified) snapshot
 * that failed, no snapshots will be destroyed, and the errlist will have an
 * entry for each snapshot that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (defer)
		fnvlist_add_boolean(args, "defer");

	error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
	nvlist_free(args);

	return (error);
}

int
lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
    uint64_t *usedp)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;
	char fs[ZFS_MAX_DATASET_NAME_LEN];
	char *atp;

	/* determine the fs name */
	(void) strlcpy(fs, firstsnap, sizeof (fs));
	atp = strchr(fs, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	args = fnvlist_alloc();
	fnvlist_add_string(args, "firstsnap", firstsnap);

	err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
	nvlist_free(args);
	if (err == 0)
		*usedp = fnvlist_lookup_uint64(result, "used");
	fnvlist_free(result);

	return (err);
}

boolean_t
lzc_exists(const char *dataset)
{
	/*
	 * The objset_stats ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
	return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
}

/*
 * Create "user holds" on snapshots.  If there is a hold on a snapshot,
 * the snapshot cannot be destroyed.  (However, it can be marked for deletion
 * by lzc_destroy_snaps(defer=B_TRUE).)
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is the name of the hold (string type).
 *
 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
 * In this case, when the cleanup_fd is closed (including on process
 * termination), the holds will be released.  If the system is shut down
 * uncleanly, the holds will be released when the pool is next opened
 * or imported.
 *
 * Holds for snapshots which don't exist will be skipped and have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if all holds, for snapshots that existed,
 * were successfully created.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed and no holds will be created.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * (name = snapshot), with its value being the error code (int32).
 */
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *args;
	nvpair_t *elem;
	int error;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "holds", holds);
	if (cleanup_fd != -1)
		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);

	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
	nvlist_free(args);
	return (error);
}
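
/*
 * Illustrative sketch of calling lzc_hold() (not part of the library).
 * The snapshot name and hold tag are placeholders; passing -1 for
 * cleanup_fd keeps the hold until it is explicitly released.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int error;
 *
 *	fnvlist_add_string(holds, "tank/fs@today", "my-backup-tag");
 *	error = lzc_hold(holds, -1, &errlist);
 *	fnvlist_free(holds);
 *	nvlist_free(errlist);
 */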

/*
 * Release "user holds" on snapshots.  If the snapshot has been marked for
 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
 * any clones, and all the user holds are removed, then the snapshot will be
 * destroyed.
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is an nvlist whose keys are the holds to remove.
 *
 * Holds which failed to release because they didn't exist will have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if the nvlist of holds was empty or if all holds
 * that existed were successfully removed.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed to release and no holds will be released.
 *
 * In all cases the errlist will have an entry for each hold that failed to
 * release.
 */
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvpair_t *elem;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}
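
/*
 * Illustrative sketch of calling lzc_release() (not part of the library).
 * Names are placeholders.  Note the nesting: each snapshot maps to an
 * nvlist whose keys are the hold tags to release.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *tags = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int error;
 *
 *	fnvlist_add_boolean(tags, "my-backup-tag");
 *	fnvlist_add_nvlist(holds, "tank/fs@today", tags);
 *	error = lzc_release(holds, &errlist);
 *	fnvlist_free(tags);
 *	fnvlist_free(holds);
 *	nvlist_free(errlist);
 */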

/*
 * Retrieve list of user holds on the specified snapshot.
 *
 * On success, *holdsp will be set to an nvlist which the caller must free.
 * The keys are the names of the holds, and the value is the creation time
 * of the hold (uint64) in seconds since the epoch.
 */
int
lzc_get_holds(const char *snapname, nvlist_t **holdsp)
{
	int error;
	nvlist_t *innvl = fnvlist_alloc();
	error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
	fnvlist_free(innvl);
	return (error);
}
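
/*
 * Illustrative sketch of walking the nvlist returned by lzc_get_holds()
 * (not part of the library).  The snapshot name is a placeholder and
 * <stdio.h> is assumed.
 *
 *	nvlist_t *holds;
 *	nvpair_t *pair;
 *
 *	if (lzc_get_holds("tank/fs@today", &holds) == 0) {
 *		for (pair = nvlist_next_nvpair(holds, NULL);
 *		    pair != NULL;
 *		    pair = nvlist_next_nvpair(holds, pair)) {
 *			(void) printf("hold %s created at %llu\n",
 *			    nvpair_name(pair),
 *			    (u_longlong_t)fnvpair_value_uint64(pair));
 *		}
 *		fnvlist_free(holds);
 *	}
 */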

/*
 * Generate a zfs send stream for the specified snapshot and write it to
 * the specified file descriptor.
 *
 * "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
 *
 * If "from" is NULL, a full (non-incremental) stream will be sent.
 * If "from" is non-NULL, it must be the full name of a snapshot or
 * bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
 * "pool/fs#earlier_bmark").  If non-NULL, the specified snapshot or
 * bookmark must represent an earlier point in the history of "snapname".
 * It can be an earlier snapshot in the same filesystem or zvol as "snapname",
 * or it can be the origin of "snapname"'s filesystem, or an earlier
 * snapshot in the origin, etc.
 *
 * "fd" is the file descriptor to write the send stream to.
 *
 * If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
 * to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
 * records with drr_blksz > 128K.
 *
 * If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
 * to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
 * which the receiving system must support (as indicated by support
 * for the "embedded_data" feature).
 */
int
lzc_send(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags)
{
	return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
}
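
/*
 * Illustrative sketch of generating a send stream with lzc_send()
 * (not part of the library).  The snapshot names and the output path are
 * placeholders; passing NULL for "from" would produce a full stream
 * instead of this incremental one.
 *
 *	int fd = open("/var/tmp/stream",
 *	    O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	int error;
 *
 *	if (fd != -1) {
 *		error = lzc_send("tank/fs@today",
 *		    "tank/fs@yesterday", fd, 0);
 *		(void) close(fd);
 *	}
 */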

int
lzc_send_resume(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
{
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_int32(args, "fd", fd);
	if (from != NULL)
		fnvlist_add_string(args, "fromsnap", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (flags & LZC_SEND_FLAG_COMPRESS)
		fnvlist_add_boolean(args, "compressok");
	if (resumeobj != 0 || resumeoff != 0) {
		fnvlist_add_uint64(args, "resume_object", resumeobj);
		fnvlist_add_uint64(args, "resume_offset", resumeoff);
	}
	err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
	nvlist_free(args);
	return (err);
}

/*
 * "from" can be NULL, a snapshot, or a bookmark.
 *
 * If from is NULL, a full (non-incremental) stream will be estimated.  This
 * is calculated very efficiently.
 *
 * If from is a snapshot, lzc_send_space uses the deadlists attached to
 * each snapshot to efficiently estimate the stream size.
 *
 * If from is a bookmark, the indirect blocks in the destination snapshot
 * are traversed, looking for blocks with a birth time after the creation TXG
 * of the snapshot from which this bookmark was created.  This will result in
 * significantly more I/O and be less efficient than a send space estimation
 * on an equivalent snapshot.
 */
int
lzc_send_space(const char *snapname, const char *from,
    enum lzc_send_flags flags, uint64_t *spacep)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	if (from != NULL)
		fnvlist_add_string(args, "from", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (flags & LZC_SEND_FLAG_COMPRESS)
		fnvlist_add_boolean(args, "compressok");
	err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
	nvlist_free(args);
	if (err == 0)
		*spacep = fnvlist_lookup_uint64(result, "space");
	nvlist_free(result);
	return (err);
}
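
/*
 * Illustrative sketch of estimating a stream size with lzc_send_space()
 * (not part of the library).  The snapshot names are placeholders and
 * <stdio.h> is assumed.
 *
 *	uint64_t space;
 *
 *	if (lzc_send_space("tank/fs@today", "tank/fs@yesterday",
 *	    0, &space) == 0) {
 *		(void) printf("estimated stream size: %llu bytes\n",
 *		    (u_longlong_t)space);
 *	}
 */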

static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int rv;
	int len = ilen;

	do {
		rv = read(fd, cp, len);
		cp += rv;
		len -= rv;
	} while (rv > 0);

	if (rv < 0 || len != 0)
		return (EIO);

	return (0);
}

static int
recv_impl(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	/*
	 * The receive ioctl is still legacy, so we need to construct our own
	 * zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };
	char *atp;
	char *packed = NULL;
	size_t size;
	int error;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	/* zc_name is name of containing filesystem */
	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
	atp = strchr(zc.zc_name, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* if the fs does not exist, try its parent. */
	if (!lzc_exists(zc.zc_name)) {
		char *slashp = strrchr(zc.zc_name, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';
	}

	/* zc_value is full name of the snapshot to create */
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

	if (props != NULL) {
		/* zc_nvlist_src is props to set */
		packed = fnvlist_pack(props, &size);
		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
		zc.zc_nvlist_src_size = size;
	}

	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
	if (origin != NULL)
		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));

	/* zc_begin_record is non-byteswapped BEGIN record */
	if (begin_record == NULL) {
		error = recv_read(fd, &zc.zc_begin_record,
		    sizeof (zc.zc_begin_record));
		if (error != 0)
			goto out;
	} else {
		zc.zc_begin_record = *begin_record;
	}

	/* zc_cookie is fd to read from */
	zc.zc_cookie = fd;

	/* zc_guid is force flag */
	zc.zc_guid = force;

	zc.zc_resumable = resumable;

	/* zc_cleanup_fd is unused */
	zc.zc_cleanup_fd = -1;

	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
	if (error != 0)
		error = errno;

out:
	if (packed != NULL)
		fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

/*
 * The simplest receive case: receive from the specified fd, creating the
 * specified snapshot.  Apply the specified properties as "received" properties
 * (which can be overridden by locally-set properties).  If the stream is a
 * clone, its origin snapshot must be specified by 'origin'.  The 'force'
 * flag will cause the target filesystem to be rolled back or destroyed if
 * necessary to receive.
 *
 * Return 0 on success or an errno on failure.
 *
 * Note: this interface does not work on dedup'd streams
 * (those with DMU_BACKUP_FEATURE_DEDUP).
 */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_FALSE, fd, NULL));
}
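
/*
 * Illustrative sketch of receiving a stream with lzc_receive()
 * (not part of the library).  The target snapshot name and the stream
 * path are placeholders; the stream is assumed to be a full (non-clone)
 * stream, so no origin is passed.
 *
 *	int fd = open("/var/tmp/stream", O_RDONLY);
 *	int error;
 *
 *	if (fd != -1) {
 *		error = lzc_receive("tank/copy@today", NULL, NULL,
 *		    B_FALSE, fd);
 *		(void) close(fd);
 *	}
 */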

/*
 * Like lzc_receive, but if the receive fails due to premature stream
 * termination, the intermediate state will be preserved on disk.  In this
 * case, ECKSUM will be returned.  The receive may subsequently be resumed
 * with a resuming send stream generated by lzc_send_resume().
 */
int
lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_TRUE, fd, NULL));
}

/*
 * Like lzc_receive, but allows the caller to read the begin record and then
 * pass it in.  That could be useful if the caller wants to derive, for
 * example, the snapname or the origin parameters based on the information
 * contained in the begin record.
 * The begin record must be in its original form as read from the stream,
 * in other words, it should not be byteswapped.
 *
 * The 'resumable' parameter allows the caller to obtain the same behavior as
 * with lzc_receive_resumable.
 */
int
lzc_receive_with_header(const char *snapname, nvlist_t *props,
    const char *origin, boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	if (begin_record == NULL)
		return (EINVAL);
	return (recv_impl(snapname, props, origin, force, resumable, fd,
	    begin_record));
}

/*
 * Roll back this filesystem or volume to its most recent snapshot.
 * If snapnamebuf is not NULL, it will be filled in with the name
 * of the most recent snapshot.
 * Note that the latest snapshot may change if a new one is concurrently
 * created or the current one is destroyed.  lzc_rollback_to can be used
 * to roll back to a specific latest snapshot.
 *
 * Return 0 on success or an errno on failure.
 */
int
lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
	nvlist_free(args);
	if (err == 0 && snapnamebuf != NULL) {
		const char *snapname = fnvlist_lookup_string(result, "target");
		(void) strlcpy(snapnamebuf, snapname, snapnamelen);
	}
	nvlist_free(result);

	return (err);
}

/*
 * Roll back this filesystem or volume to the specified snapshot,
 * if possible.
 *
 * Return 0 on success or an errno on failure.
 */
int
lzc_rollback_to(const char *fsname, const char *snapname)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "target", snapname);
	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
	nvlist_free(args);
	nvlist_free(result);
	return (err);
}

/*
 * Creates bookmarks.
 *
 * The bookmarks nvlist maps from the name of the bookmark (e.g.
 * "pool/fs#bmark") to the name of the snapshot (e.g. "pool/fs@snap").  All
 * the bookmarks and snapshots must be in the same pool.
 *
 * The returned results nvlist will have an entry for each bookmark that
 * failed.  The value will be the (int32) error code.
 *
 * The return value will be 0 if all bookmarks were created, otherwise it will
 * be the errno of an (undetermined) bookmark that failed.
 */
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bookmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);

	return (error);
}
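
/*
 * Illustrative sketch of calling lzc_bookmark() (not part of the library).
 * Names are placeholders; each entry maps the new bookmark to the
 * snapshot it should refer to.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int error;
 *
 *	fnvlist_add_string(bmarks, "tank/fs#before-upgrade",
 *	    "tank/fs@today");
 *	error = lzc_bookmark(bmarks, &errlist);
 *	fnvlist_free(bmarks);
 *	nvlist_free(errlist);
 */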

/*
 * Retrieve bookmarks.
 *
 * Retrieve the list of bookmarks for the given file system. The props
 * parameter is an nvlist of property names (with no values) that will be
 * returned for each bookmark.
 *
 * The following are valid properties on bookmarks, all of which are numbers
 * (represented as uint64 in the nvlist):
 *
 * "guid" - globally unique identifier of the snapshot it refers to
 * "createtxg" - txg when the snapshot it refers to was created
 * "creation" - timestamp when the snapshot it refers to was created
 *
 * The format of the returned nvlist is as follows:
 * <short name of bookmark> -> {
 *     <name of property> -> {
 *         "value" -> uint64
 *     }
 * }
 */
int
lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
{
	return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
}
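
/*
 * Illustrative sketch of listing bookmarks with lzc_get_bookmarks()
 * (not part of the library).  The file system name is a placeholder and
 * <stdio.h> is assumed.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *bmarks = NULL;
 *	nvpair_t *pair;
 *
 *	fnvlist_add_boolean(props, "createtxg");
 *	if (lzc_get_bookmarks("tank/fs", props, &bmarks) == 0) {
 *		for (pair = nvlist_next_nvpair(bmarks, NULL);
 *		    pair != NULL;
 *		    pair = nvlist_next_nvpair(bmarks, pair)) {
 *			nvlist_t *prop = fnvlist_lookup_nvlist(
 *			    fnvpair_value_nvlist(pair), "createtxg");
 *			(void) printf("#%s createtxg %llu\n",
 *			    nvpair_name(pair), (u_longlong_t)
 *			    fnvlist_lookup_uint64(prop, "value"));
 *		}
 *		fnvlist_free(bmarks);
 *	}
 *	fnvlist_free(props);
 */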

/*
 * Destroys bookmarks.
 *
 * The keys in the bmarks nvlist are the bookmarks to be destroyed.
 * They must all be in the same pool.  Bookmarks are specified as
 * <fs>#<bmark>.
 *
 * Bookmarks that do not exist will be silently ignored.
 *
 * The return value will be 0 if all bookmarks that existed were destroyed.
 *
 * Otherwise the return value will be the errno of an (undetermined) bookmark
 * that failed, no bookmarks will be destroyed, and the errlist will have an
 * entry for each bookmark that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);

	return (error);
}

static int
lzc_channel_program_impl(const char *pool, const char *program, boolean_t sync,
    uint64_t instrlimit, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	int error;
	nvlist_t *args;

	args = fnvlist_alloc();
	fnvlist_add_string(args, ZCP_ARG_PROGRAM, program);
	fnvlist_add_nvlist(args, ZCP_ARG_ARGLIST, argnvl);
	fnvlist_add_boolean_value(args, ZCP_ARG_SYNC, sync);
	fnvlist_add_uint64(args, ZCP_ARG_INSTRLIMIT, instrlimit);
	fnvlist_add_uint64(args, ZCP_ARG_MEMLIMIT, memlimit);
	error = lzc_ioctl(ZFS_IOC_CHANNEL_PROGRAM, pool, args, outnvl);
	fnvlist_free(args);

	return (error);
}

/*
 * Executes a channel program.
 *
 * If this function returns 0 the channel program was successfully loaded and
 * ran without failing. Note that individual commands that the channel program
 * ran may have failed and the channel program is responsible for reporting
 * such errors through outnvl if they are important.
 *
 * This method may also return:
 *
 * EINVAL   The program contains syntax errors, or an invalid memory or time
 *          limit was given. No part of the channel program was executed.
 *          If caused by syntax errors, 'outnvl' contains information about the
 *          errors.
 *
 * ECHRNG   The program was executed, but encountered a runtime error, such as
 *          calling a function with incorrect arguments, invoking the error()
 *          function directly, failing an assert() command, etc. Some portion
 *          of the channel program may have executed and committed changes.
 *          Information about the failure can be found in 'outnvl'.
 *
 * ENOMEM   The program fully executed, but the output buffer was not large
 *          enough to store the returned value. No output is returned through
 *          'outnvl'.
 *
 * ENOSPC   The program was terminated because it exceeded its memory usage
 *          limit. Some portion of the channel program may have executed and
 *          committed changes to disk. No output is returned through 'outnvl'.
 *
 * ETIME    The program was terminated because it exceeded its Lua instruction
 *          limit. Some portion of the channel program may have executed and
 *          committed changes to disk. No output is returned through 'outnvl'.
 */
int
lzc_channel_program(const char *pool, const char *program, uint64_t instrlimit,
    uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	return (lzc_channel_program_impl(pool, program, B_TRUE, instrlimit,
	    memlimit, argnvl, outnvl));
}
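
/*
 * Illustrative sketch of running a channel program (not part of the
 * library).  The pool name and the one-line Lua program are placeholders,
 * and the instruction/memory limits are arbitrary example values.
 *
 *	nvlist_t *argnvl = fnvlist_alloc();
 *	nvlist_t *outnvl = NULL;
 *	int error;
 *
 *	error = lzc_channel_program("tank",
 *	    "return zfs.get_prop('tank', 'guid')",
 *	    10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
 *	fnvlist_free(argnvl);
 *	nvlist_free(outnvl);
 */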

/*
 * Creates a checkpoint for the specified pool.
 *
 * If this function returns 0 the pool was successfully checkpointed.
 *
 * This method may also return:
 *
 * ZFS_ERR_CHECKPOINT_EXISTS
 *	The pool already has a checkpoint. A pool can have at most one
 *	checkpoint at any given time.
 *
 * ZFS_ERR_DISCARDING_CHECKPOINT
 *	ZFS is in the middle of discarding a checkpoint for this pool.
 *	The pool can be checkpointed again once the discard is done.
 *
 * ZFS_ERR_DEVRM_IN_PROGRESS
 *	A vdev is currently being removed. The pool cannot be
 *	checkpointed until the device removal is done.
 *
 * ZFS_ERR_VDEV_TOO_BIG
 *	One or more top-level vdevs exceed the maximum vdev size
 *	supported for this feature.
 */
int
lzc_pool_checkpoint(const char *pool)
{
	int error;

	nvlist_t *result = NULL;
	nvlist_t *args = fnvlist_alloc();

	error = lzc_ioctl(ZFS_IOC_POOL_CHECKPOINT, pool, args, &result);

	fnvlist_free(args);
	fnvlist_free(result);

	return (error);
}

/*
 * Discard the checkpoint from the specified pool.
 *
 * If this function returns 0 the checkpoint was successfully discarded.
 *
 * This method may also return:
 *
 * ZFS_ERR_NO_CHECKPOINT
 *	The pool does not have a checkpoint.
 *
 * ZFS_ERR_DISCARDING_CHECKPOINT
 *	ZFS is already in the middle of discarding the checkpoint.
 */
int
lzc_pool_checkpoint_discard(const char *pool)
{
	int error;

	nvlist_t *result = NULL;
	nvlist_t *args = fnvlist_alloc();

	error = lzc_ioctl(ZFS_IOC_POOL_DISCARD_CHECKPOINT, pool, args, &result);

	fnvlist_free(args);
	fnvlist_free(result);

	return (error);
}

/*
 * Executes a read-only channel program.
 *
 * A read-only channel program works programmatically the same way as a
 * normal channel program executed with lzc_channel_program(). The only
 * difference is it runs exclusively in open-context and therefore can
 * return faster. The downside is that the program cannot change on-disk
 * state by calling functions from the zfs.sync submodule.
 *
 * The return values of this function (and their meaning) are exactly the
 * same as the ones described in lzc_channel_program().
 */
int
lzc_channel_program_nosync(const char *pool, const char *program,
    uint64_t timeout, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	return (lzc_channel_program_impl(pool, program, B_FALSE, timeout,
	    memlimit, argnvl, outnvl));
}

/*
 * Changes initializing state.
 *
 * vdevs should be a list of (<key>, guid) where guid is a uint64 vdev GUID.
 * The key is ignored.
 *
 * If there are errors related to vdev arguments, per-vdev errors are returned
 * in an nvlist with the key "vdevs". Each error is a (guid, errno) pair where
 * guid is stringified with PRIu64, and errno is one of the following as
 * an int64_t:
 *	- ENODEV if the device was not found
 *	- EINVAL if the device is not a leaf or is not concrete (e.g. missing)
 *	- EROFS if the device is not writeable
 *	- EBUSY if a start was requested but the device is already being
 *	  initialized
 *	- ESRCH if a cancel or suspend was requested but the device is not
 *	  being initialized
 *
 * If the errlist is empty, then the return value will be:
 *	- EINVAL if one or more arguments was invalid
 *	- Other spa_open failures
 *	- 0 if the operation succeeded
 */
int
lzc_initialize(const char *poolname, pool_initialize_func_t cmd_type,
    nvlist_t *vdevs, nvlist_t **errlist)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_uint64(args, ZPOOL_INITIALIZE_COMMAND, (uint64_t)cmd_type);
	fnvlist_add_nvlist(args, ZPOOL_INITIALIZE_VDEVS, vdevs);

	error = lzc_ioctl(ZFS_IOC_POOL_INITIALIZE, poolname, args, errlist);

	fnvlist_free(args);

	return (error);
}
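
/*
 * Illustrative sketch of starting initialization on one vdev (not part
 * of the library).  The pool name and vdev GUID are placeholders, and
 * the POOL_INITIALIZE_START enumerator name is an assumption -- check
 * the pool_initialize_func_t definition in sys/fs/zfs.h for the exact
 * spelling.
 *
 *	nvlist_t *vdevs = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int error;
 *
 *	fnvlist_add_uint64(vdevs, "vdev-0", 0x1234567890abcdefULL);
 *	error = lzc_initialize("tank", POOL_INITIALIZE_START, vdevs,
 *	    &errlist);
 *	fnvlist_free(vdevs);
 *	nvlist_free(errlist);
 */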