xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision d8154717)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Portions Copyright 2010 Robert Milkowski
25  *
26  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
27  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
28  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29  * Copyright (c) 2014 Integros [integros.com]
30  */
31 
32 /*
33  * ZFS volume emulation driver.
34  *
35  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36  * Volumes are accessed through the symbolic links named:
37  *
38  * /dev/zvol/dsk/<pool_name>/<dataset_name>
39  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
40  *
41  * These links are created by the /dev filesystem (sdev_zvolops.c).
42  * Volumes are persistent through reboot.  No user command needs to be
43  * run before opening and using a device.
44  */
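
/*
 * Illustrative sketch (not part of the driver): a userland consumer
 * opens one of the device nodes named above directly; the pool and
 * volume names ("tank", "vol0") are hypothetical.  Raw (rdsk) I/O
 * must be sector aligned.
 *
 *	int fd = open("/dev/zvol/rdsk/tank/vol0", O_RDWR);
 *	if (fd != -1) {
 *		(void) pwrite(fd, buf, 512, 0);
 *		(void) close(fd);
 *	}
 */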
45 
46 #include <sys/types.h>
47 #include <sys/param.h>
48 #include <sys/errno.h>
49 #include <sys/uio.h>
50 #include <sys/buf.h>
51 #include <sys/modctl.h>
52 #include <sys/open.h>
53 #include <sys/kmem.h>
54 #include <sys/conf.h>
55 #include <sys/cmn_err.h>
56 #include <sys/stat.h>
57 #include <sys/zap.h>
58 #include <sys/spa.h>
59 #include <sys/spa_impl.h>
60 #include <sys/zio.h>
61 #include <sys/dmu_traverse.h>
62 #include <sys/dnode.h>
63 #include <sys/dsl_dataset.h>
64 #include <sys/dsl_prop.h>
65 #include <sys/dkio.h>
66 #include <sys/efi_partition.h>
67 #include <sys/byteorder.h>
68 #include <sys/pathname.h>
69 #include <sys/ddi.h>
70 #include <sys/sunddi.h>
71 #include <sys/crc32.h>
72 #include <sys/dirent.h>
73 #include <sys/policy.h>
74 #include <sys/fs/zfs.h>
75 #include <sys/zfs_ioctl.h>
76 #include <sys/mkdev.h>
77 #include <sys/zil.h>
78 #include <sys/refcount.h>
79 #include <sys/zfs_znode.h>
80 #include <sys/zfs_rlock.h>
81 #include <sys/vdev_disk.h>
82 #include <sys/vdev_impl.h>
83 #include <sys/vdev_raidz.h>
84 #include <sys/zvol.h>
85 #include <sys/dumphdr.h>
86 #include <sys/zil_impl.h>
87 #include <sys/dbuf.h>
88 #include <sys/dmu_tx.h>
89 #include <sys/zfeature.h>
90 #include <sys/zio_checksum.h>
91 #include <sys/zil_impl.h>
92 #include <sys/dkioc_free_util.h>
93 
94 #include "zfs_namecheck.h"
95 
96 void *zfsdev_state;
97 static char *zvol_tag = "zvol_tag";
98 
99 #define	ZVOL_DUMPSIZE		"dumpsize"
100 
101 /*
102  * This lock protects the zfsdev_state structure from being modified
103  * while it's being used, e.g. an open that comes in before a create
104  * finishes.  It also protects temporary opens of the dataset so that,
105  * e.g., an open doesn't get a spurious EBUSY.
106  */
107 kmutex_t zfsdev_state_lock;
108 static uint32_t zvol_minors;
109 
110 typedef struct zvol_extent {
111 	list_node_t	ze_node;
112 	dva_t		ze_dva;		/* dva associated with this extent */
113 	uint64_t	ze_nblks;	/* number of blocks in extent */
114 } zvol_extent_t;
115 
116 /*
117  * The in-core state of each volume.
118  */
119 typedef struct zvol_state {
120 	char		zv_name[MAXPATHLEN]; /* pool/dd name */
121 	uint64_t	zv_volsize;	/* amount of space we advertise */
122 	uint64_t	zv_volblocksize; /* volume block size */
123 	minor_t		zv_minor;	/* minor number */
124 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
125 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
126 	objset_t	*zv_objset;	/* objset handle */
127 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
128 	uint32_t	zv_total_opens;	/* total open count */
129 	zilog_t		*zv_zilog;	/* ZIL handle */
130 	list_t		zv_extents;	/* List of extents for dump */
131 	znode_t		zv_znode;	/* for range locking */
132 	dnode_t		*zv_dn;		/* dnode hold */
133 } zvol_state_t;
134 
135 /*
136  * zvol specific flags
137  */
138 #define	ZVOL_RDONLY	0x1
139 #define	ZVOL_DUMPIFIED	0x2
140 #define	ZVOL_EXCL	0x4
141 #define	ZVOL_WCE	0x8
142 
143 /*
144  * zvol maximum transfer in one DMU tx.
145  */
146 int zvol_maxphys = DMU_MAX_ACCESS/2;
147 
148 /*
149  * Toggle unmap functionality.
150  */
151 boolean_t zvol_unmap_enabled = B_TRUE;
152 
153 /*
154  * If true, unmaps requested as synchronous are executed synchronously,
155  * otherwise all unmaps are asynchronous.
156  */
157 boolean_t zvol_unmap_sync_enabled = B_FALSE;
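
/*
 * Usage note (standard illumos tunable practice, an assumption rather
 * than something defined in this file): both flags above are plain
 * globals, so an administrator could disable unmap support across
 * reboots with an /etc/system entry such as:
 *
 *	set zfs:zvol_unmap_enabled = 0
 *
 * or toggle it on a live system with mdb -kw.
 */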
158 
159 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
160     nvlist_t *, nvlist_t *);
161 static int zvol_remove_zv(zvol_state_t *);
162 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
163     struct lwb *lwb, zio_t *zio);
164 static int zvol_dumpify(zvol_state_t *zv);
165 static int zvol_dump_fini(zvol_state_t *zv);
166 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
167 
168 static void
169 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
170 {
171 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
172 
173 	zv->zv_volsize = volsize;
174 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
175 	    "Size", volsize) == DDI_SUCCESS);
176 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
177 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
178 
179 	/* Notify specfs to invalidate the cached size */
180 	spec_size_invalidate(dev, VBLK);
181 	spec_size_invalidate(dev, VCHR);
182 }
183 
184 int
185 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
186 {
187 	if (volsize == 0)
188 		return (SET_ERROR(EINVAL));
189 
190 	if (volsize % blocksize != 0)
191 		return (SET_ERROR(EINVAL));
192 
193 #ifdef _ILP32
194 	if (volsize - 1 > SPEC_MAXOFFSET_T)
195 		return (SET_ERROR(EOVERFLOW));
196 #endif
197 	return (0);
198 }
199 
200 int
201 zvol_check_volblocksize(uint64_t volblocksize)
202 {
203 	if (volblocksize < SPA_MINBLOCKSIZE ||
204 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
205 	    !ISP2(volblocksize))
206 		return (SET_ERROR(EDOM));
207 
208 	return (0);
209 }
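
/*
 * Worked examples for the two checks above (illustrative only, assuming
 * the usual illumos values SPA_MINBLOCKSIZE = 512 and
 * SPA_OLD_MAXBLOCKSIZE = 128K):
 *
 *	zvol_check_volblocksize(8192) == 0	    8K, power of two, in range
 *	zvol_check_volblocksize(12288) == EDOM	    12K is not a power of two
 *	zvol_check_volsize(1ULL << 30, 8192) == 0   1 GiB is a multiple of 8K
 *	zvol_check_volsize(100000, 8192) == EINVAL  not a multiple of 8K
 */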
210 
211 int
212 zvol_get_stats(objset_t *os, nvlist_t *nv)
213 {
214 	int error;
215 	dmu_object_info_t doi;
216 	uint64_t val;
217 
218 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
219 	if (error)
220 		return (error);
221 
222 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
223 
224 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
225 
226 	if (error == 0) {
227 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
228 		    doi.doi_data_block_size);
229 	}
230 
231 	return (error);
232 }
233 
234 static zvol_state_t *
235 zvol_minor_lookup(const char *name)
236 {
237 	minor_t minor;
238 	zvol_state_t *zv;
239 
240 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
241 
242 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
243 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
244 		if (zv == NULL)
245 			continue;
246 		if (strcmp(zv->zv_name, name) == 0)
247 			return (zv);
248 	}
249 
250 	return (NULL);
251 }
252 
253 /* extent mapping arg */
254 struct maparg {
255 	zvol_state_t	*ma_zv;
256 	uint64_t	ma_blks;
257 };
258 
259 /*ARGSUSED*/
260 static int
261 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
262     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
263 {
264 	struct maparg *ma = arg;
265 	zvol_extent_t *ze;
266 	int bs = ma->ma_zv->zv_volblocksize;
267 
268 	if (bp == NULL || BP_IS_HOLE(bp) ||
269 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
270 		return (0);
271 
272 	VERIFY(!BP_IS_EMBEDDED(bp));
273 
274 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
275 	ma->ma_blks++;
276 
277 	/* Abort immediately if we have encountered gang blocks */
278 	if (BP_IS_GANG(bp))
279 		return (SET_ERROR(EFRAGS));
280 
281 	/*
282 	 * See if the block is at the end of the previous extent.
283 	 */
284 	ze = list_tail(&ma->ma_zv->zv_extents);
285 	if (ze &&
286 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
287 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
288 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
289 		ze->ze_nblks++;
290 		return (0);
291 	}
292 
293 	dprintf_bp(bp, "%s", "next blkptr:");
294 
295 	/* start a new extent */
296 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
297 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
298 	ze->ze_nblks = 1;
299 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
300 	return (0);
301 }
302 
303 static void
304 zvol_free_extents(zvol_state_t *zv)
305 {
306 	zvol_extent_t *ze;
307 
308 	while (ze = list_head(&zv->zv_extents)) {
309 		list_remove(&zv->zv_extents, ze);
310 		kmem_free(ze, sizeof (zvol_extent_t));
311 	}
312 }
313 
314 static int
315 zvol_get_lbas(zvol_state_t *zv)
316 {
317 	objset_t *os = zv->zv_objset;
318 	struct maparg	ma;
319 	int		err;
320 
321 	ma.ma_zv = zv;
322 	ma.ma_blks = 0;
323 	zvol_free_extents(zv);
324 
325 	/* commit any in-flight changes before traversing the dataset */
326 	txg_wait_synced(dmu_objset_pool(os), 0);
327 	err = traverse_dataset(dmu_objset_ds(os), 0,
328 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
329 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
330 		zvol_free_extents(zv);
331 		return (err ? err : EIO);
332 	}
333 
334 	return (0);
335 }
336 
337 /* ARGSUSED */
338 void
339 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
340 {
341 	zfs_creat_t *zct = arg;
342 	nvlist_t *nvprops = zct->zct_props;
343 	int error;
344 	uint64_t volblocksize, volsize;
345 
346 	VERIFY(nvlist_lookup_uint64(nvprops,
347 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
348 	if (nvlist_lookup_uint64(nvprops,
349 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
350 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
351 
352 	/*
353 	 * These properties must be removed from the list so the generic
354 	 * property setting step won't apply to them.
355 	 */
356 	VERIFY(nvlist_remove_all(nvprops,
357 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
358 	(void) nvlist_remove_all(nvprops,
359 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
360 
361 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
362 	    DMU_OT_NONE, 0, tx);
363 	ASSERT(error == 0);
364 
365 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
366 	    DMU_OT_NONE, 0, tx);
367 	ASSERT(error == 0);
368 
369 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
370 	ASSERT(error == 0);
371 }
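
/*
 * Resulting layout (a restatement of the code above, not new behavior):
 * every zvol objset contains two interesting objects -- ZVOL_OBJ, the
 * DMU_OT_ZVOL object holding the volume data, and ZVOL_ZAP_OBJ, a
 * DMU_OT_ZVOL_PROP ZAP whose "size" entry records the advertised
 * volume size.
 */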
372 
373 /*
374  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
375  * implement DKIOCFREE/free-long-range.
376  */
377 static int
378 zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
379 {
380 	zvol_state_t *zv = arg1;
381 	lr_truncate_t *lr = arg2;
382 	uint64_t offset, length;
383 
384 	if (byteswap)
385 		byteswap_uint64_array(lr, sizeof (*lr));
386 
387 	offset = lr->lr_offset;
388 	length = lr->lr_length;
389 
390 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
391 }
392 
393 /*
394  * Replay a TX_WRITE ZIL transaction that didn't get committed
395  * after a system failure
396  */
397 static int
398 zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
399 {
400 	zvol_state_t *zv = arg1;
401 	lr_write_t *lr = arg2;
402 	objset_t *os = zv->zv_objset;
403 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
404 	uint64_t offset, length;
405 	dmu_tx_t *tx;
406 	int error;
407 
408 	if (byteswap)
409 		byteswap_uint64_array(lr, sizeof (*lr));
410 
411 	offset = lr->lr_offset;
412 	length = lr->lr_length;
413 
414 	/* If it's a dmu_sync() block, write the whole block */
415 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
416 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
417 		if (length < blocksize) {
418 			offset -= offset % blocksize;
419 			length = blocksize;
420 		}
421 	}
422 
423 	tx = dmu_tx_create(os);
424 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
425 	error = dmu_tx_assign(tx, TXG_WAIT);
426 	if (error) {
427 		dmu_tx_abort(tx);
428 	} else {
429 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
430 		dmu_tx_commit(tx);
431 	}
432 
433 	return (error);
434 }
435 
436 /* ARGSUSED */
437 static int
438 zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
439 {
440 	return (SET_ERROR(ENOTSUP));
441 }
442 
443 /*
444  * Callback vectors for replaying records.
445  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
446  */
447 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
448 	zvol_replay_err,	/* 0 no such transaction type */
449 	zvol_replay_err,	/* TX_CREATE */
450 	zvol_replay_err,	/* TX_MKDIR */
451 	zvol_replay_err,	/* TX_MKXATTR */
452 	zvol_replay_err,	/* TX_SYMLINK */
453 	zvol_replay_err,	/* TX_REMOVE */
454 	zvol_replay_err,	/* TX_RMDIR */
455 	zvol_replay_err,	/* TX_LINK */
456 	zvol_replay_err,	/* TX_RENAME */
457 	zvol_replay_write,	/* TX_WRITE */
458 	zvol_replay_truncate,	/* TX_TRUNCATE */
459 	zvol_replay_err,	/* TX_SETATTR */
460 	zvol_replay_err,	/* TX_ACL */
461 	zvol_replay_err,	/* TX_CREATE_ACL */
462 	zvol_replay_err,	/* TX_CREATE_ATTR */
463 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
464 	zvol_replay_err,	/* TX_MKDIR_ACL */
465 	zvol_replay_err,	/* TX_MKDIR_ATTR */
466 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
467 	zvol_replay_err,	/* TX_WRITE2 */
468 };
469 
470 int
471 zvol_name2minor(const char *name, minor_t *minor)
472 {
473 	zvol_state_t *zv;
474 
475 	mutex_enter(&zfsdev_state_lock);
476 	zv = zvol_minor_lookup(name);
477 	if (minor && zv)
478 		*minor = zv->zv_minor;
479 	mutex_exit(&zfsdev_state_lock);
480 	return (zv ? 0 : -1);
481 }
482 
483 /*
484  * Create a minor node (plus a whole lot more) for the specified volume.
485  */
486 int
487 zvol_create_minor(const char *name)
488 {
489 	zfs_soft_state_t *zs;
490 	zvol_state_t *zv;
491 	objset_t *os;
492 	dmu_object_info_t doi;
493 	minor_t minor = 0;
494 	char chrbuf[30], blkbuf[30];
495 	int error;
496 
497 	mutex_enter(&zfsdev_state_lock);
498 
499 	if (zvol_minor_lookup(name) != NULL) {
500 		mutex_exit(&zfsdev_state_lock);
501 		return (SET_ERROR(EEXIST));
502 	}
503 
504 	/* lie and say we're read-only */
505 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
506 
507 	if (error) {
508 		mutex_exit(&zfsdev_state_lock);
509 		return (error);
510 	}
511 
512 	if ((minor = zfsdev_minor_alloc()) == 0) {
513 		dmu_objset_disown(os, FTAG);
514 		mutex_exit(&zfsdev_state_lock);
515 		return (SET_ERROR(ENXIO));
516 	}
517 
518 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
519 		dmu_objset_disown(os, FTAG);
520 		mutex_exit(&zfsdev_state_lock);
521 		return (SET_ERROR(EAGAIN));
522 	}
523 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
524 	    (char *)name);
525 
526 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
527 
528 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
529 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
530 		ddi_soft_state_free(zfsdev_state, minor);
531 		dmu_objset_disown(os, FTAG);
532 		mutex_exit(&zfsdev_state_lock);
533 		return (SET_ERROR(EAGAIN));
534 	}
535 
536 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
537 
538 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
539 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
540 		ddi_remove_minor_node(zfs_dip, chrbuf);
541 		ddi_soft_state_free(zfsdev_state, minor);
542 		dmu_objset_disown(os, FTAG);
543 		mutex_exit(&zfsdev_state_lock);
544 		return (SET_ERROR(EAGAIN));
545 	}
546 
547 	zs = ddi_get_soft_state(zfsdev_state, minor);
548 	zs->zss_type = ZSST_ZVOL;
549 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
550 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
551 	zv->zv_min_bs = DEV_BSHIFT;
552 	zv->zv_minor = minor;
553 	zv->zv_objset = os;
554 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
555 		zv->zv_flags |= ZVOL_RDONLY;
556 	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
557 	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
558 	    sizeof (rl_t), offsetof(rl_t, r_node));
559 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
560 	    offsetof(zvol_extent_t, ze_node));
561 	/* get and cache the blocksize */
562 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
563 	ASSERT(error == 0);
564 	zv->zv_volblocksize = doi.doi_data_block_size;
565 
566 	if (spa_writeable(dmu_objset_spa(os))) {
567 		if (zil_replay_disable)
568 			zil_destroy(dmu_objset_zil(os), B_FALSE);
569 		else
570 			zil_replay(os, zv, zvol_replay_vector);
571 	}
572 	dmu_objset_disown(os, FTAG);
573 	zv->zv_objset = NULL;
574 
575 	zvol_minors++;
576 
577 	mutex_exit(&zfsdev_state_lock);
578 
579 	return (0);
580 }
581 
582 /*
583  * Remove minor node for the specified volume.
584  */
585 static int
586 zvol_remove_zv(zvol_state_t *zv)
587 {
588 	char nmbuf[20];
589 	minor_t minor = zv->zv_minor;
590 
591 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
592 	if (zv->zv_total_opens != 0)
593 		return (SET_ERROR(EBUSY));
594 
595 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
596 	ddi_remove_minor_node(zfs_dip, nmbuf);
597 
598 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
599 	ddi_remove_minor_node(zfs_dip, nmbuf);
600 
601 	avl_destroy(&zv->zv_znode.z_range_avl);
602 	mutex_destroy(&zv->zv_znode.z_range_lock);
603 
604 	kmem_free(zv, sizeof (zvol_state_t));
605 
606 	ddi_soft_state_free(zfsdev_state, minor);
607 
608 	zvol_minors--;
609 	return (0);
610 }
611 
612 int
613 zvol_remove_minor(const char *name)
614 {
615 	zvol_state_t *zv;
616 	int rc;
617 
618 	mutex_enter(&zfsdev_state_lock);
619 	if ((zv = zvol_minor_lookup(name)) == NULL) {
620 		mutex_exit(&zfsdev_state_lock);
621 		return (SET_ERROR(ENXIO));
622 	}
623 	rc = zvol_remove_zv(zv);
624 	mutex_exit(&zfsdev_state_lock);
625 	return (rc);
626 }
627 
628 int
629 zvol_first_open(zvol_state_t *zv)
630 {
631 	objset_t *os;
632 	uint64_t volsize;
633 	int error;
634 	uint64_t readonly;
635 
636 	/* lie and say we're read-only */
637 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
638 	    zvol_tag, &os);
639 	if (error)
640 		return (error);
641 
642 	zv->zv_objset = os;
643 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
644 	if (error) {
645 		ASSERT(error == 0);
646 		dmu_objset_disown(os, zvol_tag);
647 		return (error);
648 	}
649 
650 	error = dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn);
651 	if (error) {
652 		dmu_objset_disown(os, zvol_tag);
653 		return (error);
654 	}
655 
656 	zvol_size_changed(zv, volsize);
657 	zv->zv_zilog = zil_open(os, zvol_get_data);
658 
659 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
660 	    NULL) == 0);
661 	if (readonly || dmu_objset_is_snapshot(os) ||
662 	    !spa_writeable(dmu_objset_spa(os)))
663 		zv->zv_flags |= ZVOL_RDONLY;
664 	else
665 		zv->zv_flags &= ~ZVOL_RDONLY;
666 	return (error);
667 }
668 
669 void
670 zvol_last_close(zvol_state_t *zv)
671 {
672 	zil_close(zv->zv_zilog);
673 	zv->zv_zilog = NULL;
674 
675 	dnode_rele(zv->zv_dn, zvol_tag);
676 	zv->zv_dn = NULL;
677 
678 	/*
679 	 * Evict cached data
680 	 */
681 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
682 	    !(zv->zv_flags & ZVOL_RDONLY))
683 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
684 	dmu_objset_evict_dbufs(zv->zv_objset);
685 
686 	dmu_objset_disown(zv->zv_objset, zvol_tag);
687 	zv->zv_objset = NULL;
688 }
689 
690 int
691 zvol_prealloc(zvol_state_t *zv)
692 {
693 	objset_t *os = zv->zv_objset;
694 	dmu_tx_t *tx;
695 	uint64_t refd, avail, usedobjs, availobjs;
696 	uint64_t resid = zv->zv_volsize;
697 	uint64_t off = 0;
698 
699 	/* Check the space usage before attempting to allocate the space */
700 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
701 	if (avail < zv->zv_volsize)
702 		return (SET_ERROR(ENOSPC));
703 
704 	/* Free old extents if they exist */
705 	zvol_free_extents(zv);
706 
707 	while (resid != 0) {
708 		int error;
709 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
710 
711 		tx = dmu_tx_create(os);
712 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
713 		error = dmu_tx_assign(tx, TXG_WAIT);
714 		if (error) {
715 			dmu_tx_abort(tx);
716 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
717 			return (error);
718 		}
719 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
720 		dmu_tx_commit(tx);
721 		off += bytes;
722 		resid -= bytes;
723 	}
724 	txg_wait_synced(dmu_objset_pool(os), 0);
725 
726 	return (0);
727 }
728 
729 static int
730 zvol_update_volsize(objset_t *os, uint64_t volsize)
731 {
732 	dmu_tx_t *tx;
733 	int error;
734 
735 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
736 
737 	tx = dmu_tx_create(os);
738 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
739 	dmu_tx_mark_netfree(tx);
740 	error = dmu_tx_assign(tx, TXG_WAIT);
741 	if (error) {
742 		dmu_tx_abort(tx);
743 		return (error);
744 	}
745 
746 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
747 	    &volsize, tx);
748 	dmu_tx_commit(tx);
749 
750 	if (error == 0)
751 		error = dmu_free_long_range(os,
752 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
753 	return (error);
754 }
755 
756 void
757 zvol_remove_minors(const char *name)
758 {
759 	zvol_state_t *zv;
760 	char *namebuf;
761 	minor_t minor;
762 
763 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
764 	(void) strncpy(namebuf, name, strlen(name));
765 	(void) strcat(namebuf, "/");
766 	mutex_enter(&zfsdev_state_lock);
767 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
768 
769 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
770 		if (zv == NULL)
771 			continue;
772 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
773 			(void) zvol_remove_zv(zv);
774 	}
775 	kmem_free(namebuf, strlen(name) + 2);
776 
777 	mutex_exit(&zfsdev_state_lock);
778 }
779 
780 static int
781 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
782 {
783 	uint64_t old_volsize = 0ULL;
784 	int error = 0;
785 
786 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
787 
788 	/*
789 	 * Reinitialize the dump area to the new size.  If we
790 	 * fail to resize the dump area then restore it to its
791 	 * original size.  We must set the new volsize prior to
792 	 * calling dumpvp_resize(); otherwise the device's new
793 	 * size(9P) would not be visible to the dump subsystem.
794 	 */
795 	old_volsize = zv->zv_volsize;
796 	zvol_size_changed(zv, volsize);
797 
798 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
799 		if ((error = zvol_dumpify(zv)) != 0 ||
800 		    (error = dumpvp_resize()) != 0) {
801 			int dumpify_error;
802 
803 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
804 			zvol_size_changed(zv, old_volsize);
805 			dumpify_error = zvol_dumpify(zv);
806 			error = dumpify_error ? dumpify_error : error;
807 		}
808 	}
809 
810 	/*
811 	 * Generate a LUN expansion event.
812 	 */
813 	if (error == 0) {
814 		sysevent_id_t eid;
815 		nvlist_t *attr;
816 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
817 
818 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
819 		    zv->zv_minor);
820 
821 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
822 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
823 
824 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
825 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
826 
827 		nvlist_free(attr);
828 		kmem_free(physpath, MAXPATHLEN);
829 	}
830 	return (error);
831 }
832 
833 int
834 zvol_set_volsize(const char *name, uint64_t volsize)
835 {
836 	zvol_state_t *zv = NULL;
837 	objset_t *os;
838 	int error;
839 	dmu_object_info_t doi;
840 	uint64_t readonly;
841 	boolean_t owned = B_FALSE;
842 
843 	error = dsl_prop_get_integer(name,
844 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
845 	if (error != 0)
846 		return (error);
847 	if (readonly)
848 		return (SET_ERROR(EROFS));
849 
850 	mutex_enter(&zfsdev_state_lock);
851 	zv = zvol_minor_lookup(name);
852 
853 	if (zv == NULL || zv->zv_objset == NULL) {
854 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
855 		    FTAG, &os)) != 0) {
856 			mutex_exit(&zfsdev_state_lock);
857 			return (error);
858 		}
859 		owned = B_TRUE;
860 		if (zv != NULL)
861 			zv->zv_objset = os;
862 	} else {
863 		os = zv->zv_objset;
864 	}
865 
866 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
867 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
868 		goto out;
869 
870 	error = zvol_update_volsize(os, volsize);
871 
872 	if (error == 0 && zv != NULL)
873 		error = zvol_update_live_volsize(zv, volsize);
874 out:
875 	if (owned) {
876 		dmu_objset_disown(os, FTAG);
877 		if (zv != NULL)
878 			zv->zv_objset = NULL;
879 	}
880 	mutex_exit(&zfsdev_state_lock);
881 	return (error);
882 }
883 
884 /*ARGSUSED*/
885 int
886 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
887 {
888 	zvol_state_t *zv;
889 	int err = 0;
890 
891 	mutex_enter(&zfsdev_state_lock);
892 
893 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
894 	if (zv == NULL) {
895 		mutex_exit(&zfsdev_state_lock);
896 		return (SET_ERROR(ENXIO));
897 	}
898 
899 	if (zv->zv_total_opens == 0)
900 		err = zvol_first_open(zv);
901 	if (err) {
902 		mutex_exit(&zfsdev_state_lock);
903 		return (err);
904 	}
905 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
906 		err = SET_ERROR(EROFS);
907 		goto out;
908 	}
909 	if (zv->zv_flags & ZVOL_EXCL) {
910 		err = SET_ERROR(EBUSY);
911 		goto out;
912 	}
913 	if (flag & FEXCL) {
914 		if (zv->zv_total_opens != 0) {
915 			err = SET_ERROR(EBUSY);
916 			goto out;
917 		}
918 		zv->zv_flags |= ZVOL_EXCL;
919 	}
920 
921 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
922 		zv->zv_open_count[otyp]++;
923 		zv->zv_total_opens++;
924 	}
925 	mutex_exit(&zfsdev_state_lock);
926 
927 	return (err);
928 out:
929 	if (zv->zv_total_opens == 0)
930 		zvol_last_close(zv);
931 	mutex_exit(&zfsdev_state_lock);
932 	return (err);
933 }
934 
935 /*ARGSUSED*/
936 int
937 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
938 {
939 	minor_t minor = getminor(dev);
940 	zvol_state_t *zv;
941 	int error = 0;
942 
943 	mutex_enter(&zfsdev_state_lock);
944 
945 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
946 	if (zv == NULL) {
947 		mutex_exit(&zfsdev_state_lock);
948 		return (SET_ERROR(ENXIO));
949 	}
950 
951 	if (zv->zv_flags & ZVOL_EXCL) {
952 		ASSERT(zv->zv_total_opens == 1);
953 		zv->zv_flags &= ~ZVOL_EXCL;
954 	}
955 
956 	/*
957 	 * If the open count is zero, this is a spurious close.
958 	 * That indicates a bug in the kernel / DDI framework.
959 	 */
960 	ASSERT(zv->zv_open_count[otyp] != 0);
961 	ASSERT(zv->zv_total_opens != 0);
962 
963 	/*
964 	 * You may get multiple opens, but only one close.
965 	 */
966 	zv->zv_open_count[otyp]--;
967 	zv->zv_total_opens--;
968 
969 	if (zv->zv_total_opens == 0)
970 		zvol_last_close(zv);
971 
972 	mutex_exit(&zfsdev_state_lock);
973 	return (error);
974 }
975 
976 /* ARGSUSED */
977 static void
978 zvol_get_done(zgd_t *zgd, int error)
979 {
980 	if (zgd->zgd_db)
981 		dmu_buf_rele(zgd->zgd_db, zgd);
982 
983 	zfs_range_unlock(zgd->zgd_rl);
984 
985 	kmem_free(zgd, sizeof (zgd_t));
986 }
987 
988 /*
989  * Get data to generate a TX_WRITE intent log record.
990  */
991 static int
992 zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
993 {
994 	zvol_state_t *zv = arg;
995 	uint64_t offset = lr->lr_offset;
996 	uint64_t size = lr->lr_length;	/* length of user data */
997 	dmu_buf_t *db;
998 	zgd_t *zgd;
999 	int error;
1000 
1001 	ASSERT3P(lwb, !=, NULL);
1002 	ASSERT3P(zio, !=, NULL);
1003 	ASSERT3U(size, !=, 0);
1004 
1005 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1006 	zgd->zgd_lwb = lwb;
1007 
1008 	/*
1009 	 * Write records come in two flavors: immediate and indirect.
1010 	 * For small writes it's cheaper to store the data with the
1011 	 * log record (immediate); for large writes it's cheaper to
1012 	 * sync the data and get a pointer to it (indirect) so that
1013 	 * we don't have to write the data twice.
1014 	 */
1015 	if (buf != NULL) { /* immediate write */
1016 		zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
1017 		    RL_READER);
1018 		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
1019 		    DMU_READ_NO_PREFETCH);
1020 	} else { /* indirect write */
1021 		/*
1022 		 * We have to lock the whole block to ensure that no one can
1023 		 * change the data while it is being written out and its
1024 		 * checksum is being calculated.  Unlike zfs_get_data(), we
1025 		 * need not re-check the blocksize here; it cannot be changed.
1026 		 */
1027 		size = zv->zv_volblocksize;
1028 		offset = P2ALIGN(offset, size);
1029 		zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
1030 		    RL_READER);
1031 		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
1032 		    DMU_READ_NO_PREFETCH);
1033 		if (error == 0) {
1034 			blkptr_t *bp = &lr->lr_blkptr;
1035 
1036 			zgd->zgd_db = db;
1037 			zgd->zgd_bp = bp;
1038 
1039 			ASSERT(db->db_offset == offset);
1040 			ASSERT(db->db_size == size);
1041 
1042 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1043 			    zvol_get_done, zgd);
1044 
1045 			if (error == 0)
1046 				return (0);
1047 		}
1048 	}
1049 
1050 	zvol_get_done(zgd, error);
1051 
1052 	return (error);
1053 }
1054 
1055 /*
1056  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1057  *
1058  * We store data in the log buffers if it's small enough.
1059  * Otherwise we will later flush the data out via dmu_sync().
1060  */
1061 ssize_t zvol_immediate_write_sz = 32768;
1062 
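/*
 * Decision sketch for zvol_log_write() below (derived from its checks,
 * using the default zvol_immediate_write_sz of 32K):
 *
 *	logbias=throughput                      -> WR_INDIRECT
 *	no slog, resid >= volblocksize > 32K    -> WR_INDIRECT
 *	otherwise, synchronous write            -> WR_COPIED
 *	otherwise (asynchronous write)          -> WR_NEED_COPY
 *
 * A WR_COPIED record falls back to WR_NEED_COPY if the residual is
 * larger than ZIL_MAX_COPIED_DATA or the dmu_read_by_dnode() fails.
 */
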
1063 static void
1064 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1065     boolean_t sync)
1066 {
1067 	uint32_t blocksize = zv->zv_volblocksize;
1068 	zilog_t *zilog = zv->zv_zilog;
1069 	itx_wr_state_t write_state;
1070 
1071 	if (zil_replaying(zilog, tx))
1072 		return;
1073 
1074 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1075 		write_state = WR_INDIRECT;
1076 	else if (!spa_has_slogs(zilog->zl_spa) &&
1077 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1078 		write_state = WR_INDIRECT;
1079 	else if (sync)
1080 		write_state = WR_COPIED;
1081 	else
1082 		write_state = WR_NEED_COPY;
1083 
1084 	while (resid) {
1085 		itx_t *itx;
1086 		lr_write_t *lr;
1087 		itx_wr_state_t wr_state = write_state;
1088 		ssize_t len = resid;
1089 
1090 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1091 			wr_state = WR_NEED_COPY;
1092 		else if (wr_state == WR_INDIRECT)
1093 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1094 
1095 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1096 		    (wr_state == WR_COPIED ? len : 0));
1097 		lr = (lr_write_t *)&itx->itx_lr;
1098 		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
1099 		    off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1100 			zil_itx_destroy(itx);
1101 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1102 			lr = (lr_write_t *)&itx->itx_lr;
1103 			wr_state = WR_NEED_COPY;
1104 		}
1105 
1106 		itx->itx_wr_state = wr_state;
1107 		lr->lr_foid = ZVOL_OBJ;
1108 		lr->lr_offset = off;
1109 		lr->lr_length = len;
1110 		lr->lr_blkoff = 0;
1111 		BP_ZERO(&lr->lr_blkptr);
1112 
1113 		itx->itx_private = zv;
1114 		itx->itx_sync = sync;
1115 
1116 		zil_itx_assign(zilog, itx, tx);
1117 
1118 		off += len;
1119 		resid -= len;
1120 	}
1121 }
1122 
1123 static int
1124 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1125     uint64_t size, boolean_t doread, boolean_t isdump)
1126 {
1127 	vdev_disk_t *dvd;
1128 	int c;
1129 	int numerrors = 0;
1130 
1131 	if (vd->vdev_ops == &vdev_mirror_ops ||
1132 	    vd->vdev_ops == &vdev_replacing_ops ||
1133 	    vd->vdev_ops == &vdev_spare_ops) {
1134 		for (c = 0; c < vd->vdev_children; c++) {
1135 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1136 			    addr, offset, origoffset, size, doread, isdump);
1137 			if (err != 0) {
1138 				numerrors++;
1139 			} else if (doread) {
1140 				break;
1141 			}
1142 		}
1143 	}
1144 
1145 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1146 		return (numerrors < vd->vdev_children ? 0 : EIO);
1147 
1148 	if (doread && !vdev_readable(vd))
1149 		return (SET_ERROR(EIO));
1150 	else if (!doread && !vdev_writeable(vd))
1151 		return (SET_ERROR(EIO));
1152 
1153 	if (vd->vdev_ops == &vdev_raidz_ops) {
1154 		return (vdev_raidz_physio(vd,
1155 		    addr, size, offset, origoffset, doread, isdump));
1156 	}
1157 
1158 	offset += VDEV_LABEL_START_SIZE;
1159 
1160 	if (ddi_in_panic() || isdump) {
1161 		ASSERT(!doread);
1162 		if (doread)
1163 			return (SET_ERROR(EIO));
1164 		dvd = vd->vdev_tsd;
1165 		ASSERT3P(dvd, !=, NULL);
1166 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1167 		    lbtodb(size)));
1168 	} else {
1169 		dvd = vd->vdev_tsd;
1170 		ASSERT3P(dvd, !=, NULL);
1171 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1172 		    offset, doread ? B_READ : B_WRITE));
1173 	}
1174 }
1175 
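/*
 * Illustrative example of the translation done by zvol_dumpio() below
 * (numbers chosen for clarity, not taken from a real pool): with a 128K
 * volblocksize, a request at logical offset 300K walks the extent list,
 * subtracting each extent's ze_nblks * volblocksize until the offset
 * falls within an extent; the remainder is added to that extent's DVA
 * offset and handed to zvol_dumpio_vdev(), which in turn adds
 * VDEV_LABEL_START_SIZE before issuing the raw disk I/O.
 */
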
1176 static int
1177 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1178     boolean_t doread, boolean_t isdump)
1179 {
1180 	vdev_t *vd;
1181 	int error;
1182 	zvol_extent_t *ze;
1183 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1184 
1185 	/* Must be sector-aligned and must not straddle a block boundary. */
1186 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1187 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1188 		return (SET_ERROR(EINVAL));
1189 	}
1190 	ASSERT(size <= zv->zv_volblocksize);
1191 
1192 	/* Locate the extent this belongs to */
1193 	ze = list_head(&zv->zv_extents);
1194 	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
1195 		offset -= ze->ze_nblks * zv->zv_volblocksize;
1196 		ze = list_next(&zv->zv_extents, ze);
1197 	}
1198 
1199 	if (ze == NULL)
1200 		return (SET_ERROR(EINVAL));
1201 
1202 	if (!ddi_in_panic())
1203 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1204 
1205 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1206 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1207 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1208 	    size, doread, isdump);
1209 
1210 	if (!ddi_in_panic())
1211 		spa_config_exit(spa, SCL_STATE, FTAG);
1212 
1213 	return (error);
1214 }
1215 
1216 int
1217 zvol_strategy(buf_t *bp)
1218 {
1219 	zfs_soft_state_t *zs = NULL;
1220 	zvol_state_t *zv;
1221 	uint64_t off, volsize;
1222 	size_t resid;
1223 	char *addr;
1224 	objset_t *os;
1225 	rl_t *rl;
1226 	int error = 0;
1227 	boolean_t doread = bp->b_flags & B_READ;
1228 	boolean_t is_dumpified;
1229 	boolean_t sync;
1230 
1231 	if (getminor(bp->b_edev) == 0) {
1232 		error = SET_ERROR(EINVAL);
1233 	} else {
1234 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1235 		if (zs == NULL)
1236 			error = SET_ERROR(ENXIO);
1237 		else if (zs->zss_type != ZSST_ZVOL)
1238 			error = SET_ERROR(EINVAL);
1239 	}
1240 
1241 	if (error) {
1242 		bioerror(bp, error);
1243 		biodone(bp);
1244 		return (0);
1245 	}
1246 
1247 	zv = zs->zss_data;
1248 
1249 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1250 		bioerror(bp, EROFS);
1251 		biodone(bp);
1252 		return (0);
1253 	}
1254 
1255 	off = ldbtob(bp->b_blkno);
1256 	volsize = zv->zv_volsize;
1257 
1258 	os = zv->zv_objset;
1259 	ASSERT(os != NULL);
1260 
1261 	bp_mapin(bp);
1262 	addr = bp->b_un.b_addr;
1263 	resid = bp->b_bcount;
1264 
1265 	if (resid > 0 && (off < 0 || off >= volsize)) {
1266 		bioerror(bp, EIO);
1267 		biodone(bp);
1268 		return (0);
1269 	}
1270 
1271 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1272 	sync = ((!(bp->b_flags & B_ASYNC) &&
1273 	    !(zv->zv_flags & ZVOL_WCE)) ||
1274 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1275 	    !doread && !is_dumpified;
1276 
1277 	/*
1278 	 * There must be no buffer changes when doing a dmu_sync() because
1279 	 * we can't change the data whilst calculating the checksum.
1280 	 */
1281 	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1282 	    doread ? RL_READER : RL_WRITER);
1283 
1284 	while (resid != 0 && off < volsize) {
1285 		size_t size = MIN(resid, zvol_maxphys);
1286 		if (is_dumpified) {
1287 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1288 			error = zvol_dumpio(zv, addr, off, size,
1289 			    doread, B_FALSE);
1290 		} else if (doread) {
1291 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1292 			    DMU_READ_PREFETCH);
1293 		} else {
1294 			dmu_tx_t *tx = dmu_tx_create(os);
1295 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1296 			error = dmu_tx_assign(tx, TXG_WAIT);
1297 			if (error) {
1298 				dmu_tx_abort(tx);
1299 			} else {
1300 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1301 				zvol_log_write(zv, tx, off, size, sync);
1302 				dmu_tx_commit(tx);
1303 			}
1304 		}
1305 		if (error) {
1306 			/* convert checksum errors into IO errors */
1307 			if (error == ECKSUM)
1308 				error = SET_ERROR(EIO);
1309 			break;
1310 		}
1311 		off += size;
1312 		addr += size;
1313 		resid -= size;
1314 	}
1315 	zfs_range_unlock(rl);
1316 
1317 	if ((bp->b_resid = resid) == bp->b_bcount)
1318 		bioerror(bp, off > volsize ? EINVAL : error);
1319 
1320 	if (sync)
1321 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1322 	biodone(bp);
1323 
1324 	return (0);
1325 }
1326 
1327 /*
1328  * Set the buffer count to the zvol maximum transfer.
1329  * Using our own routine instead of the default minphys()
1330  * means that for larger writes we write bigger buffers on X86
1331  * (128K instead of 56K) and flush the disk write cache less often
1332  * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1333  * 56K on X86 and 128K on sparc).
1334  */
1335 void
1336 zvol_minphys(struct buf *bp)
1337 {
1338 	if (bp->b_bcount > zvol_maxphys)
1339 		bp->b_bcount = zvol_maxphys;
1340 }
1341 
1342 int
1343 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1344 {
1345 	minor_t minor = getminor(dev);
1346 	zvol_state_t *zv;
1347 	int error = 0;
1348 	uint64_t size;
1349 	uint64_t boff;
1350 	uint64_t resid;
1351 
1352 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1353 	if (zv == NULL)
1354 		return (SET_ERROR(ENXIO));
1355 
1356 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1357 		return (SET_ERROR(EINVAL));
1358 
1359 	boff = ldbtob(blkno);
1360 	resid = ldbtob(nblocks);
1361 
1362 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1363 
1364 	while (resid) {
1365 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1366 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1367 		if (error)
1368 			break;
1369 		boff += size;
1370 		addr += size;
1371 		resid -= size;
1372 	}
1373 
1374 	return (error);
1375 }
1376 
1377 /*ARGSUSED*/
1378 int
1379 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1380 {
1381 	minor_t minor = getminor(dev);
1382 	zvol_state_t *zv;
1383 	uint64_t volsize;
1384 	rl_t *rl;
1385 	int error = 0;
1386 
1387 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1388 	if (zv == NULL)
1389 		return (SET_ERROR(ENXIO));
1390 
1391 	volsize = zv->zv_volsize;
1392 	if (uio->uio_resid > 0 &&
1393 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1394 		return (SET_ERROR(EIO));
1395 
1396 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1397 		error = physio(zvol_strategy, NULL, dev, B_READ,
1398 		    zvol_minphys, uio);
1399 		return (error);
1400 	}
1401 
1402 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1403 	    RL_READER);
1404 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1405 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1406 
1407 		/* don't read past the end */
1408 		if (bytes > volsize - uio->uio_loffset)
1409 			bytes = volsize - uio->uio_loffset;
1410 
1411 		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1412 		if (error) {
1413 			/* convert checksum errors into IO errors */
1414 			if (error == ECKSUM)
1415 				error = SET_ERROR(EIO);
1416 			break;
1417 		}
1418 	}
1419 	zfs_range_unlock(rl);
1420 	return (error);
1421 }
1422 
1423 /*ARGSUSED*/
1424 int
1425 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1426 {
1427 	minor_t minor = getminor(dev);
1428 	zvol_state_t *zv;
1429 	uint64_t volsize;
1430 	rl_t *rl;
1431 	int error = 0;
1432 	boolean_t sync;
1433 
1434 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1435 	if (zv == NULL)
1436 		return (SET_ERROR(ENXIO));
1437 
1438 	volsize = zv->zv_volsize;
1439 	if (uio->uio_resid > 0 &&
1440 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1441 		return (SET_ERROR(EIO));
1442 
1443 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1444 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1445 		    zvol_minphys, uio);
1446 		return (error);
1447 	}
1448 
1449 	sync = !(zv->zv_flags & ZVOL_WCE) ||
1450 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1451 
1452 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1453 	    RL_WRITER);
1454 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1455 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1456 		uint64_t off = uio->uio_loffset;
1457 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1458 
1459 		if (bytes > volsize - off)	/* don't write past the end */
1460 			bytes = volsize - off;
1461 
1462 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1463 		error = dmu_tx_assign(tx, TXG_WAIT);
1464 		if (error) {
1465 			dmu_tx_abort(tx);
1466 			break;
1467 		}
1468 		error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx);
1469 		if (error == 0)
1470 			zvol_log_write(zv, tx, off, bytes, sync);
1471 		dmu_tx_commit(tx);
1472 
1473 		if (error)
1474 			break;
1475 	}
1476 	zfs_range_unlock(rl);
1477 	if (sync)
1478 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1479 	return (error);
1480 }
1481 
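/*
 * Summary of the label zvol_getefi() below fabricates (restating the
 * code for readability): LBA 1 holds a GPT header describing a
 * single-entry partition array at LBA 2; that entry is an EFI_RESERVED
 * partition spanning LBA 34 through (volsize >> bs) - 1.  Requests for
 * any other LBA (e.g. LBA 0, the PMBR) return EINVAL.
 */
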
1482 int
1483 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1484 {
1485 	struct uuid uuid = EFI_RESERVED;
1486 	efi_gpe_t gpe = { 0 };
1487 	uint32_t crc;
1488 	dk_efi_t efi;
1489 	int length;
1490 	char *ptr;
1491 
1492 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1493 		return (SET_ERROR(EFAULT));
1494 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1495 	length = efi.dki_length;
1496 	/*
1497 	 * Some clients may attempt to request a PMBR for the
1498 	 * zvol.  Currently this interface will return EINVAL to
1499 	 * such requests.  These requests could be supported by
1500 	 * adding a check for lba == 0 and consing up an appropriate
1501 	 * PMBR.
1502 	 */
1503 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1504 		return (SET_ERROR(EINVAL));
1505 
1506 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1507 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1508 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1509 
1510 	if (efi.dki_lba == 1) {
1511 		efi_gpt_t gpt = { 0 };
1512 
1513 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1514 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1515 		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1516 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1517 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1518 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1519 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1520 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1521 		gpt.efi_gpt_SizeOfPartitionEntry =
1522 		    LE_32(sizeof (efi_gpe_t));
1523 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1524 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1525 		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1526 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1527 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1528 		    flag))
1529 			return (SET_ERROR(EFAULT));
1530 		ptr += sizeof (gpt);
1531 		length -= sizeof (gpt);
1532 	}
1533 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1534 	    length), flag))
1535 		return (SET_ERROR(EFAULT));
1536 	return (0);
1537 }
1538 
1539 /*
1540  * BEGIN entry points to allow external callers access to the volume.
1541  */
1542 /*
1543  * Return the volume parameters needed for access from an external caller.
1544  * These values are invariant as long as the volume is held open.
1545  */
1546 int
1547 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1548     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1549     void **rl_hdl, void **dnode_hdl)
1550 {
1551 	zvol_state_t *zv;
1552 
1553 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1554 	if (zv == NULL)
1555 		return (SET_ERROR(ENXIO));
1556 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1557 		return (SET_ERROR(ENXIO));
1558 
1559 	ASSERT(blksize && max_xfer_len && minor_hdl &&
1560 	    objset_hdl && zil_hdl && rl_hdl && dnode_hdl);
1561 
1562 	*blksize = zv->zv_volblocksize;
1563 	*max_xfer_len = (uint64_t)zvol_maxphys;
1564 	*minor_hdl = zv;
1565 	*objset_hdl = zv->zv_objset;
1566 	*zil_hdl = zv->zv_zilog;
1567 	*rl_hdl = &zv->zv_znode;
1568 	*dnode_hdl = zv->zv_dn;
1569 	return (0);
1570 }
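
/*
 * Hedged sketch of an external consumer (e.g. a SCSI target provider)
 * using the entry points in this section; variable names are
 * hypothetical and error handling is elided:
 *
 *	uint64_t blksize, maxlen;
 *	void *minor_hdl, *os_hdl, *zil_hdl, *rl_hdl, *dn_hdl;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &maxlen, &minor_hdl,
 *	    &os_hdl, &zil_hdl, &rl_hdl, &dn_hdl) == 0) {
 *		uint64_t size = zvol_get_volume_size(minor_hdl);
 *		int wce = zvol_get_volume_wce(minor_hdl);
 *		... handles remain valid while the volume stays open ...
 *	}
 */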
1571 
1572 /*
1573  * Return the current volume size to an external caller.
1574  * The size can change while the volume is open.
1575  */
1576 uint64_t
1577 zvol_get_volume_size(void *minor_hdl)
1578 {
1579 	zvol_state_t *zv = minor_hdl;
1580 
1581 	return (zv->zv_volsize);
1582 }
1583 
1584 /*
1585  * Return the current WCE setting to an external caller.
1586  * The WCE setting can change while the volume is open.
1587  */
1588 int
1589 zvol_get_volume_wce(void *minor_hdl)
1590 {
1591 	zvol_state_t *zv = minor_hdl;
1592 
1593 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1594 }
1595 
1596 /*
1597  * Entry point for external callers to zvol_log_write
1598  */
1599 void
1600 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1601     boolean_t sync)
1602 {
1603 	zvol_state_t *zv = minor_hdl;
1604 
1605 	zvol_log_write(zv, tx, off, resid, sync);
1606 }
1607 /*
1608  * END entry points to allow external callers access to the volume.
1609  */
1610 
1611 /*
1612  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1613  */
1614 static void
1615 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1616     boolean_t sync)
1617 {
1618 	itx_t *itx;
1619 	lr_truncate_t *lr;
1620 	zilog_t *zilog = zv->zv_zilog;
1621 
1622 	if (zil_replaying(zilog, tx))
1623 		return;
1624 
1625 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1626 	lr = (lr_truncate_t *)&itx->itx_lr;
1627 	lr->lr_foid = ZVOL_OBJ;
1628 	lr->lr_offset = off;
1629 	lr->lr_length = len;
1630 
1631 	itx->itx_sync = sync;
1632 	zil_itx_assign(zilog, itx, tx);
1633 }
1634 
1635 /*
1636  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1637  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1638  */
1639 /*ARGSUSED*/
1640 int
1641 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1642 {
1643 	zvol_state_t *zv;
1644 	struct dk_callback *dkc;
1645 	int error = 0;
1646 	rl_t *rl;
1647 
1648 	mutex_enter(&zfsdev_state_lock);
1649 
1650 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1651 
1652 	if (zv == NULL) {
1653 		mutex_exit(&zfsdev_state_lock);
1654 		return (SET_ERROR(ENXIO));
1655 	}
1656 	ASSERT(zv->zv_total_opens > 0);
1657 
1658 	switch (cmd) {
1659 
1660 	case DKIOCINFO:
1661 	{
1662 		struct dk_cinfo dki;
1663 
1664 		bzero(&dki, sizeof (dki));
1665 		(void) strcpy(dki.dki_cname, "zvol");
1666 		(void) strcpy(dki.dki_dname, "zvol");
1667 		dki.dki_ctype = DKC_UNKNOWN;
1668 		dki.dki_unit = getminor(dev);
1669 		dki.dki_maxtransfer =
1670 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1671 		mutex_exit(&zfsdev_state_lock);
1672 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1673 			error = SET_ERROR(EFAULT);
1674 		return (error);
1675 	}
1676 
1677 	case DKIOCGMEDIAINFO:
1678 	{
1679 		struct dk_minfo dkm;
1680 
1681 		bzero(&dkm, sizeof (dkm));
1682 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1683 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1684 		dkm.dki_media_type = DK_UNKNOWN;
1685 		mutex_exit(&zfsdev_state_lock);
1686 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1687 			error = SET_ERROR(EFAULT);
1688 		return (error);
1689 	}
1690 
1691 	case DKIOCGMEDIAINFOEXT:
1692 	{
1693 		struct dk_minfo_ext dkmext;
1694 
1695 		bzero(&dkmext, sizeof (dkmext));
1696 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1697 		dkmext.dki_pbsize = zv->zv_volblocksize;
1698 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1699 		dkmext.dki_media_type = DK_UNKNOWN;
1700 		mutex_exit(&zfsdev_state_lock);
1701 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1702 			error = SET_ERROR(EFAULT);
1703 		return (error);
1704 	}
1705 
1706 	case DKIOCGETEFI:
1707 	{
1708 		uint64_t vs = zv->zv_volsize;
1709 		uint8_t bs = zv->zv_min_bs;
1710 
1711 		mutex_exit(&zfsdev_state_lock);
1712 		error = zvol_getefi((void *)arg, flag, vs, bs);
1713 		return (error);
1714 	}
1715 
1716 	case DKIOCFLUSHWRITECACHE:
1717 		dkc = (struct dk_callback *)arg;
1718 		mutex_exit(&zfsdev_state_lock);
1719 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1720 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1721 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1722 			error = 0;
1723 		}
1724 		return (error);
1725 
1726 	case DKIOCGETWCE:
1727 	{
1728 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1729 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1730 		    flag))
1731 			error = SET_ERROR(EFAULT);
1732 		break;
1733 	}
1734 	case DKIOCSETWCE:
1735 	{
1736 		int wce;
1737 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1738 		    flag)) {
1739 			error = SET_ERROR(EFAULT);
1740 			break;
1741 		}
1742 		if (wce) {
1743 			zv->zv_flags |= ZVOL_WCE;
1744 			mutex_exit(&zfsdev_state_lock);
1745 		} else {
1746 			zv->zv_flags &= ~ZVOL_WCE;
1747 			mutex_exit(&zfsdev_state_lock);
1748 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1749 		}
1750 		return (0);
1751 	}
1752 
1753 	case DKIOCGGEOM:
1754 	case DKIOCGVTOC:
1755 		/*
1756 		 * commands using these (like prtvtoc) expect ENOTSUP
1757 		 * since we're emulating an EFI label
1758 		 */
1759 		error = SET_ERROR(ENOTSUP);
1760 		break;
1761 
1762 	case DKIOCDUMPINIT:
1763 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1764 		    RL_WRITER);
1765 		error = zvol_dumpify(zv);
1766 		zfs_range_unlock(rl);
1767 		break;
1768 
1769 	case DKIOCDUMPFINI:
1770 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1771 			break;
1772 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1773 		    RL_WRITER);
1774 		error = zvol_dump_fini(zv);
1775 		zfs_range_unlock(rl);
1776 		break;
1777 
1778 	case DKIOCFREE:
1779 	{
1780 		dkioc_free_list_t *dfl;
1781 		dmu_tx_t *tx;
1782 
1783 		if (!zvol_unmap_enabled)
1784 			break;
1785 
1786 		if (!(flag & FKIOCTL)) {
1787 			error = dfl_copyin((void *)arg, &dfl, flag, KM_SLEEP);
1788 			if (error != 0)
1789 				break;
1790 		} else {
1791 			dfl = (dkioc_free_list_t *)arg;
1792 			ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
1793 			if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
1794 				error = SET_ERROR(EINVAL);
1795 				break;
1796 			}
1797 		}
1798 
1799 		mutex_exit(&zfsdev_state_lock);
1800 
1801 		for (int i = 0; i < dfl->dfl_num_exts; i++) {
1802 			uint64_t start = dfl->dfl_exts[i].dfle_start,
1803 			    length = dfl->dfl_exts[i].dfle_length,
1804 			    end = start + length;
1805 
1806 			/*
1807 			 * Apply Postel's Law to length-checking.  If they
1808 			 * overshoot, just blank out until the end, if there's
1809 			 * a need to blank out anything.
1810 			 */
1811 			if (start >= zv->zv_volsize)
1812 				continue;	/* No need to do anything... */
1813 			if (end > zv->zv_volsize) {
1814 				end = DMU_OBJECT_END;
1815 				length = end - start;
1816 			}
1817 
1818 			rl = zfs_range_lock(&zv->zv_znode, start, length,
1819 			    RL_WRITER);
1820 			tx = dmu_tx_create(zv->zv_objset);
1821 			error = dmu_tx_assign(tx, TXG_WAIT);
1822 			if (error != 0) {
1823 				dmu_tx_abort(tx);
1824 			} else {
1825 				zvol_log_truncate(zv, tx, start, length,
1826 				    B_TRUE);
1827 				dmu_tx_commit(tx);
1828 				error = dmu_free_long_range(zv->zv_objset,
1829 				    ZVOL_OBJ, start, length);
1830 			}
1831 
1832 			zfs_range_unlock(rl);
1833 
1834 			if (error != 0)
1835 				break;
1836 		}
1837 
1838 		/*
1839 		 * If the write cache is disabled, the 'sync' property is
1840 		 * set to 'always', or the caller is asking for a
1841 		 * synchronous free, commit this operation to the ZIL.
1842 		 * This will sync any previous uncommitted writes to the
1843 		 * zvol object.
1844 		 * Can be overridden by the zvol_unmap_sync_enabled tunable.
1845 		 */
1846 		if ((error == 0) && zvol_unmap_sync_enabled &&
1847 		    (!(zv->zv_flags & ZVOL_WCE) ||
1848 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1849 		    (dfl->dfl_flags & DF_WAIT_SYNC))) {
1850 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1851 		}
1852 
1853 		if (!(flag & FKIOCTL))
1854 			dfl_free(dfl);
1855 
1856 		return (error);
1857 	}
1858 
1859 	default:
1860 		error = SET_ERROR(ENOTTY);
1861 		break;
1862 
1863 	}
1864 	mutex_exit(&zfsdev_state_lock);
1865 	return (error);
1866 }
1867 
1868 int
1869 zvol_busy(void)
1870 {
1871 	return (zvol_minors != 0);
1872 }
1873 
1874 void
1875 zvol_init(void)
1876 {
1877 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1878 	    1) == 0);
1879 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1880 }
1881 
1882 void
1883 zvol_fini(void)
1884 {
1885 	mutex_destroy(&zfsdev_state_lock);
1886 	ddi_soft_state_fini(&zfsdev_state);
1887 }
1888 
1889 /*ARGSUSED*/
1890 static int
1891 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1892 {
1893 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1894 
1895 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1896 		return (1);
1897 	return (0);
1898 }
1899 
1900 /*ARGSUSED*/
1901 static void
1902 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1903 {
1904 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1905 
1906 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1907 }
1908 
1909 static int
1910 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1911 {
1912 	dmu_tx_t *tx;
1913 	int error;
1914 	objset_t *os = zv->zv_objset;
1915 	spa_t *spa = dmu_objset_spa(os);
1916 	vdev_t *vd = spa->spa_root_vdev;
1917 	nvlist_t *nv = NULL;
1918 	uint64_t version = spa_version(spa);
1919 	uint64_t checksum, compress, refresrv, vbs, dedup;
1920 
1921 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1922 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1923 
1924 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1925 	    DMU_OBJECT_END);
1926 	if (error != 0)
1927 		return (error);
1928 	/* wait for dmu_free_long_range to actually free the blocks */
1929 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1930 
1931 	/*
1932 	 * If the pool on which the dump device is being initialized has more
1933 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1934 	 * enabled.  If so, bump that feature's counter to indicate that the
1935 	 * feature is active. We also check the vdev type to handle the
1936 	 * following case:
1937 	 *   # zpool create test raidz disk1 disk2 disk3
1938 	 *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
1939 	 *   the raidz vdev itself has 3 children.
1940 	 */
1941 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1942 		if (!spa_feature_is_enabled(spa,
1943 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1944 			return (SET_ERROR(ENOTSUP));
1945 		(void) dsl_sync_task(spa_name(spa),
1946 		    zfs_mvdev_dump_feature_check,
1947 		    zfs_mvdev_dump_activate_feature_sync, NULL,
1948 		    2, ZFS_SPACE_CHECK_RESERVED);
1949 	}
1950 
1951 	if (!resize) {
1952 		error = dsl_prop_get_integer(zv->zv_name,
1953 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1954 		if (error == 0) {
1955 			error = dsl_prop_get_integer(zv->zv_name,
1956 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1957 			    NULL);
1958 		}
1959 		if (error == 0) {
1960 			error = dsl_prop_get_integer(zv->zv_name,
1961 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1962 			    &refresrv, NULL);
1963 		}
1964 		if (error == 0) {
1965 			error = dsl_prop_get_integer(zv->zv_name,
1966 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1967 			    NULL);
1968 		}
1969 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1970 			error = dsl_prop_get_integer(zv->zv_name,
1971 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1972 		}
1973 	}
1974 	if (error != 0)
1975 		return (error);
1976 
1977 	tx = dmu_tx_create(os);
1978 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1979 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1980 	error = dmu_tx_assign(tx, TXG_WAIT);
1981 	if (error != 0) {
1982 		dmu_tx_abort(tx);
1983 		return (error);
1984 	}
1985 
1986 	/*
1987 	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize. Otherwise, we save off the zvol's original property
	 * values so that we can restore them if the zvol is ever undumpified.
1991 	 */
1992 	if (resize) {
1993 		error = zap_update(os, ZVOL_ZAP_OBJ,
1994 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1995 		    &zv->zv_volsize, tx);
1996 	} else {
1997 		error = zap_update(os, ZVOL_ZAP_OBJ,
1998 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1999 		    &compress, tx);
2000 		if (error == 0) {
2001 			error = zap_update(os, ZVOL_ZAP_OBJ,
2002 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2003 			    &checksum, tx);
2004 		}
2005 		if (error == 0) {
2006 			error = zap_update(os, ZVOL_ZAP_OBJ,
2007 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2008 			    &refresrv, tx);
2009 		}
2010 		if (error == 0) {
2011 			error = zap_update(os, ZVOL_ZAP_OBJ,
2012 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2013 			    &vbs, tx);
2014 		}
2015 		if (error == 0) {
2016 			error = dmu_object_set_blocksize(
2017 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2018 		}
2019 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2020 			error = zap_update(os, ZVOL_ZAP_OBJ,
2021 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2022 			    &dedup, tx);
2023 		}
2024 		if (error == 0)
2025 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2026 	}
2027 	dmu_tx_commit(tx);
2028 
2029 	/*
	 * We only need to update the zvol's properties if we are initializing
	 * the dump area for the first time.
2032 	 */
2033 	if (error == 0 && !resize) {
2034 		/*
2035 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2036 		 * function.  Otherwise, use the old default -- OFF.
2037 		 */
2038 		checksum = spa_feature_is_active(spa,
2039 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2040 		    ZIO_CHECKSUM_OFF;
2041 
2042 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2043 		VERIFY(nvlist_add_uint64(nv,
2044 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2045 		VERIFY(nvlist_add_uint64(nv,
2046 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2047 		    ZIO_COMPRESS_OFF) == 0);
2048 		VERIFY(nvlist_add_uint64(nv,
2049 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2050 		    checksum) == 0);
2051 		if (version >= SPA_VERSION_DEDUP) {
2052 			VERIFY(nvlist_add_uint64(nv,
2053 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2054 			    ZIO_CHECKSUM_OFF) == 0);
2055 		}
2056 
2057 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2058 		    nv, NULL);
2059 		nvlist_free(nv);
2060 	}
2061 
2062 	/* Allocate the space for the dump */
2063 	if (error == 0)
2064 		error = zvol_prealloc(zv);
2065 	return (error);
2066 }
2067 
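/*
 * Convert a zvol into a dump device, typically in response to the
 * DKIOCDUMPINIT ioctl issued when the volume is configured as a dump
 * device, e.g.:
 *   # dumpadm -d /dev/zvol/dsk/<pool>/<volume>
 * If the recorded dump size doesn't match the current volume size the
 * device is (re)initialized, the physical LBA map is built, and the zvol
 * is marked ZVOL_DUMPIFIED.
 */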
2068 static int
2069 zvol_dumpify(zvol_state_t *zv)
2070 {
2071 	int error = 0;
2072 	uint64_t dumpsize = 0;
2073 	dmu_tx_t *tx;
2074 	objset_t *os = zv->zv_objset;
2075 
2076 	if (zv->zv_flags & ZVOL_RDONLY)
2077 		return (SET_ERROR(EROFS));
2078 
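	/*
	 * (Re)initialize the dump area if this zvol has never been
	 * dumpified or if its size has changed since it was.
	 */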
2079 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2080 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2081 		boolean_t resize = (dumpsize > 0);
2082 
2083 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2084 			(void) zvol_dump_fini(zv);
2085 			return (error);
2086 		}
2087 	}
2088 
2089 	/*
	 * Build up our LBA mapping.
2091 	 */
2092 	error = zvol_get_lbas(zv);
2093 	if (error) {
2094 		(void) zvol_dump_fini(zv);
2095 		return (error);
2096 	}
2097 
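	/*
	 * Mark the zvol dumpified and record the current volume size as
	 * the dump size in the ZAP object.
	 */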
2098 	tx = dmu_tx_create(os);
2099 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2100 	error = dmu_tx_assign(tx, TXG_WAIT);
2101 	if (error) {
2102 		dmu_tx_abort(tx);
2103 		(void) zvol_dump_fini(zv);
2104 		return (error);
2105 	}
2106 
2107 	zv->zv_flags |= ZVOL_DUMPIFIED;
2108 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2109 	    &zv->zv_volsize, tx);
2110 	dmu_tx_commit(tx);
2111 
2112 	if (error) {
2113 		(void) zvol_dump_fini(zv);
2114 		return (error);
2115 	}
2116 
2117 	txg_wait_synced(dmu_objset_pool(os), 0);
2118 	return (0);
2119 }
2120 
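/*
 * Undo zvol_dumpify(): restore the properties saved by zvol_dump_init(),
 * free the preallocated blocks and reset the original volume block size.
 */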
2121 static int
2122 zvol_dump_fini(zvol_state_t *zv)
2123 {
2124 	dmu_tx_t *tx;
2125 	objset_t *os = zv->zv_objset;
2126 	nvlist_t *nv;
2127 	int error = 0;
2128 	uint64_t checksum, compress, refresrv, vbs, dedup;
2129 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2130 
2131 	/*
	 * Attempt to restore the zvol to its pre-dumpified state.  This is
	 * a best-effort attempt, as it's possible that not all of these
	 * properties were initialized during the dumpify process (e.g. an
	 * error occurred during zvol_dump_init).
2136 	 */
2137 
2138 	tx = dmu_tx_create(os);
2139 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2140 	error = dmu_tx_assign(tx, TXG_WAIT);
2141 	if (error) {
2142 		dmu_tx_abort(tx);
2143 		return (error);
2144 	}
2145 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2146 	dmu_tx_commit(tx);
2147 
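	/*
	 * Fetch the property values that zvol_dump_init() stashed in the
	 * ZAP object and push them back onto the dataset.
	 */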
2148 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2149 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2150 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2151 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2152 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2153 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2154 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2155 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2156 
2157 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2158 	(void) nvlist_add_uint64(nv,
2159 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2160 	(void) nvlist_add_uint64(nv,
2161 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2162 	(void) nvlist_add_uint64(nv,
2163 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2164 	if (version >= SPA_VERSION_DEDUP &&
2165 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2166 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2167 		(void) nvlist_add_uint64(nv,
2168 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2169 	}
2170 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2171 	    nv, NULL);
2172 	nvlist_free(nv);
2173 
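	/*
	 * Release the LBA map, free the preallocated blocks and restore
	 * the original volume block size.
	 */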
2174 	zvol_free_extents(zv);
2175 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2176 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2177 	/* wait for dmu_free_long_range to actually free the blocks */
2178 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2179 	tx = dmu_tx_create(os);
2180 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2181 	error = dmu_tx_assign(tx, TXG_WAIT);
2182 	if (error) {
2183 		dmu_tx_abort(tx);
2184 		return (error);
2185 	}
2186 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2187 		zv->zv_volblocksize = vbs;
2188 	dmu_tx_commit(tx);
2189 
2190 	return (0);
2191 }
2192