/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 */

#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>

#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>

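/*
 * Default values for the zvol tunables defined below.  Most of these can
 * be overridden via the module parameters registered at the bottom of
 * this file.
 */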
static unsigned int zvol_major = ZVOL_MAJOR;
static unsigned int zvol_request_sync = 0;
static unsigned int zvol_prefetch_bytes = (128 * 1024);
static unsigned long zvol_max_discard_blocks = 16384;
static unsigned int zvol_threads = 32;
static const unsigned int zvol_open_timeout_ms = 1000;

struct zvol_state_os {
	struct gendisk		*zvo_disk;	/* generic disk */
	struct request_queue	*zvo_queue;	/* request queue */
	dev_t			zvo_dev;	/* device id */
};

taskq_t *zvol_taskq;
static struct ida zvol_ida;

typedef struct zv_request_stack {
	zvol_state_t	*zv;
	struct bio	*bio;
} zv_request_t;

typedef struct zv_request_task {
	zv_request_t zvr;
	taskq_ent_t	ent;
} zv_request_task_t;

static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
	zv_request_task_t *task;
	task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
	taskq_init_ent(&task->ent);
	task->zvr = zvr;
	return (task);
}

static void
zv_request_task_free(zv_request_task_t *task)
{
	kmem_free(task, sizeof (*task));
}

/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_os_is_zvol(const char *path)
{
	dev_t dev = 0;

	if (vdev_lookup_bdev(path, &dev) != 0)
		return (B_FALSE);

	if (MAJOR(dev) == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}

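/*
 * Write the data described by the bio to the zvol.  The caller holds
 * zv_suspend_lock as reader; it is dropped here once the write (and any
 * requested flush) has completed and the bio has been ended.
 */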
static void
zvol_write(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	/* A bio marked as FLUSH must flush before the write */
	if (bio_is_flush(bio))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/* Some requests are just for flush and nothing else. */
	if (uio.uio_resid == 0) {
		rw_exit(&zv->zv_suspend_lock);
		BIO_END_IO(bio, 0);
		return;
	}

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	boolean_t sync =
	    bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_WRITER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio.uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
		if (error == 0) {
			zvol_log_write(zv, tx, off, bytes, sync);
		}
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_rangelock_exit(lr);

	int64_t nwritten = start_resid - uio.uio_resid;
	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
	task_io_account_write(nwritten);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
}

static void
zvol_write_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_write(&task->zvr);
	zv_request_task_free(task);
}

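/*
 * Free (discard/TRIM) the range of the zvol covered by the bio.  Unless
 * this is a secure erase the request is first aligned to the volume block
 * size, since freeing partial blocks would only trigger read-modify-write
 * without releasing any space.  Drops zv_suspend_lock and ends the bio.
 */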
static void
zvol_discard(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	zvol_state_t *zv = zvr->zv;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	boolean_t sync;
	int error = 0;
	dmu_tx_t *tx;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	if (end > zv->zv_volsize) {
		error = SET_ERROR(EIO);
		goto unlock;
	}

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		goto unlock;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    start, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}
	zfs_rangelock_exit(lr);

	if (error == 0 && sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

unlock:
	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
}

static void
zvol_discard_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_discard(&task->zvr);
	zv_request_task_free(task);
}

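/*
 * Read the range described by the bio from the zvol into its bio vectors.
 * Checksum errors are reported to the caller as EIO.  Drops
 * zv_suspend_lock and ends the bio.
 */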
static void
zvol_read(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, READ, bio);

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_READER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio.uio_loffset)
			bytes = volsize - uio.uio_loffset;

		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_rangelock_exit(lr);

	int64_t nread = start_resid - uio.uio_resid;
	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
	task_io_account_read(nread);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, READ, bio, start_time);

	BIO_END_IO(bio, -error);
}

static void
zvol_read_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_read(&task->zvr);
	zv_request_task_free(task);
}

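/*
 * Process a bio submitted to the zvol.  Depending on the kernel version
 * this is registered either as the submit_bio member of
 * block_device_operations or as the request queue's make_request
 * function; the #ifdefs below select the matching prototype.
 */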
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
static void
zvol_submit_bio(struct bio *bio)
#else
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#endif
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int rw = bio_data_dir(bio);

	if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_zso->zvo_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)size);

		BIO_END_IO(bio, -SET_ERROR(EIO));
		goto out;
	}

	zv_request_t zvr = {
		.zv = zv,
		.bio = bio,
	};
	zv_request_task_t *task;

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			BIO_END_IO(bio, -SET_ERROR(EROFS));
			goto out;
		}

		/*
		 * Prevents the zvol from being suspended, or the ZIL being
		 * concurrently opened.  Will be released after the i/o
		 * completes.
		 */
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/*
		 * Open a ZIL if this is the first time we have written to this
		 * zvol. We protect zv->zv_zilog with zv_suspend_lock rather
		 * than zv_state_lock so that we don't need to acquire an
		 * additional lock in this path.
		 */
		if (zv->zv_zilog == NULL) {
			rw_exit(&zv->zv_suspend_lock);
			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
			if (zv->zv_zilog == NULL) {
				zv->zv_zilog = zil_open(zv->zv_objset,
				    zvol_get_data);
				zv->zv_flags |= ZVOL_WRITTEN_TO;
				/* replay / destroy done in zvol_create_minor */
				VERIFY0((zv->zv_zilog->zl_header->zh_flags &
				    ZIL_REPLAY_NEEDED));
			}
			rw_downgrade(&zv->zv_suspend_lock);
		}

		/*
		 * We don't want this thread to be blocked waiting for i/o to
		 * complete, so we instead wait from a taskq callback. The
		 * i/o may be a ZIL write (via zil_commit()), or a read of an
		 * indirect block, or a read of a data block (if this is a
		 * partial-block write).  We will indicate that the i/o is
		 * complete by calling BIO_END_IO() from the taskq callback.
		 *
		 * This design allows the calling thread to continue and
		 * initiate more concurrent operations by calling
		 * zvol_request() again. There are typically only a small
		 * number of threads available to call zvol_request() (e.g.
		 * one per iSCSI target), so keeping the latency of
		 * zvol_request() low is important for performance.
		 *
		 * The zvol_request_sync module parameter allows this
		 * behavior to be altered, for performance evaluation
		 * purposes.  If the callback blocks, setting
		 * zvol_request_sync=1 will result in much worse performance.
		 *
		 * We can have up to zvol_threads concurrent i/o's being
		 * processed for all zvols on the system.  This is typically
		 * a vast improvement over the zvol_request_sync=1 behavior
		 * of one i/o at a time per zvol.  However, an even better
		 * design would be for zvol_request() to initiate the zio
		 * directly, and then be notified by the zio_done callback,
		 * which would call BIO_END_IO().  Unfortunately, the DMU/ZIL
		 * interfaces lack this functionality (they block waiting for
		 * the i/o to complete).
		 */
		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			if (zvol_request_sync) {
				zvol_discard(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(zvol_taskq,
				    zvol_discard_task, task, 0, &task->ent);
			}
		} else {
			if (zvol_request_sync) {
				zvol_write(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(zvol_taskq,
				    zvol_write_task, task, 0, &task->ent);
			}
		}
	} else {
		/*
		 * The SCST driver, and possibly others, may issue READ I/Os
		 * with a length of zero bytes.  These empty I/Os contain no
		 * data and require no additional handling.
		 */
		if (size == 0) {
			BIO_END_IO(bio, 0);
			goto out;
		}

		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/* See comment in WRITE case above. */
		if (zvol_request_sync) {
			zvol_read(&zvr);
		} else {
			task = zv_request_task_create(zvr);
			taskq_dispatch_ent(zvol_taskq,
			    zvol_read_task, task, 0, &task->ent);
		}
	}

out:
	spl_fstrans_unmark(cookie);
#if (defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
	defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)) && \
	!defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
	return (BLK_QC_T_NONE);
#endif
}

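/*
 * Linux block device open callback.  Takes zv_suspend_lock on first open,
 * handles the spa_namespace_lock inversion that can occur when a zvol is
 * layered under another pool, and bumps zv_open_count on success.
 */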
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t drop_suspend = B_FALSE;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
	hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
	hrtime_t start = gethrtime();

retry:
#endif
	rw_enter(&zvol_state_lock, RW_READER);
	/*
	 * Obtain a copy of private_data under the zvol_state_lock to make
	 * sure that either the result of zvol free code path setting
	 * bdev->bd_disk->private_data to NULL is observed, or zvol_os_free()
	 * is not called on this zv because of the positive zv_open_count.
	 */
	zv = bdev->bd_disk->private_data;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		return (SET_ERROR(-ENXIO));
	}

	mutex_enter(&zv->zv_state_lock);
	/*
	 * Make sure zvol is not suspended during first open
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock
	 */
	if (zv->zv_open_count == 0) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
			} else {
				drop_suspend = B_TRUE;
			}
		} else {
			drop_suspend = B_TRUE;
		}
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if (zv->zv_open_count == 0) {
		boolean_t drop_namespace = B_FALSE;

		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));

		/*
		 * In all other call paths the spa_namespace_lock is taken
		 * before the bdev->bd_mutex lock.  However, on open(2)
		 * the __blkdev_get() function calls fops->open() with the
		 * bdev->bd_mutex lock held.  This can result in a deadlock
		 * when zvols from one pool are used as vdevs in another.
		 *
		 * To prevent a lock inversion deadlock we preemptively
		 * take the spa_namespace_lock.  Normally the lock will not
		 * be contended and this is safe because spa_open_common()
		 * handles the case where the caller already holds the
		 * spa_namespace_lock.
		 *
		 * When the lock cannot be acquired after multiple retries
		 * this must be the vdev-on-zvol deadlock case and we have
		 * no choice but to return an error.  For 5.12 and older
		 * kernels returning -ERESTARTSYS will result in the
		 * bdev->bd_mutex being dropped, then reacquired, and
		 * fops->open() being called again.  This process can be
		 * repeated safely until both locks are acquired.  For 5.13
		 * and newer kernels the -ERESTARTSYS retry logic was removed
		 * from the kernel so the only option is to return the error
		 * for the caller to handle it.
		 */
		if (!mutex_owned(&spa_namespace_lock)) {
			if (!mutex_tryenter(&spa_namespace_lock)) {
				mutex_exit(&zv->zv_state_lock);
				rw_exit(&zv->zv_suspend_lock);

#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
				schedule();
				return (SET_ERROR(-ERESTARTSYS));
#else
				if ((gethrtime() - start) > timeout)
					return (SET_ERROR(-ERESTARTSYS));

				schedule_timeout(MSEC_TO_TICK(10));
				goto retry;
#endif
			} else {
				drop_namespace = B_TRUE;
			}
		}

		error = -zvol_first_open(zv, !(flag & FMODE_WRITE));

		if (drop_namespace)
			mutex_exit(&spa_namespace_lock);
	}

	if (error == 0) {
		if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
			if (zv->zv_open_count == 0)
				zvol_last_close(zv);

			error = SET_ERROR(-EROFS);
		} else {
			zv->zv_open_count++;
		}
	}

	mutex_exit(&zv->zv_state_lock);
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);

	if (error == 0)
		zfs_check_media_change(bdev);

	return (error);
}

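/*
 * Linux block device release callback.  Drops zv_open_count and performs
 * the last close (under zv_suspend_lock) when the count reaches zero.
 */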
static void
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv;
	boolean_t drop_suspend = B_TRUE;

	rw_enter(&zvol_state_lock, RW_READER);
	zv = disk->private_data;

	mutex_enter(&zv->zv_state_lock);
	ASSERT3U(zv->zv_open_count, >, 0);
	/*
	 * make sure zvol is not suspended during last close
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock
	 */
	if (zv->zv_open_count == 1) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 1) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	zv->zv_open_count--;
	if (zv->zv_open_count == 0) {
		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
		zvol_last_close(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
}

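/*
 * Handle the ioctls supported by zvols: BLKFLSBUF to flush dirty data out
 * to disk and BLKZNAME to report the dataset name backing the device.
 * All other requests are rejected with ENOTTY.
 */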
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	ASSERT3U(zv->zv_open_count, >, 0);

	switch (cmd) {
	case BLKFLSBUF:
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		if (!(zv->zv_flags & ZVOL_RDONLY))
			txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

		rw_exit(&zv->zv_suspend_lock);
		break;

	case BLKZNAME:
		mutex_enter(&zv->zv_state_lock);
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		mutex_exit(&zv->zv_state_lock);
		break;

	default:
		error = -ENOTTY;
		break;
	}

	return (SET_ERROR(error));
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl	NULL
#endif

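/*
 * Report, and clear, the pending media change event for the zvol, if any.
 */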
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
	unsigned int mask = 0;

	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
		zv->zv_changed = 0;
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (mask);
}

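/*
 * Refresh the kernel's view of the device capacity from the current
 * zv_volsize.
 */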
static int
zvol_revalidate_disk(struct gendisk *disk)
{
	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		set_capacity(zv->zv_zso->zvo_disk,
		    zv->zv_volsize >> SECTOR_BITS);
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (0);
}

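/*
 * Propagate a volume size change to the block layer.  Which interface is
 * used to revalidate the disk depends on the kernel version.
 */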
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct gendisk *disk = zv->zv_zso->zvo_disk;

#if defined(HAVE_REVALIDATE_DISK_SIZE)
	revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
	revalidate_disk(disk);
#else
	zvol_revalidate_disk(disk);
#endif
	return (0);
}

void
zvol_os_clear_private(zvol_state_t *zv)
{
	/*
	 * Cleared while holding zvol_state_lock as a writer
	 * which will prevent zvol_open() from opening it.
	 */
	zv->zv_zso->zvo_disk->private_data = NULL;
}

/*
 * Provide a simple virtual geometry for legacy compatibility.  For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices.  For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors;

	ASSERT3U(zv->zv_open_count, >, 0);

	sectors = get_capacity(zv->zv_zso->zvo_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return (0);
}

804 
805 static const struct block_device_operations zvol_ops = {
806 	.open			= zvol_open,
807 	.release		= zvol_release,
808 	.ioctl			= zvol_ioctl,
809 	.compat_ioctl		= zvol_compat_ioctl,
810 	.check_events		= zvol_check_events,
811 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
812 	.revalidate_disk	= zvol_revalidate_disk,
813 #endif
814 	.getgeo			= zvol_getgeo,
815 	.owner			= THIS_MODULE,
816 #ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
817 	.submit_bio		= zvol_submit_bio,
818 #endif
819 };
820 
821 /*
822  * Allocate memory for a new zvol_state_t and setup the required
823  * request queue and generic disk structures for the block device.
824  */
825 static zvol_state_t *
826 zvol_alloc(dev_t dev, const char *name)
827 {
828 	zvol_state_t *zv;
829 	struct zvol_state_os *zso;
830 	uint64_t volmode;
831 
832 	if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
833 		return (NULL);
834 
835 	if (volmode == ZFS_VOLMODE_DEFAULT)
836 		volmode = zvol_volmode;
837 
838 	if (volmode == ZFS_VOLMODE_NONE)
839 		return (NULL);
840 
841 	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
842 	zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
843 	zv->zv_zso = zso;
844 	zv->zv_volmode = volmode;
845 
846 	list_link_init(&zv->zv_next);
847 	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
848 
849 #ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
850 #ifdef HAVE_BLK_ALLOC_DISK
851 	zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
852 	if (zso->zvo_disk == NULL)
853 		goto out_kmem;
854 
855 	zso->zvo_disk->minors = ZVOL_MINORS;
856 	zso->zvo_queue = zso->zvo_disk->queue;
857 #else
858 	zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
859 	if (zso->zvo_queue == NULL)
860 		goto out_kmem;
861 
862 	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
863 	if (zso->zvo_disk == NULL) {
864 		blk_cleanup_queue(zso->zvo_queue);
865 		goto out_kmem;
866 	}
867 
868 	zso->zvo_disk->queue = zso->zvo_queue;
869 #endif /* HAVE_BLK_ALLOC_DISK */
870 #else
871 	zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
872 	if (zso->zvo_queue == NULL)
873 		goto out_kmem;
874 
875 	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
876 	if (zso->zvo_disk == NULL) {
877 		blk_cleanup_queue(zso->zvo_queue);
878 		goto out_kmem;
879 	}
880 
881 	zso->zvo_disk->queue = zso->zvo_queue;
882 #endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
883 
884 	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
885 
886 	/* Limit read-ahead to a single page to prevent over-prefetching. */
887 	blk_queue_set_read_ahead(zso->zvo_queue, 1);
888 
889 	/* Disable write merging in favor of the ZIO pipeline. */
890 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
891 
892 	/* Enable /proc/diskstats */
893 	blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);
894 
895 	zso->zvo_queue->queuedata = zv;
896 	zso->zvo_dev = dev;
897 	zv->zv_open_count = 0;
898 	strlcpy(zv->zv_name, name, MAXNAMELEN);
899 
900 	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
901 	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
902 
903 	zso->zvo_disk->major = zvol_major;
904 	zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
905 
906 	if (volmode == ZFS_VOLMODE_DEV) {
907 		/*
908 		 * ZFS_VOLMODE_DEV disable partitioning on ZVOL devices: set
909 		 * gendisk->minors = 1 as noted in include/linux/genhd.h.
910 		 * Also disable extended partition numbers (GENHD_FL_EXT_DEVT)
911 		 * and suppresses partition scanning (GENHD_FL_NO_PART_SCAN)
912 		 * setting gendisk->flags accordingly.
913 		 */
914 		zso->zvo_disk->minors = 1;
915 #if defined(GENHD_FL_EXT_DEVT)
916 		zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
917 #endif
918 #if defined(GENHD_FL_NO_PART_SCAN)
919 		zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
920 #endif
921 	}
922 	zso->zvo_disk->first_minor = (dev & MINORMASK);
923 	zso->zvo_disk->fops = &zvol_ops;
924 	zso->zvo_disk->private_data = zv;
925 	snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
926 	    ZVOL_DEV_NAME, (dev & MINORMASK));
927 
928 	return (zv);
929 
930 out_kmem:
931 	kmem_free(zso, sizeof (struct zvol_state_os));
932 	kmem_free(zv, sizeof (zvol_state_t));
933 	return (NULL);
934 }
935 
936 /*
937  * Cleanup then free a zvol_state_t which was created by zvol_alloc().
938  * At this time, the structure is not opened by anyone, is taken off
939  * the zvol_state_list, and has its private data set to NULL.
940  * The zvol_state_lock is dropped.
941  *
942  * This function may take many milliseconds to complete (e.g. we've seen
943  * it take over 256ms), due to the calls to "blk_cleanup_queue" and
944  * "del_gendisk". Thus, consumers need to be careful to account for this
945  * latency when calling this function.
946  */
947 void
948 zvol_os_free(zvol_state_t *zv)
949 {
950 
951 	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
952 	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
953 	ASSERT0(zv->zv_open_count);
954 	ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
955 
956 	rw_destroy(&zv->zv_suspend_lock);
957 	zfs_rangelock_fini(&zv->zv_rangelock);
958 
959 	del_gendisk(zv->zv_zso->zvo_disk);
960 #if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
961 	defined(HAVE_BLK_ALLOC_DISK)
962 	blk_cleanup_disk(zv->zv_zso->zvo_disk);
963 #else
964 	blk_cleanup_queue(zv->zv_zso->zvo_queue);
965 	put_disk(zv->zv_zso->zvo_disk);
966 #endif
967 
968 	ida_simple_remove(&zvol_ida,
969 	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
970 
971 	mutex_destroy(&zv->zv_state_lock);
972 	dataset_kstats_destroy(&zv->zv_kstat);
973 
974 	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
975 	kmem_free(zv, sizeof (zvol_state_t));
976 }
977 
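/*
 * Wait for any pending closes to complete.  Nothing to do on Linux; this
 * exists to satisfy the common zvol interface.
 */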
void
zvol_wait_close(zvol_state_t *zv)
{
}

/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_os_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t len;
	unsigned minor = 0;
	int error = 0;
	int idx;
	uint64_t hash = zvol_name_hash(name);

	if (zvol_inhibit_dev)
		return (0);

	idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
	if (idx < 0)
		return (SET_ERROR(-idx));
	minor = idx << ZVOL_MINOR_BITS;

	zv = zvol_find_by_name_hash(name, hash, RW_NONE);
	if (zv) {
		ASSERT(MUTEX_HELD(&zv->zv_state_lock));
		mutex_exit(&zv->zv_state_lock);
		ida_simple_remove(&zvol_ida, idx);
		return (SET_ERROR(EEXIST));
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}
	zv->zv_hash = hash;

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
	    (DMU_MAX_ACCESS / 4) >> 9);
	blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
	blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#ifdef QUEUE_FLAG_NONROT
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
	/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif

	ASSERT3P(zv->zv_zilog, ==, NULL);
	zv->zv_zilog = zil_open(os, zvol_get_data);
	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(zv->zv_zilog, B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
	dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);

	/*
	 * When udev detects the addition of the device it will immediately
	 * invoke blkid(8) to determine the type of content on the device.
	 * Prefetching the blocks commonly scanned by blkid(8) will speed
	 * up this process.
	 */
	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
	if (len > 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
		    ZIO_PRIORITY_SYNC_READ);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));

	/*
	 * Keep in mind that once add_disk() is called, the zvol is
	 * announced to the world, and zvol_open()/zvol_release() can
	 * be called at any time. Incidentally, add_disk() itself calls
	 * zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
	 * directly as well.
	 */
	if (error == 0) {
		rw_enter(&zvol_state_lock, RW_WRITER);
		zvol_insert(zv);
		rw_exit(&zvol_state_lock);
#ifdef HAVE_ADD_DISK_RET
		error = add_disk(zv->zv_zso->zvo_disk);
#else
		add_disk(zv->zv_zso->zvo_disk);
#endif
	} else {
		ida_simple_remove(&zvol_ida, idx);
	}

	return (error);
}

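/*
 * Update an existing zvol minor to reflect a dataset rename.  The state
 * is rehashed under the new name and a uevent is triggered so udev
 * recreates the /dev/zvol symlinks.
 */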
void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_zso->zvo_disk);

	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/* move to new hashtable entry */
	zv->zv_hash = zvol_name_hash(zv->zv_name);
	hlist_del(&zv->zv_hlink);
	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued.  This ensures udev detects
	 * the name change and fixes the symlinks.  This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes.  This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
	set_disk_ro(zv->zv_zso->zvo_disk, readonly);
}

void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{

	set_disk_ro(zv->zv_zso->zvo_disk, flags);
}

void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{

	set_capacity(zv->zv_zso->zvo_disk, capacity);
}

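/*
 * Module initialization: register the zvol block device driver and create
 * the taskq used to service asynchronous zvol I/O.
 */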
int
zvol_init(void)
{
	int error;
	int threads = MIN(MAX(zvol_threads, 1), 1024);

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		return (error);
	}
	zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
	    threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	if (zvol_taskq == NULL) {
		unregister_blkdev(zvol_major, ZVOL_DRIVER);
		return (-ENOMEM);
	}
	zvol_init_impl();
	ida_init(&zvol_ida);
	return (0);
}

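/*
 * Module teardown: release all minors and unregister the block device
 * driver, taskq, and minor number ida.
 */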
1187 
1188 void
1189 zvol_fini(void)
1190 {
1191 	zvol_fini_impl();
1192 	unregister_blkdev(zvol_major, ZVOL_DRIVER);
1193 	taskq_destroy(zvol_taskq);
1194 	ida_destroy(&zvol_ida);
1195 }
1196 
1197 /* BEGIN CSTYLED */
1198 module_param(zvol_inhibit_dev, uint, 0644);
1199 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
1200 
1201 module_param(zvol_major, uint, 0444);
1202 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
1203 
1204 module_param(zvol_threads, uint, 0444);
1205 MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");
1206 
1207 module_param(zvol_request_sync, uint, 0644);
1208 MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
1209 
1210 module_param(zvol_max_discard_blocks, ulong, 0444);
1211 MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
1212 
1213 module_param(zvol_prefetch_bytes, uint, 0644);
1214 MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
1215 
1216 module_param(zvol_volmode, uint, 0644);
1217 MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
1218 /* END CSTYLED */
1219