/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/msdos_fs.h>	/* for SECTOR_* */
#include <linux/bio.h>

#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_flag_set(flag, q);
}
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_flag_clear(flag, q);
}
#endif

/*
 * 4.7 API,
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface.  However, the new interface is GPL-only, thus we implement
 * our own trivial wrapper when the GPL-only version is detected.
 *
 * 2.6.36 - 4.6 API,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface.  However, while the old interface was available to all,
 * the new one is GPL-only.  Thus if the GPL-only version is detected we
 * implement our own trivial helper.
 */
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
	blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	if (wc)
		q->flush_flags |= REQ_FLUSH;
	if (fua)
		q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
#error "Unsupported kernel"
#endif
}
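
/*
 * Usage sketch (illustrative only): a device setup path wanting both a
 * volatile write cache and FUA support advertised on its queue might
 * call:
 *
 *	blk_queue_set_write_cache(q, true, true);
 */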

static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
	!defined(HAVE_DISK_UPDATE_READAHEAD)
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
	q->backing_dev_info->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
#endif
}

#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
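
/*
 * Iteration sketch (illustrative only): bio_for_each_segment4() hides
 * the bvec iterator differences.  'bio' is a hypothetical in-flight
 * bio; on HAVE_BIO_BVEC_ITER kernels the segment is copied into 'bv',
 * otherwise 'bvp' points at it.
 *
 *	struct bio_vec bv;
 *	struct bio_vec *bvp = NULL;
 *	bvec_iterator_t iter;
 *
 *	bio_for_each_segment4(bv, bvp, bio, iter) {
 *		// process one segment of 'bio'
 *	}
 */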

static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags, bool dev,
    bool transport, bool driver)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

	if (dev)
		*flags |= REQ_FAILFAST_DEV;
	if (transport)
		*flags |= REQ_FAILFAST_TRANSPORT;
	if (driver)
		*flags |= REQ_FAILFAST_DRIVER;
}
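
/*
 * Usage sketch (illustrative only): callers accumulate the FAILFAST
 * flags into a local and apply them to the bio themselves; 'bio' and
 * 'bdev' are hypothetical.
 *
 *	int flags = 0;
 *
 *	bio_set_flags_failfast(bdev, &flags, true, true, true);
 *	bio_set_op_attrs(bio, WRITE, flags);
 */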

/*
 * Maximum disk name length; it may be undefined for some kernels.
 */
#if !defined(DISK_NAME_LEN)
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
	case BLK_STS_NEXUS:
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
		return (BLK_STS_NEXUS);
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}
#endif /* HAVE_BIO_BI_STATUS */

/*
 * 4.3 API change
 * The bio_endio() prototype changed slightly.  These are helper
 * macros to ensure the prototype and invocation are handled correctly.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define	BIO_END_IO_ERROR(bio)		bi_status_to_errno(bio->bi_status)
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_status = errno_to_bi_status(-error);
	bio_endio(bio);
}
#else
#define	BIO_END_IO_ERROR(bio)		(-(bio->bi_error))
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_error = error;
	bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */

#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)		bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */
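
/*
 * Completion callback sketch (illustrative only): a callback written
 * with these macros compiles against both bio_endio() prototypes.  The
 * names 'my_end_io' and 'error' are hypothetical.
 *
 *	BIO_END_IO_PROTO(my_end_io, bio, error)
 *	{
 *	#ifdef HAVE_1ARG_BIO_END_IO_T
 *		int error = BIO_END_IO_ERROR(bio);
 *	#endif
 *		// inspect 'error', tear down state, free the bio
 *	}
 */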

/*
 * 5.15 MACRO,
 *   GD_DEAD
 *
 * 2.6.36 - 5.14 MACRO,
 *   GENHD_FL_UP
 *
 * Check the disk status and return B_TRUE if alive,
 * otherwise B_FALSE.
 */
static inline boolean_t
zfs_check_disk_status(struct block_device *bdev)
{
#if defined(GENHD_FL_UP)
	return (!!(bdev->bd_disk->flags & GENHD_FL_UP));
#elif defined(GD_DEAD)
	return (!test_bit(GD_DEAD, &bdev->bd_disk->state));
#else
/*
 * This is encountered if neither GENHD_FL_UP nor GD_DEAD is available in
 * the kernel - likely due to a macro change that needs to be chased down.
 */
#error "Unsupported kernel: no usable disk status check"
#endif
}

/*
 * 4.1 API,
 * 3.10.0 CentOS 7.x API,
 *   blkdev_reread_part()
 *
 * For older kernels trigger a re-reading of the partition table by calling
 * check_disk_change() which calls flush_disk() to invalidate the device.
 *
 * For newer kernels (as of 5.10), bdev_check_media_change() is used in
 * place of check_disk_change(), with the modification that invalidation
 * is no longer forced.
 */
#ifdef HAVE_CHECK_DISK_CHANGE
#define	zfs_check_media_change(bdev)	check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
#else
#define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	struct gendisk *gd = bdev->bd_disk;
	const struct block_device_operations *bdo = gd->fops;
#endif

	if (!bdev_check_media_change(bdev))
		return (0);

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	/*
	 * Force revalidation, to mimic the old behavior of
	 * check_disk_change().
	 */
	if (bdo->revalidate_disk)
		bdo->revalidate_disk(gd);
#endif

	return (0);
}
#define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
#else
/*
 * This is encountered if check_disk_change() and bdev_check_media_change()
 * are not available in the kernel - likely due to an API change that needs
 * to be chased down.
 */
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */

/*
 * 2.6.27 API change
 * The function was exported for use; prior to this it existed but the
 * symbol was not exported.
 *
 * 4.4.0-6.21 API change for Ubuntu
 * lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
 *
 * 5.11 API change
 * Changed to take a dev_t argument which is set on success, and to return
 * a non-zero error code on failure.
 */
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
	return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#elif defined(HAVE_MODE_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path, FMODE_READ);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#else
#error "Unsupported kernel"
#endif
}
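
/*
 * Usage sketch (illustrative only): resolving a device path to a dev_t
 * works the same across all three lookup_bdev() variants; the path
 * below is hypothetical.
 *
 *	dev_t dev;
 *	int error = vdev_lookup_bdev("/dev/sda1", &dev);
 *
 *	if (error)
 *		// device node not found or not a block device
 */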

/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
	bio->bi_rw |= rw | flags;
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 *
 * 2.6.37 - 4.8 API,
 *   Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
 *   replacement for WRITE_BARRIER to allow expressing richer semantics
 *   to the block layer.  It's up to the block layer to implement the
 *   semantics correctly.  Use the WRITE_FLUSH_FUA flag combination.
 *
 * 4.8 - 4.9 API,
 *   REQ_FLUSH was renamed to REQ_PREFLUSH.  For consistency with previous
 *   OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
 *
 * 4.10 API,
 *   The read/write flags and their modifiers, including WRITE_FLUSH,
 *   WRITE_FUA and WRITE_FLUSH_FUA, were removed from fs.h in
 *   torvalds/linux@70fd7614 and replaced by direct flag modification
 *   of the REQ_ flags in bio->bi_opf.  Use REQ_PREFLUSH.
 */
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(HAVE_REQ_PREFLUSH)	/* >= 4.10 */
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH);
#elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else
#error	"Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_FLUSH
 *
 * 4.8-rc0 - 4.8-rc1,
 *   REQ_PREFLUSH
 *
 * 2.6.36 - 4.7 API,
 *   REQ_FLUSH
 *
 * Checking for these flags identifies flush requests in all cases but
 * may have a performance impact for some kernels.  It has the advantage
 * of minimizing kernel specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
	return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(HAVE_REQ_FLUSH)
	return (bio->bi_rw & REQ_FLUSH);
#else
#error	"Unsupported kernel"
#endif
}

/*
 * 4.8 API,
 *   REQ_FUA flag moved to bio->bi_opf
 *
 * 2.6.x - 4.7 API,
 *   REQ_FUA
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
	return (bio->bi_rw & REQ_FUA);
#else
#error	"Allowing the build will cause fua requests to be ignored."
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_DISCARD
 *
 * 2.6.36 - 4.7 API,
 *   REQ_DISCARD
 *
 * In all cases the normal I/O path is used for discards.  The only
 * difference is how the kernel tags individual I/Os as discards.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
	return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_REQ_DISCARD)
	return (bio->bi_rw & REQ_DISCARD);
#else
#error "Unsupported kernel"
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_SECURE_ERASE
 *
 * 2.6.36 - 4.7 API,
 *   REQ_SECURE
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
	return (bio->bi_rw & REQ_SECURE);
#else
	return (0);
#endif
}

/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set.  For
 * older kernels which do not support this it is safe to skip it.
 */
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}

/*
 * 5.19 API,
 *   bdev_max_discard_sectors()
 *
 * 2.6.32 API,
 *   blk_queue_discard()
 */
static inline boolean_t
bdev_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
	return (!!bdev_max_discard_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_DISCARD)
	return (!!blk_queue_discard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}

/*
 * 5.19 API,
 *   bdev_max_secure_erase_sectors()
 *
 * 4.8 API,
 *   blk_queue_secure_erase()
 *
 * 2.6.36 - 4.7 API,
 *   blk_queue_secdiscard()
 */
static inline boolean_t
bdev_secure_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
	return (!!bdev_max_secure_erase_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
	return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
#elif defined(HAVE_BLK_QUEUE_SECDISCARD)
	return (!!blk_queue_secdiscard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times.  Other kernel callers and
 * user space processes which don't pass this value will get EBUSY.  This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER			((void *)0x2401de7)
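
/*
 * Usage sketch (illustrative only): on kernels whose blkdev_get_by_path()
 * takes a holder argument, internal callers open the device with
 * VDEV_HOLDER so repeated exclusive opens by the vdev code succeed; the
 * path and mode below are hypothetical.
 *
 *	struct block_device *bdev = blkdev_get_by_path("/dev/sda1",
 *	    FMODE_READ | FMODE_WRITE | FMODE_EXCL, VDEV_HOLDER);
 */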

static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_BDEV_IO_ACCT)
	return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
	    bio_op(bio), jiffies));
#elif defined(HAVE_DISK_IO_ACCT)
	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
	return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#else
	/* Unsupported */
	return (0);
#endif
}

static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BDEV_IO_ACCT)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
#elif defined(HAVE_DISK_IO_ACCT)
	disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
	bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}
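
/*
 * Accounting sketch (illustrative only): the two helpers are paired
 * around the lifetime of a bio so the disk stats reflect its service
 * time; 'q', 'disk', 'rw', and 'bio' are hypothetical.
 *
 *	unsigned long start = blk_generic_start_io_acct(q, disk, rw, bio);
 *	// ... issue and complete the I/O ...
 *	blk_generic_end_io_acct(q, disk, rw, bio, start);
 */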

#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	return (blk_alloc_queue_rh(make_request, node_id));
#else
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
	if (q != NULL)
		blk_queue_make_request(q, make_request);

	return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
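
/*
 * Usage sketch (illustrative only): on kernels where the submit_bio
 * callback does not live in block_device_operations, a driver allocates
 * its queue with its make_request function; 'my_make_request' is
 * hypothetical.
 *
 *	struct request_queue *q =
 *	    blk_generic_alloc_queue(my_make_request, NUMA_NO_NODE);
 */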

/*
 * All the io_*() helper functions below can operate on a bio or on a
 * request (rq), but not both.  The older submit_bio() codepath will
 * pass a bio, and the newer blk-mq codepath will pass a rq.  See the
 * usage sketch following these helpers.
 */
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL) {
		if (op_is_write(req_op(rq))) {
			return (WRITE);
		} else {
			return (READ);
		}
	}
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_data_dir(bio));
}

static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_flush(bio));
}

static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_DISCARD);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_discard(bio));
}

static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_SECURE_ERASE);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_secure_erase(bio));
}

static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (rq->cmd_flags & REQ_FUA);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_fua(bio));
}

static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_pos(rq) << 9);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SECTOR(bio) << 9);
}

static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_bytes(rq));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SIZE(bio));
}

static inline int
io_has_data(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (bio_has_data(rq->bio));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_has_data(bio));
}
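
/*
 * Usage sketch (illustrative only): a handler shared by both codepaths
 * receives exactly one of 'bio' or 'rq' non-NULL and dispatches on the
 * helpers above.
 *
 *	uint64_t offset = io_offset(bio, rq);
 *	uint64_t size = io_size(bio, rq);
 *
 *	if (io_is_flush(bio, rq))
 *		// flush the write cache
 *	else if (io_data_dir(bio, rq) == WRITE)
 *		// write 'size' bytes at 'offset'
 */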
#endif /* _ZFS_BLKDEV_H */