/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

/*
 * Block device compatibility layer: papers over the many Linux kernel
 * block-layer API changes so the rest of the code can use one stable
 * set of helpers.  The HAVE_* macros are produced by configure-time
 * kernel feature tests.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/msdos_fs.h>	/* for SECTOR_* */
#include <linux/bio.h>

#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif

/*
 * Provide blk_queue_flag_set()/blk_queue_flag_clear() for kernels which
 * only offer the older queue_flag_set()/queue_flag_clear() interface.
 */
#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_flag_set(flag, q);
}
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_flag_clear(flag, q);
}
#endif

/*
 * 4.7 API,
 * The blk_queue_write_cache() interface has replaced blk_queue_flush()
 * interface.  However, the new interface is GPL-only thus we implement
 * our own trivial wrapper when the GPL-only version is detected.
 *
 * 2.6.36 - 4.6 API,
 * The blk_queue_flush() interface has replaced blk_queue_ordered()
 * interface.  However, while the old interface was available to all the
 * new one is GPL-only.  Thus if the GPL-only version is detected we
 * implement our own trivial helper.
 */
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
	blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	/* NOTE(review): these branches only set flags, never clear them --
	 * presumably callers never ask to disable the cache on these
	 * kernels; confirm before relying on wc/fua == false here. */
	if (wc)
		q->flush_flags |= REQ_FLUSH;
	if (fua)
		q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
#error "Unsupported kernel"
#endif
}

/*
 * Set the queue's readahead size.  This is a no-op on kernels where the
 * block layer updates readahead itself (HAVE_BLK_QUEUE_UPDATE_READAHEAD
 * or HAVE_DISK_UPDATE_READAHEAD); otherwise write ra_pages directly into
 * the backing_dev_info, which is a pointer on newer kernels and embedded
 * in the queue on older ones.
 */
static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
	!defined(HAVE_DISK_UPDATE_READAHEAD)
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
	q->backing_dev_info->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
#endif
}

/*
 * Accessors for the bio's position/size/index fields, which moved into
 * the embedded struct bvec_iter when the immutable biovec work landed.
 * On kernels without bvec_iter there is no bi_bvec_done equivalent, so
 * BIO_BI_SKIP is always 0 there.
 */
#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif

/*
 * Add REQ_FAILFAST_MASK to *flags unless the device is one for which
 * FAILFAST is known to misbehave (see comment below).
 */
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

	*flags |= REQ_FAILFAST_MASK;
}

/*
 * Maximum disk label length, it may be undefined for some kernels.
 */
#if !defined(DISK_NAME_LEN)
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

#ifdef HAVE_BIO_BI_STATUS
/*
 * Map a kernel blk_status_t completion code to a positive errno value.
 * Unknown codes fall through to EIO.
 */
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
	case BLK_STS_NEXUS:
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

/*
 * Inverse of bi_status_to_errno(): map a positive errno value to a
 * blk_status_t.  Unknown errnos fall through to BLK_STS_IOERR.
 */
static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
		return (BLK_STS_NEXUS);
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}
#endif /* HAVE_BIO_BI_STATUS */

/*
 * 4.3 API change
 * The bio_endio() prototype changed slightly.  These are helper
 * macro's to ensure the prototype and invocation are handled.
 * BIO_END_IO() takes a zero-or-negative errno; the helpers below
 * assert that and translate it into the kernel's completion field.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define	BIO_END_IO_ERROR(bio)		bi_status_to_errno(bio->bi_status)
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_status = errno_to_bi_status(-error);
	bio_endio(bio);
}
#else
#define	BIO_END_IO_ERROR(bio)		(-(bio->bi_error))
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_error = error;
	bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */

#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)		bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */

/*
 * 4.1 API,
 * 3.10.0 CentOS 7.x API,
 *   blkdev_reread_part()
 *
 * For older kernels trigger a re-reading of the partition table by calling
 * check_disk_change() which calls flush_disk() to invalidate the device.
 *
 * For newer kernels (as of 5.10), bdev_check_media_change is used, in favor of
 * check_disk_change(), with the modification that invalidation is no longer
 * forced.
 */
#ifdef HAVE_CHECK_DISK_CHANGE
#define	zfs_check_media_change(bdev)	check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
#else
#define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	struct gendisk *gd = bdev->bd_disk;
	const struct block_device_operations *bdo = gd->fops;
#endif

	if (!bdev_check_media_change(bdev))
		return (0);

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	/*
	 * Force revalidation, to mimic the old behavior of
	 * check_disk_change()
	 */
	if (bdo->revalidate_disk)
		bdo->revalidate_disk(gd);
#endif

	return (0);
}
#define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
#else
/*
 * This is encountered if check_disk_change() and bdev_check_media_change()
 * are not available in the kernel - likely due to an API change that needs
 * to be chased down.
 */
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */

/*
 * 2.6.27 API change
 * The function was exported for use, prior to this it existed but the
 * symbol was not exported.
 *
 * 4.4.0-6.21 API change for Ubuntu
 * lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
 *
 * 5.11 API change
 * Changed to take a dev_t argument which is set on success and return a
 * non-zero error code on failure.
 */
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
	return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#elif defined(HAVE_MODE_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path, FMODE_READ);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#else
#error "Unsupported kernel"
#endif
}

/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
	bio->bi_rw |= rw | flags;
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 *
 * 2.6.37 - 4.8 API,
 *   Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
 *   replacement for WRITE_BARRIER to allow expressing richer semantics
 *   to the block layer.  It's up to the block layer to implement the
 *   semantics correctly.  Use the WRITE_FLUSH_FUA flag combination.
 *
 * 4.8 - 4.9 API,
 *   REQ_FLUSH was renamed to REQ_PREFLUSH.  For consistency with previous
 *   OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
 *
 * 4.10 API,
 *   The read/write flags and their modifiers, including WRITE_FLUSH,
 *   WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
 *   torvalds/linux@70fd7614 and replaced by direct flag modification
 *   of the REQ_ flags in bio->bi_opf.  Use REQ_PREFLUSH.
 */
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(HAVE_REQ_PREFLUSH)	/* >= 4.10 */
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH);
#elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else
#error "Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_FLUSH
 *
 * 4.8-rc0 - 4.8-rc1,
 *   REQ_PREFLUSH
 *
 * 2.6.36 - 4.7 API,
 *   REQ_FLUSH
 *
 * Used to determine if a cache flush has been requested.  This check works
 * in all cases but may have a performance impact for some kernels.  It
 * has the advantage of minimizing kernel specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
	return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(HAVE_REQ_FLUSH)
	return (bio->bi_rw & REQ_FLUSH);
#else
#error "Unsupported kernel"
#endif
}

/*
 * 4.8 API,
 *   REQ_FUA flag moved to bio->bi_opf
 *
 * 2.6.x - 4.7 API,
 *   REQ_FUA
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
	return (bio->bi_rw & REQ_FUA);
#else
#error "Allowing the build will cause fua requests to be ignored."
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_DISCARD
 *
 * 2.6.36 - 4.7 API,
 *   REQ_DISCARD
 *
 * In all cases the normal I/O path is used for discards.  The only
 * difference is how the kernel tags individual I/Os as discards.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
	return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_REQ_DISCARD)
	return (bio->bi_rw & REQ_DISCARD);
#else
#error "Unsupported kernel"
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_SECURE_ERASE
 *
 * 2.6.36 - 4.7 API,
 *   REQ_SECURE
 *
 * Unlike the helpers above this quietly returns 0 on kernels with neither
 * interface rather than failing the build.
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
	return (bio->bi_rw & REQ_SECURE);
#else
	return (0);
#endif
}

/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set.  For
 * older kernels which do not support this it is safe to skip it.
 */
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}

/*
 * 5.19 API,
 *   bdev_max_discard_sectors()
 *
 * 2.6.32 API,
 *   blk_queue_discard()
 */
static inline boolean_t
bdev_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
	return (!!bdev_max_discard_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_DISCARD)
	return (!!blk_queue_discard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}

/*
 * 5.19 API,
 *   bdev_max_secure_erase_sectors()
 *
 * 4.8 API,
 *   blk_queue_secure_erase()
 *
 * 2.6.36 - 4.7 API,
 *   blk_queue_secdiscard()
 */
static inline boolean_t
bdev_secure_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
	return (!!bdev_max_secure_erase_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
	return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
#elif defined(HAVE_BLK_QUEUE_SECDISCARD)
	return (!!blk_queue_secdiscard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times.  Other kernel callers and
 * user space processes which don't pass this value will get EBUSY.  This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER			((void *)0x2401de7)

/*
 * Start I/O accounting for a bio using whichever of the many generations
 * of the kernel accounting API is available.  Returns the start time to
 * later pass to blk_generic_end_io_acct(); 0 if unsupported.
 */
static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_BDEV_IO_ACCT)
	return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
	    bio_op(bio), jiffies));
#elif defined(HAVE_DISK_IO_ACCT)
	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
	return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#else
	/* Unsupported */
	return (0);
#endif
}

/*
 * End I/O accounting started by blk_generic_start_io_acct().  The branch
 * selected here must match the one used when start_time was obtained.
 */
static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BDEV_IO_ACCT)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
#elif defined(HAVE_DISK_IO_ACCT)
	disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
	bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}

/*
 * Allocate a request queue with the given make_request function, covering
 * the pre-submit_bio()-in-block_device_operations allocation interfaces.
 */
#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	return (blk_alloc_queue_rh(make_request, node_id));
#else
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
	if (q != NULL)
		blk_queue_make_request(q, make_request);

	return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */

/*
 * All the io_*() helper functions below can operate on a bio, or a rq, but
 * not both.  The older submit_bio() codepath will pass a bio, and the
 * newer blk-mq codepath will pass a rq.  Without HAVE_BLK_MQ the rq
 * argument must be NULL, which each helper asserts.
 */
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL) {
		if (op_is_write(req_op(rq))) {
			return (WRITE);
		} else {
			return (READ);
		}
	}
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_data_dir(bio));
}

static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_flush(bio));
}

static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_DISCARD);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_discard(bio));
}

static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_SECURE_ERASE);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_secure_erase(bio));
}

static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (rq->cmd_flags & REQ_FUA);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_fua(bio));
}


/* Byte offset of the I/O; sector positions are converted with << 9. */
static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_pos(rq) << 9);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SECTOR(bio) << 9);
}

/* Size of the I/O in bytes. */
static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_bytes(rq));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SIZE(bio));
}

/* Nonzero if the I/O carries data (e.g. not a flush). */
static inline int
io_has_data(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (bio_has_data(rq->bio));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_has_data(bio));
}
#endif /* _ZFS_BLKDEV_H */