/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)
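
/*
 * A minimal usage sketch, not part of the original header: byte counts
 * convert to sectors by shifting with SECTOR_SHIFT. The helper name is
 * hypothetical; kernel callers typically open-code this shift.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	/* e.g. 4096 bytes >> 9 == 8 sectors; with 4K pages, PAGE_SECTORS == 8 */
	return bytes >> SECTOR_SHIFT;
}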

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk		*bd_disk;
	struct request_queue	*bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8)	// read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void			*bd_claiming;
	void			*bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
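
/*
 * Usage sketch (assumed context, not from the original header): inside a
 * device-attribute callback the embedded bd_device comes back as a plain
 * struct device, and dev_to_bdev() recovers the containing block_device
 * via container_of():
 *
 *	struct block_device *bdev = dev_to_bdev(dev);
 *	sector_t sectors = bdev->bd_nr_sectors;
 */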

/*
 * Block error status values. See block/blk-core.c:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf. bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
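
/*
 * Illustrative sketch of the distinction above (hypothetical driver code;
 * all my_* names are made up): a ->queue_rq() implementation returns
 * BLK_STS_DEV_RESOURCE only when completion of in-flight IO is guaranteed
 * to free the missing resource and rerun the queue:
 *
 *	if (!my_alloc_device_tag(dev))		// per-device, freed on completion
 *		return BLK_STS_DEV_RESOURCE;	// queue is rerun automatically
 *	if (!my_map_dma(dev, rq))		// system-wide resource
 *		return BLK_STS_RESOURCE;	// no such guarantee
 */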

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL	((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
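
/*
 * Usage sketch (assumed caller, e.g. a multipath completion handler; the
 * helpers named here are hypothetical):
 *
 *	if (blk_path_error(error))
 *		retry_on_other_path(rq);	// may succeed elsewhere
 *	else
 *		complete_with_error(rq, error);	// terminal, do not retry
 */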

struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
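
/*
 * Sketch of what BIO_RESET_BYTES is for; this mirrors, under the layout
 * rule stated above, what bio_reset() does internally: everything before
 * bi_max_vecs is cleared, everything from bi_max_vecs onwards survives.
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */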

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};
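
/*
 * These constants are bit numbers in bio->bi_flags, not masks. A minimal
 * sketch of a flag test (the real accessors, e.g. bio_flagged(), live in
 * <linux/bio.h>):
 *
 *	if (bio->bi_flags & (1U << BIO_CLONED))
 *		...	// this bio does not own its data pages
 */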

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};
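
/*
 * Worked example of the direction encoding documented above; this is
 * exactly the property op_is_write() below relies on:
 *
 *	REQ_OP_READ  (0)	 & 1 == 0  ->  transfer FROM the device
 *	REQ_OP_WRITE (1)	 & 1 == 1  ->  transfer TO the device
 *	REQ_OP_WRITE_ZEROES (9)	 & 1 == 1  ->  also a write
 *	REQ_OP_FLUSH (2)	 & 1 == 0  ->  no data, bit has no meaning
 */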

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
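
/*
 * Composition sketch (not from the original header): an operation and its
 * modifier flags share one blk_opf_t value, e.g. a synchronous write that
 * must reach stable media:
 *
 *	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
 */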

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* op_is_write() yields 0 or 1, matching STAT_READ and STAT_WRITE */
	return op_is_write(op);
}
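
/*
 * Usage sketch (assumed caller; accounting code along these lines lives in
 * block/blk-core.c, but this exact snippet is illustrative only):
 *
 *	const int sgrp = op_stat_group(bio_op(bio));
 *	part_stat_inc(bio->bi_bdev, ios[sgrp]);
 */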

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */