/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;
/* Keep rqf_name[] in sync with the definitions below */
enum {
	/* drive already may have started this one */
	__RQF_STARTED,
	/* request for flush sequence */
	__RQF_FLUSH_SEQ,
	/* merge of different types, fail separately */
	__RQF_MIXED_MERGE,
	/* don't call prep for this one */
	__RQF_DONTPREP,
	/* use hctx->sched_tags */
	__RQF_SCHED_TAGS,
	/* use an I/O scheduler for this request */
	__RQF_USE_SCHED,
	/* vaguely specified driver internal error. Ignored by block layer */
	__RQF_FAILED,
	/* don't warn about errors */
	__RQF_QUIET,
	/* account into disk and partition IO statistics */
	__RQF_IO_STAT,
	/* runtime pm request */
	__RQF_PM,
	/* on IO scheduler merge hash */
	__RQF_HASHED,
	/* track IO completion time */
	__RQF_STATS,
	/*
	 * Look at ->special_vec for the actual data payload instead of the
	 * bio chain.
	 */
	__RQF_SPECIAL_PAYLOAD,
	/* request completion needs to be signaled to zone write plugging. */
	__RQF_ZONE_WRITE_PLUGGING,
	/* ->timeout has been called, don't expire again */
	__RQF_TIMED_OUT,
	__RQF_RESV,
	__RQF_BITS
};

#define RQF_STARTED		((__force req_flags_t)(1 << __RQF_STARTED))
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
#define RQF_DONTPREP		((__force req_flags_t)(1 << __RQF_DONTPREP))
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
#define RQF_USE_SCHED		((__force req_flags_t)(1 << __RQF_USE_SCHED))
#define RQF_FAILED		((__force req_flags_t)(1 << __RQF_FAILED))
#define RQF_QUIET		((__force req_flags_t)(1 << __RQF_QUIET))
#define RQF_IO_STAT		((__force req_flags_t)(1 << __RQF_IO_STAT))
#define RQF_PM			((__force req_flags_t)(1 << __RQF_PM))
#define RQF_HASHED		((__force req_flags_t)(1 << __RQF_HASHED))
#define RQF_STATS		((__force req_flags_t)(1 << __RQF_STATS))
#define RQF_SPECIAL_PAYLOAD	\
	((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
#define RQF_ZONE_WRITE_PLUGGING	\
	((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << __RQF_TIMED_OUT))
#define RQF_RESV		((__force req_flags_t)(1 << __RQF_RESV))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
	unsigned short nr_integrity_segments;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	enum rw_hint write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
	 * inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		rq_end_io_fn		*saved_end_io;
	} flush;

	u64 fifo_time;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}
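
/*
 * The rq_list_*() helpers above implement a NULL-terminated, singly linked
 * request list threaded through rq->rq_next. An illustrative sketch of
 * collecting requests on a local list and draining it again follows; the
 * helpers my_next_completed() and handle_one() are hypothetical, and
 * rq_list_add() adds at the head of the list.
 *
 *	struct request *rqlist = NULL, *rq;
 *
 *	while ((rq = my_next_completed()) != NULL)
 *		rq_list_add(&rqlist, rq);
 *
 *	while ((rq = rq_list_pop(&rqlist)) != NULL)
 *		handle_one(rq);
 */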

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

/* Keep alloc_policy_name[] in sync with the definitions below */
enum {
	BLK_TAG_ALLOC_FIFO,	/* allocate starting from 0 */
	BLK_TAG_ALLOC_RR,	/* allocate starting from last allocated tag */
	BLK_TAG_ALLOC_MAX
};

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests in this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many work items are left in the
	 * batch before changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;
	/** @cpuhp_online: List to store requests when a CPU is going offline. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store requests when a CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Used as a lock when the type of the request queue is
 *		   blocking (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * .queue_rq has run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of .get_budget must also be
	 * handled to avoid I/O deadlocks.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting up
	 * the flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing a request that has not completed,
	 * usually to free driver-private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
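
/*
 * A minimal, illustrative ->queue_rq() implementation (a sketch, not part
 * of this header). The driver type and helpers (struct my_dev, my_submit)
 * are hypothetical; real drivers also deal with budgets, resource limits
 * and error handling as required.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct my_dev *dev = hctx->queue->queuedata;
 *
 *		blk_mq_start_request(rq);
 *		if (!my_submit(dev, rq))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */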

/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires an underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 4,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 5,

	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
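
/*
 * For example, a driver that wants round-robin tag allocation would fold
 * the policy into its tag set flags (an illustrative sketch):
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *
 * With BLK_MQ_F_ALLOC_POLICY_START_BIT == 7 this encodes the policy in
 * bit 7, and BLK_MQ_FLAG_TO_ALLOC_POLICY() recovers it again.
 */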

#define BLK_MQ_MAX_DEPTH	(10240)
#define BLK_MQ_NO_HCTX_IDX	(-1U)

enum {
	/* Keep hctx_state_name[] in sync with the definitions below */
	BLK_MQ_S_STOPPED,
	BLK_MQ_S_TAG_ACTIVE,
	BLK_MQ_S_SCHED_RESTART,
	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE,
	BLK_MQ_S_MAX
};

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, lim, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, lim, queuedata, &__key);		\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
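
/*
 * Typical probe-time sequence (an illustrative sketch, not part of this
 * header): set up the shared tag set, then allocate a disk on top of it.
 * my_mq_ops, my_limits, struct my_cmd and the surrounding error handling
 * are hypothetical.
 *
 *	struct blk_mq_tag_set *set = &my_dev->tag_set;
 *	struct gendisk *disk;
 *	int ret;
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	disk = blk_mq_alloc_disk(set, &my_limits, my_dev);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(disk);
 *	}
 */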

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
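
/*
 * Typical passthrough usage (an illustrative sketch): allocate a request,
 * fill in the driver PDU, execute it synchronously and free it again.
 * blk_execute_rq() and blk_mq_rq_to_pdu() are declared further down in this
 * header; my_fill_cmd() is hypothetical.
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	my_fill_cmd(blk_mq_rq_to_pdu(rq));
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */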

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request references in rqs[] before freeing a
	 * request pool
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
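
/*
 * The unique tag packs the hardware queue index into the upper 16 bits and
 * the per-queue tag into the lower 16 bits, so both can be recovered from a
 * single value (an illustrative sketch):
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */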

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful when the caller runs in preemptible
 * context rather than in an interrupt.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	/*
	 * Passthrough IO doesn't use iostat accounting, cgroup stats
	 * or IO scheduler functionality.
	 */
	if (blk_rq_is_passthrough(rq))
		return false;
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * blk_mq_end_request_batch() can't end requests allocated from
	 * sched tags.
	 */
	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}
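
/*
 * Illustrative use in a driver's completion/poll path (a sketch, not part
 * of this header); my_result() and my_complete_batch() are hypothetical.
 * Requests that cannot be batched fall back to the regular completion path.
 *
 *	if (!blk_mq_add_to_batch(req, iob, my_result(req),
 *				 my_complete_batch))
 *		blk_mq_complete_request(req);
 */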

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
		unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
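
/*
 * With set->cmd_size = sizeof(struct my_cmd) (struct my_cmd being a
 * hypothetical driver PDU), each request carries the PDU directly behind
 * it, e.g.:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */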

#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
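
/*
 * Illustrative walk over all data segments of a request (a sketch); the
 * byte counting is only an example of visiting every bio_vec.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		bytes += bvec.bv_len;
 */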

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request. The caller needs to ensure
 * that there is at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
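
/*
 * Illustrative mapping of a request onto a scatterlist for DMA (a sketch);
 * MY_MAX_SEGMENTS is hypothetical and must be at least
 * blk_rq_nr_phys_segments(rq).
 *
 *	struct scatterlist sgl[MY_MAX_SEGMENTS];
 *	int nents;
 *
 *	sg_init_table(sgl, MY_MAX_SEGMENTS);
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 */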
void blk_dump_rq_flags(struct request *, char *);

#endif /* BLK_MQ_H */