/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* use hctx->sched_tags */
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 8))
/* use an I/O scheduler for this request */
#define RQF_USE_SCHED		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/*
 * Look at ->special_vec for the actual data payload instead of the
 * bio chain.
 */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* the request is a reserved request (allocated with BLK_MQ_REQ_RESERVED) */
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by the
	 * completion path.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those requests
	 * cannot be inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to allocate them dynamically.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		rq_end_io_fn		*saved_end_io;
	} flush;

	u64 fifo_time;

	/* completion callback */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)),				\
		nxt = pos ? rq_list_next(pos) : NULL;			\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be prepended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}

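/*
 * Example (illustrative sketch, not an API defined here): draining a
 * singly linked rq_list that a driver received via ->queue_rqs(). The
 * my_queue_one() helper is hypothetical and stands in for driver-specific
 * submission of a single request.
 *
 *	static void my_queue_rqs(struct request **rqlist)
 *	{
 *		struct request *rq;
 *
 *		// rq_list_pop() unlinks and returns the head, or NULL
 *		while ((rq = rq_list_pop(rqlist)))
 *			my_queue_one(rq);
 *	}
 */
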
/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

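/*
 * Example (illustrative sketch): a driver ->timeout handler. The helpers
 * my_cmd_still_running() and my_abort_cmd() are hypothetical, and the
 * driver is assumed to finish the command from its ->complete handler.
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		if (my_cmd_still_running(cmd))
 *			return BLK_EH_RESET_TIMER;	// keep waiting
 *		my_abort_cmd(cmd);
 *		blk_mq_complete_request(rq);
 *		return BLK_EH_DONE;
 *	}
 */
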
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first, for fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many work items are left in the
	 * batch before switching to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If a bit is set, there is
	 * a pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler is
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy, using an Exponential Weighted
	 * Moving Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue on which requests are put when no tag is
	 * available, so they can wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by the I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Otherwise this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List entry for the CPU hotplug online callbacks. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List entry for the CPU hotplug dead callbacks. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue, named
	 * hctx<queue_num>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

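/*
 * Example (illustrative sketch): a driver with a single queue type can
 * implement ->map_queues() with the core helper below; PCI drivers would
 * typically use blk_mq_pci_map_queues() instead.
 *
 *	static void my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 */
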
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Used as a lock when the type of the request queue is
 *		   blocking (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
};

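/*
 * Example (illustrative sketch): minimal tag set setup for a driver with
 * one hardware queue. The my_mq_ops and struct my_cmd names, and the
 * chosen depth, are assumptions for illustration only.
 *
 *	static struct blk_mq_tag_set my_set;
 *
 *	static int my_init_tag_set(void)
 *	{
 *		my_set.ops = &my_mq_ops;
 *		my_set.nr_hw_queues = 1;
 *		my_set.queue_depth = BLKDEV_DEFAULT_RQ;
 *		my_set.numa_node = NUMA_NO_NODE;
 *		my_set.cmd_size = sizeof(struct my_cmd);
 *		my_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *		return blk_mq_alloc_tag_set(&my_set);
 *	}
 *
 * blk_mq_alloc_sq_tag_set() below provides a shorthand for this
 * single-queue pattern.
 */
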
/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: Whether this is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * ->queue_rq() has run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of ->get_budget() must also
	 * be handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is for setting up the
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing a request that has not completed,
	 * usually to free driver-private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

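/*
 * Example (illustrative sketch): the skeleton of a ->queue_rq()
 * implementation. my_hw_submit() and struct my_cmd are hypothetical;
 * real drivers also handle more error cases.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *
 *		blk_mq_start_request(bd->rq);
 *		if (!my_hw_submit(hctx->driver_data, cmd, bd->last))
 *			return BLK_STS_RESOURCE;	// retried later
 *		return BLK_STS_OK;
 *	}
 */
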
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires an underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' as the default scheduler during queue registration,
	 * instead of 'mq-deadline', in case of a single hwq or shared hwqs.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, queuedata, &__key);			\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

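/*
 * Example (illustrative sketch): the usual bring-up order for a blk-mq
 * driver; error unwinding is omitted and my_set/my_data are hypothetical.
 *
 *	err = blk_mq_alloc_tag_set(&my_set);
 *	...
 *	disk = blk_mq_alloc_disk(&my_set, my_data);
 *	...
 *	err = add_disk(disk);
 *
 * Teardown runs in reverse: del_gendisk(), put_disk(), then
 * blk_mq_free_tag_set().
 */
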
void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * Used to clear the request reference in rqs[] before freeing a
	 * request pool.
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

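/*
 * Example (illustrative sketch): an adapter driver that muxes several
 * hardware queues onto one command stream can recover both halves from
 * the unique tag:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */
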
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure that special complete actions
 * that need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Start/end time stamping is only needed if iostat or blk stats are
 * enabled, or an IO scheduler is in use.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	/*
	 * Passthrough IO doesn't use iostat accounting, cgroup stats,
	 * or IO scheduler functionality.
	 */
	if (blk_rq_is_passthrough(rq))
		return false;
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * blk_mq_end_request_batch() can't end requests allocated from
	 * sched tags.
	 */
	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}

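/*
 * Example (illustrative sketch): in a driver completion path, try to add
 * the request to the poll batch and fall back to the regular completion
 * path. my_complete_batch() is a hypothetical io_comp_batch handler and
 * error is the driver's per-request status.
 *
 *	if (!blk_mq_add_to_batch(req, iob, error, my_complete_batch))
 *		blk_mq_complete_request(req);
 */
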
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

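/*
 * Example (illustrative sketch): the common pattern for temporarily
 * stopping I/O while reconfiguring a device:
 *
 *	blk_mq_freeze_queue(q);
 *	// ...update state that must not race with requests in flight...
 *	blk_mq_unfreeze_queue(q);
 */
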
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request, so subtract the
 * request size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request, so add the request
 * to get the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

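/*
 * Example (illustrative sketch): with .cmd_size = sizeof(struct my_cmd)
 * in the tag set, per-request driver data lives directly behind the
 * request, and the two convert back and forth:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */
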
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

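/*
 * Example (illustrative sketch): walking the data of a request segment
 * by segment, e.g. in a PIO-style driver. my_copy_segment() is
 * hypothetical and highmem handling is ignored.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_copy_segment(page_address(bvec.bv_page) + bvec.bv_offset,
 *				bvec.bv_len);
 */
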
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller must ensure that
 * the request has at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
void blk_dump_rq_flags(struct request *, char *);

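/*
 * Example (illustrative sketch): mapping a request for DMA. The sglist
 * is assumed to have been allocated with at least
 * blk_rq_nr_phys_segments(rq) entries.
 *
 *	int nents = blk_rq_map_sg(q, rq, sglist);
 *
 *	dma_map_sg(dev, sglist, nents, rq_dma_dir(rq));
 */
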
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

/**
 * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
 * @rq: Request to examine.
 *
 * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
 */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return op_needs_zoned_write_locking(req_op(rq)) &&
		blk_rq_zone_is_seq(rq);
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return false;
}

static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

#endif /* BLK_MQ_H */