/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};
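
/*
 * Each id identifies at most one policy instance on a given queue:
 * RQ_QOS_WBT is writeback throttling (blk-wbt), RQ_QOS_LATENCY is
 * blk-iolatency and RQ_QOS_COST is blk-iocost.
 */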

/*
 * An inflight count paired with the waitqueue that throttled submitters
 * sleep on; consumed by rq_qos_wait() below.
 */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/*
 * One node in a queue's singly linked list of active policies.  Policies
 * embed this in their private state and recover it with container_of()
 * inside their hooks.
 */
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

/*
 * Per-policy hooks, invoked through the rq_qos_*() wrappers below.
 * throttle() runs at bio submission time and may sleep; track() and
 * merge() tie a bio to a new or an existing request; issue(), requeue()
 * and done() follow the request life cycle; done_bio() runs at bio
 * completion; cleanup() runs for a throttled bio that never became a
 * request; queue_depth_changed() fires when the device queue depth
 * changes; exit() is called from rq_qos_exit() at teardown.
 */
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
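
/*
 * A minimal sketch (hypothetical, not in-tree) of how a policy embeds
 * struct rq_qos and implements a couple of hooks; everything prefixed
 * my_ is made up for illustration:
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *		atomic_t inflight;
 *	};
 *
 *	static void my_qos_issue(struct rq_qos *rqos, struct request *rq)
 *	{
 *		struct my_qos *mq = container_of(rqos, struct my_qos, rqos);
 *
 *		atomic_inc(&mq->inflight);
 *	}
 *
 *	static void my_qos_done(struct rq_qos *rqos, struct request *rq)
 *	{
 *		struct my_qos *mq = container_of(rqos, struct my_qos, rqos);
 *
 *		atomic_dec(&mq->inflight);
 *	}
 *
 *	static void my_qos_exit(struct rq_qos *rqos)
 *	{
 *		kfree(container_of(rqos, struct my_qos, rqos));
 *	}
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.issue	= my_qos_issue,
 *		.done	= my_qos_done,
 *		.exit	= my_qos_exit,
 *	};
 */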

struct rq_depth {
	unsigned int max_depth;		/* current inflight limit */

	int scale_step;			/* >0 shrinks the limit, <0 grows it */
	bool scaled_max;		/* limit already at its ceiling */

	unsigned int queue_depth;	/* depth of the backing device */
	unsigned int default_depth;	/* unscaled baseline depth */
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = rqos->next;
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}
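
/*
 * A policy is typically wired up once at queue init time.  A hedged
 * sketch reusing the hypothetical my_qos/my_qos_ops from above (error
 * handling elided):
 *
 *	struct my_qos *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 *
 *	mq->rqos.id = RQ_QOS_WBT;
 *	mq->rqos.ops = &my_qos_ops;
 *	mq->rqos.q = q;
 *	rq_qos_add(q, &mq->rqos);
 *
 * rq_qos_add() links at the head, so the policy added last sees each bio
 * first.  Teardown normally goes through rq_qos_exit(), which pops every
 * entry and calls its ->exit(); rq_qos_del() unlinks a single policy.
 */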

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
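
/*
 * A sketch of the rq_qos_wait() callback pair, modeled on blk-iolatency's
 * usage; my_acquire, my_cleanup and the passed-in limit are hypothetical:
 *
 *	static bool my_acquire(struct rq_wait *rqw, void *private_data)
 *	{
 *		unsigned int *limit = private_data;
 *
 *		return rq_wait_inc_below(rqw, *limit);
 *	}
 *
 *	static void my_cleanup(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up(&rqw->wait);
 *	}
 *
 * A ->throttle() hook would then block until a slot frees up:
 *
 *	rq_qos_wait(&rqw, &limit, my_acquire, my_cleanup);
 *
 * acquire_inflight_cb may be invoked several times per waiter, so it must
 * only try to take a slot and report success; cleanup_cb returns a slot
 * when a woken waiter ends up holding a token it does not need.
 */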

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
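
/*
 * The __rq_qos_*() helpers above live in blk-rq-qos.c and walk the whole
 * rqos chain of a queue.  The inline wrappers below skip the out-of-line
 * call entirely when no policy is attached, keeping the common path cheap.
 */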

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif