/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>

struct blkcg_gq;
struct blkg_policy_data;


/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

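/*
 * Illustrative sketch (not part of this header): stat helpers built on top
 * of this are expected to pass the batch above to percpu_counter_add_batch()
 * for a per-cpu counter @cnt, so the global sum may lag each CPU by up to
 * the batch size:
 *
 *	percpu_counter_add_batch(cnt, delta, BLKG_STAT_CPU_BATCH);
 */
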
#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

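/*
 * Illustrative sketch, assuming @bis points to a struct blkg_iostat_set: a
 * reader takes a consistent snapshot of the current counters with the usual
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() loop:
 *
 *	struct blkg_iostat cur;
 *	unsigned int seq;
 *
 *	do {
 *		seq = u64_stats_fetch_begin(&bis->sync);
 *		cur = bis->cur;
 *	} while (u64_stats_fetch_retry(&bis->sync, seq));
 */
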
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char                            fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

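/*
 * Illustrative sketch (hypothetical "foo" policy, not part of this header):
 * a policy embeds blkg_policy_data as the first member of its own per-blkg
 * structure and converts back with container_of():
 *
 *	struct foo_blkg_data {
 *		struct blkg_policy_data	pd;
 *		u64			foo_stat;
 *	};
 *
 *	static struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
 *	}
 */
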
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

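/*
 * Illustrative sketch (hypothetical "foo" policy): per-blkcg data is
 * embedded the same way, with blkcg_policy_data as the first member:
 *
 *	struct foo_blkcg_data {
 *		struct blkcg_policy_data	cpd;
 *		unsigned int			foo_default_weight;
 *	};
 *
 *	static struct foo_blkcg_data *cpd_to_foo(struct blkcg_policy_data *cpd)
 *	{
 *		return cpd ? container_of(cpd, struct foo_blkcg_data, cpd) : NULL;
 *	}
 */
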
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

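/*
 * Illustrative sketch (hypothetical "foo" policy, error handling elided):
 * a policy fills in a struct blkcg_policy, registers it once at init time,
 * and activates it on each request_queue it wants to manage:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *	blkcg_policy_unregister(&blkcg_policy_foo);
 */
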
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

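/*
 * Illustrative sketch (hypothetical "foo" policy and handler): a cgroup file
 * write method parses "MAJ:MIN ..." input with blkg_conf_prep(), works on
 * ctx.blkg / ctx.body, and then releases everything with blkg_conf_finish():
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_apply_limit(ctx.blkg, ctx.body);
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */
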
/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

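/*
 * Illustrative sketch of the flow described above (hypothetical policy-side
 * helpers): once the bio is associated with its owning blkg, a throttling
 * policy checks this helper and, when it returns true, skips throttling now
 * and records a debt to collect from the owning cgroup later:
 *
 *	if (bio_issue_as_root_blkg(bio))
 *		foo_record_backcharge(blkg, bio);
 *	else
 *		foo_throttle(blkg, bio);
 */
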
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}

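/*
 * Illustrative sketch: the lookup must be wrapped in an RCU read-side
 * critical section and the result only used while it is held (or after
 * taking a reference, see blkg_tryget() below):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... use blkg ...
 *	rcu_read_unlock();
 */
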
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

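/*
 * Illustrative sketch: taking a reference from an RCU lookup so the blkg
 * can still be used after the RCU read-side critical section ends:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (!blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		... use blkg outside the RCU section ...
 *		blkg_put(blkg);
 *	}
 */
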
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

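/*
 * Illustrative sketch: walking every blkg below (and including) a queue's
 * root blkg under the RCU read lock:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
 *		... inspect or update blkg ...
 *	}
 *	rcu_read_unlock();
 */
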
bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

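/*
 * Illustrative sketch: blkcg_use_delay()/blkcg_unuse_delay() are reference
 * counted, so a policy that wants decaying delays might bracket a period of
 * congestion like this, adding delay while it lasts:
 *
 *	blkcg_use_delay(blkg);
 *	blkcg_add_delay(blkg, now, delay_nsec);
 *	...
 *	blkcg_unuse_delay(blkg);
 */
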
/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

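/*
 * Illustrative sketch: the "set" style delay is not decayed, so a user pairs
 * blkcg_set_delay() with an explicit blkcg_clear_delay() once the condition
 * that required throttling has passed:
 *
 *	blkcg_set_delay(blkg, 10 * NSEC_PER_MSEC);
 *	...
 *	blkcg_clear_delay(blkg);
 */
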
/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match. The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */