/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/fs_parser.h>

#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern void __init enable_debug_cgroup(void);

/*
 * cgroup_path() takes a spinlock, and it is good practice not to take
 * spinlocks within tracepoint handlers, as they are mostly hidden from
 * normal view. Because cgroup_path() can take the kernfs_rename_lock
 * spinlock, the path is formatted here, under trace_cgroup_path_lock,
 * rather than from the trace event handler itself.
 *
 * Note: trace_cgroup_##type##_enabled() is a static branch that is only
 * set when the trace event is enabled.
 */
#define TRACE_CGROUP_PATH(type, cgrp, ...)				\
	do {								\
		if (trace_cgroup_##type##_enabled()) {			\
			unsigned long flags;				\
			spin_lock_irqsave(&trace_cgroup_path_lock,	\
					  flags);			\
			cgroup_path(cgrp, trace_cgroup_path,		\
				    TRACE_CGROUP_PATH_LEN);		\
			trace_cgroup_##type(cgrp, trace_cgroup_path,	\
					    ##__VA_ARGS__);		\
			spin_unlock_irqrestore(&trace_cgroup_path_lock, \
					       flags);			\
		}							\
	} while (0)
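
/*
 * Example (illustrative sketch, not part of the API surface): a
 * path-annotated cgroup event is emitted from a call site such as
 * cgroup_mkdir() with
 *
 *	TRACE_CGROUP_PATH(mkdir, cgrp);
 *
 * When the tracepoint is disabled, the static branch keeps both the
 * spinlock and the path formatting off the fast path entirely.
 */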

/*
 * The cgroup filesystem superblock creation/mount context.
 */
struct cgroup_fs_context {
	struct kernfs_fs_context kfc;
	struct cgroup_root	*root;
	struct cgroup_namespace	*ns;
	unsigned int		flags;		/* CGRP_ROOT_* flags */

	/* cgroup1 bits */
	bool		cpuset_clone_children;
	bool		none;			/* User explicitly requested empty subsystem */
	bool		all_ss;			/* Seen 'all' option */
	u16		subsys_mask;		/* Selected subsystems */
	char		*name;			/* Hierarchy name */
	char		*release_agent;		/* Path for release notifications */
};

static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct cgroup_fs_context, kfc);
}
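
/*
 * Example (illustrative sketch): fs_context operations for the cgroup
 * filesystems recover their private context this way, e.g. in a
 * ->get_tree() handler:
 *
 *	static int my_get_tree(struct fs_context *fc)
 *	{
 *		struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 *
 *		return do_something_with(ctx->root);
 *	}
 *
 * my_get_tree() and do_something_with() are hypothetical names used
 * only for illustration.
 */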

struct cgroup_pidlist;

struct cgroup_file_ctx {
	struct cgroup_namespace	*ns;

	struct {
		void			*trigger;
	} psi;

	struct {
		bool			started;
		struct css_task_iter	iter;
	} procs;

	struct {
		struct cgroup_pidlist	*pidlist;
	} procs1;

	struct cgroup_of_peak peak;
};

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies. In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
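
/*
 * Example (illustrative sketch, assumes css_set_lock is held): walking
 * the M:N relationship from the cgroup side looks like
 *
 *	struct cgrp_cset_link *link;
 *
 *	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *		visit(link->cset);
 *
 * and the css_set side is symmetric via cset->cgrp_links and
 * link->cgrp. visit() is a hypothetical placeholder.
 */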

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the number of tasks in the set */
	int			nr_tasks;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
	 * the csets on ->dst_csets. ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
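
/*
 * Example (illustrative sketch): subsystem callbacks iterate a taskset
 * with cgroup_taskset_for_each() from linux/cgroup.h, e.g. in an
 * ->attach() method:
 *
 *	static void my_ss_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			charge_task(css, task);
 *	}
 *
 * my_ss_attach() and charge_task() are hypothetical names.
 */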

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets. Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)					\
{									\
	.src_csets	= LIST_HEAD_INIT(tset.src_csets),		\
	.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),		\
	.csets		= &tset.src_csets,				\
}

#define CGROUP_MGCTX_INIT(name)						\
{									\
	LIST_HEAD_INIT(name.preloaded_src_csets),			\
	LIST_HEAD_INIT(name.preloaded_dst_csets),			\
	CGROUP_TASKSET_INIT(name.tset),					\
}

#define DEFINE_CGROUP_MGCTX(name)					\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)

extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry_rcu((root), &cgroup_roots, root_list,	\
				lockdep_is_held(&cgroup_mutex))

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
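
/*
 * Example (illustrative sketch): enumerating every compiled-in
 * subsystem and its id:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		pr_debug("subsys %s has id %d\n", ss->name, ssid);
 */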

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for the
	 * irq-safe css_set_lock spinlock.
	 */
	if (refcount_dec_not_one(&cset->refcount))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	refcount_inc(&cset->refcount);
}
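
/*
 * Example (illustrative sketch): pin a css_set while it is used outside
 * css_set_lock, then drop the reference; only the final put may need to
 * take the lock:
 *
 *	get_css_set(cset);
 *	use_cset_unlocked(cset);
 *	put_css_set(cset);
 *
 * use_cset_unlocked() is a hypothetical placeholder.
 */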

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);

void cgroup_favor_dynmods(struct cgroup_root *root, bool favor);
void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_fs_context *ctx);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
int cgroup_do_get_tree(struct fs_context *fc);

int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);
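
/*
 * Example (illustrative sketch of the preload/commit protocol, with
 * locking elided): callers preload source csets, atomically prepare the
 * destinations, commit, and always clean up:
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *	int ret;
 *
 *	cgroup_migrate_add_src(src_cset, dst_cgrp, &mgctx);
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 *
 * This mirrors the shape of callers such as cgroup_attach_task(); see
 * cgroup.c for the required css_set_lock and cgroup_mutex handling.
 */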

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
void cgroup_attach_lock(bool lock_threadgroup);
void cgroup_attach_unlock(bool lock_threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
					     bool *locked)
	__acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task, bool locked)
	__releases(&cgroup_threadgroup_rwsem);

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

int __cgroup_task_count(const struct cgroup *cgrp);
int cgroup_task_count(const struct cgroup *cgrp);

/*
 * rstat.c
 */
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);
void cgroup_base_stat_cputime_show(struct seq_file *seq);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
extern const struct fs_parameter_spec cgroup1_fs_parameters[];

int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param);
int cgroup1_get_tree(struct fs_context *fc);
int cgroup1_reconfigure(struct fs_context *ctx);

#endif /* __CGROUP_INTERNAL_H */