/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#ifndef _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H

#include <linux/mmdebug.h>

struct hugetlb_cgroup;
struct resv_map;
struct file_region;

#ifdef CONFIG_CGROUP_HUGETLB
enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup_per_node {
	/* Per-hstate hugetlb usage, in pages, on this node. */
	unsigned long usage[HUGE_MAX_HSTATE];
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * The per-hstate counters that account for huge page usage.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * The per-hstate counters that account for huge page reservations.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

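	/*
	 * Event counts (currently only "max", i.e. the limit was hit), one
	 * slot per hstate and per hugetlb_memory_event; events[] is
	 * hierarchical (it includes descendants), events_local[] counts
	 * this cgroup only.
	 */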
	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];

	struct hugetlb_cgroup_per_node *nodeinfo[];
};
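
/*
 * All per-hstate arrays above are indexed by the hstate index. A minimal
 * sketch (assuming a struct hstate *h, as used throughout mm/hugetlb.c):
 *
 *	struct page_counter *pc = &h_cg->hugepage[hstate_index(h)];
 *	unsigned long used = page_counter_read(pc);	// current charge, in pages
 */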

static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	if (rsvd)
		return folio->_hugetlb_cgroup_rsvd;
	else
		return folio->_hugetlb_cgroup;
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
	return __hugetlb_cgroup_from_folio(folio, false);
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
	return __hugetlb_cgroup_from_folio(folio, true);
}

static inline void __set_hugetlb_cgroup(struct folio *folio,
				       struct hugetlb_cgroup *h_cg, bool rsvd)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	if (rsvd)
		folio->_hugetlb_cgroup_rsvd = h_cg;
	else
		folio->_hugetlb_cgroup = h_cg;
}

static inline void set_hugetlb_cgroup(struct folio *folio,
				     struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(folio, h_cg, false);
}

static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
					  struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(folio, h_cg, true);
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
	css_put(&h_cg->css);
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_get(resv_map->css);
}

static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_put(resv_map->css);
}

extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					 struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					      struct folio *folio);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
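
/*
 * Reservation charges are also tracked through the resv_map: the counter
 * variant below releases the charge taken for a reserved [start, end)
 * range when the owning mapping is torn down, and the file_region variant
 * drops the charge carried by an individual region entry (with @region_del
 * indicating the entry itself is being removed). See mm/hugetlb_cgroup.c
 * for the authoritative behaviour.
 */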
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages,
						bool region_del);

extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct folio *old_folio,
				   struct folio *new_folio);
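
/*
 * A minimal sketch of the expected charge/commit/uncharge sequence. The
 * authoritative callers live in mm/hugetlb.c (the allocation and free
 * paths), where error handling is more involved; this only illustrates how
 * the calls above are meant to pair up, assuming a struct hstate *h:
 *
 *	int idx = hstate_index(h);
 *	unsigned long nr_pages = pages_per_huge_page(h);
 *	struct hugetlb_cgroup *h_cg;
 *
 *	if (hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg))
 *		goto fail;			// over limit, charge refused
 *	folio = ...;				// dequeue or allocate the folio
 *	if (!folio) {
 *		// charge was never bound to a folio, so undo it directly
 *		hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);
 *		goto fail;
 *	}
 *	hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio);
 *
 * On free, hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio) looks the
 * cgroup up through the folio and releases the charge. The *_rsvd variants
 * follow the same pattern for the reservation counters.
 */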

#else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages,
						       bool region_del)
{
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
	return NULL;
}

static inline void set_hugetlb_cgroup(struct folio *folio,
				     struct hugetlb_cgroup *h_cg)
{
}

static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
					  struct hugetlb_cgroup *h_cg)
{
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						    unsigned long nr_pages,
						    struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct folio *folio)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct folio *folio)
{
}

static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
						struct folio *folio)
{
}

static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
						     unsigned long nr_pages,
						     struct folio *folio)
{
}

static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						  unsigned long nr_pages,
						  struct hugetlb_cgroup *h_cg)
{
}

static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}

static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						   unsigned long start,
						   unsigned long end)
{
}

static inline void hugetlb_cgroup_file_init(void)
{
}

static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
					  struct folio *new_folio)
{
}

#endif	/* CONFIG_CGROUP_HUGETLB */
#endif	/* _LINUX_HUGETLB_CGROUP_H */