// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static bool track_protection(struct page_counter *c)
{
	return c->protection_support;
}

static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	long delta;

	if (!c->parent)
		return;

	protected = min(usage, READ_ONCE(c->min));
	old_protected = atomic_long_read(&c->min_usage);
	if (protected != old_protected) {
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	protected = min(usage, READ_ONCE(c->low));
	old_protected = atomic_long_read(&c->low_usage);
	if (protected != old_protected) {
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}
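
/*
 * Illustrative example (not from the original source): a counter with
 * min = 100 whose usage grows from 80 to 120 moves its tracked
 * protected value from min(80, 100) = 80 to min(120, 100) = 100, so a
 * delta of +20 is added to the parent's children_min_usage. Usage in
 * excess of the declared minimum never inflates the parent's tally.
 */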

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	/* More uncharges than charges? */
	if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
		      new, nr_pages)) {
		new = 0;
		atomic_long_set(&counter->usage, new);
	}
	if (track_protection(counter))
		propagate_protected_usage(counter, new);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		if (protection)
			propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 *
		 * Notably, we have two watermarks to allow for both a globally
		 * visible peak and one that can be reset at a smaller scope.
		 *
		 * Since we reset both watermarks when the global reset occurs,
		 * we can guarantee that watermark >= local_watermark, so we
		 * don't need to do both comparisons every time.
		 *
		 * On systems with branch predictors, the inner condition should
		 * be almost free.
		 */
		if (new > READ_ONCE(c->local_watermark)) {
			WRITE_ONCE(c->local_watermark, new);
			if (new > READ_ONCE(c->watermark))
				WRITE_ONCE(c->watermark, new);
		}
	}
}
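
/*
 * Illustrative example (not from the original source): if charges peak
 * at 300 pages, both watermarks read 300. Should only local_watermark
 * then be reset from a smaller scope and usage later peak at 200,
 * local_watermark reads 200 while watermark still reads 300, keeping
 * the invariant watermark >= local_watermark.
 */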

/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS. If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit. When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt which is only used
			 * to report stats.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		if (protection)
			propagate_protected_usage(c, new);

		/* see comment on page_counter_charge */
		if (new > READ_ONCE(c->local_watermark)) {
			WRITE_ONCE(c->local_watermark, new);
			if (new > READ_ONCE(c->watermark))
				WRITE_ONCE(c->watermark, new);
		}
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
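
/*
 * Sketch of a typical caller (illustrative; try_to_reclaim() is a
 * hypothetical helper, not part of this file):
 *
 *	struct page_counter *fail;
 *
 *	while (!page_counter_try_charge(counter, nr_pages, &fail)) {
 *		if (!try_to_reclaim(fail, nr_pages))
 *			return -ENOMEM;
 *	}
 *
 * No rollback is needed on failure: the failed: loop above has already
 * cancelled the charge on every level below the counter in @fail, and
 * the failing counter itself was never left charged.
 */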

/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = page_counter_read(counter);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (page_counter_read(counter) <= usage || nr_pages >= old)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
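
/*
 * Illustrative race (not from the original source): usage reads 50 and
 * the limit is lowered from 100 to 60, but a concurrent charge pushes
 * usage to 70 around the xchg. The re-read then sees usage above the
 * snapshot with nr_pages < old, so the old limit is restored and the
 * loop retries: either the racing charge failed against the new limit
 * and was rolled back, letting the retry succeed, or it completed
 * under the old limit and the retry returns -EBUSY.
 */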

/**
 * page_counter_set_min - set the amount of hard-protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of best-effort protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
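
/*
 * Illustrative examples (not from the original source), assuming a 4K
 * PAGE_SIZE: "512M" parses to 131072 pages, a @buf matching @max
 * (e.g. "max") yields PAGE_COUNTER_MAX, and trailing garbage as in
 * "512Mxyz" fails with -EINVAL.
 */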

#ifdef CONFIG_MEMCG
/*
 * This function calculates an individual page counter's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one counter claims much more protection than it uses memory,
 *    the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A counter's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual counter's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each counter's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees.
 */
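/*
 * Worked example of rule 3 (illustrative, not from the original
 * source): a parent with 100 pages of effective protection has two
 * children that each declare min = 80 and fully utilize it. The
 * combined claim of 160 overcommits the parent's budget, so each
 * child ends up with 80 * 100 / 160 = 50 pages of effective
 * protection.
 */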
static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected,
					  bool recursive_protection)
{
	unsigned long protected;
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection than what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
	 */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/*
	 * Ok, utilized protection of all children is within what the
	 * parent affords them, so we know whatever this child claims
	 * and utilizes is effectively protected.
	 *
	 * If there is unprotected usage beyond this value, reclaim
	 * will apply pressure in proportion to that amount.
	 *
	 * If there is unutilized protection, the cgroup will be fully
	 * shielded from reclaim, but we do return a smaller value for
	 * protection than what the group could enjoy in theory. This
	 * is okay. With the overcommit distribution above, effective
	 * protection is always dependent on how memory is actually
	 * consumed among the siblings anyway.
	 */
	ep = protected;

	/*
	 * If the children aren't claiming (all of) the protection
	 * afforded to them by the parent, distribute the remainder in
	 * proportion to the (unprotected) memory of each cgroup. That
	 * way, cgroups that aren't explicitly prioritized wrt each
	 * other compete freely over the allowance, but they are
	 * collectively protected from neighboring trees.
	 *
	 * We're using unprotected memory for the weight so that if
	 * some cgroups DO claim explicit protection, we don't protect
	 * the same bytes twice.
	 *
	 * Check both usage and parent_usage against the respective
	 * protected values. One should imply the other, but they
	 * aren't read atomically - make sure the division is sane.
	 */
	if (!recursive_protection)
		return ep;

	if (parent_effective > siblings_protected &&
	    parent_usage > siblings_protected &&
	    usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
}
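
/*
 * Worked example of the recursive branch above (illustrative, not from
 * the original source): with parent_effective = 100,
 * siblings_protected = 40 and parent_usage = 200, a child at
 * usage = 80 with protected = 20 picks up
 * (100 - 40) * (80 - 20) / (200 - 40) = 22 extra pages of floating
 * protection on top of ep = 20.
 */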

/**
 * page_counter_calculate_protection - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @counter: the page_counter to update
 * @recursive_protection: whether to use memory_recursiveprot behavior
 *
 * Calculates elow/emin thresholds for given page_counter.
 *
 * WARNING: This function is not stateless! It can only be used as part
 * of a top-down tree iteration, not for isolated queries.
 */
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection)
{
	unsigned long usage, parent_usage;
	struct page_counter *parent = counter->parent;

	/*
	 * Effective values of the reclaim targets are ignored so they
	 * can be stale. Have a look at mem_cgroup_protection for more
	 * details.
	 * TODO: calculation should be more robust so that we do not need
	 * that special casing.
	 */
	if (root == counter)
		return;

	usage = page_counter_read(counter);
	if (!usage)
		return;

	if (parent == root) {
		counter->emin = READ_ONCE(counter->min);
		counter->elow = READ_ONCE(counter->low);
		return;
	}

	parent_usage = page_counter_read(parent);

	WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
			READ_ONCE(counter->min),
			READ_ONCE(parent->emin),
			atomic_long_read(&parent->children_min_usage),
			recursive_protection));

	WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
			READ_ONCE(counter->low),
			READ_ONCE(parent->elow),
			atomic_long_read(&parent->children_low_usage),
			recursive_protection));
}
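
/*
 * Usage note (illustrative, not from the original source): the memory
 * controller calls this while walking the tree top-down during
 * reclaim, visiting each counter below @root in pre-order so that
 * parent->emin/elow are up to date before any child reads them.
 * Calling it for an isolated counter would consume stale parental
 * values, hence the WARNING in the kerneldoc above.
 */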
#endif /* CONFIG_MEMCG */