/*	$NetBSD: i915_gem_shrinker.c,v 1.3 2021/12/19 11:33:49 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_shrinker.c,v 1.3 2021/12/19 11:33:49 riastradh Exp $");

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
			      unsigned long shrink)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;

	if (i915_gem_object_unbind(obj, flags) == 0)
		__i915_gem_object_put_pages(obj);

	return !i915_gem_object_has_pages(obj);
}

static void try_to_writeback(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
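		/* Truncation marks the object purged; deliberately fall through. */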
	case __I915_MADV_PURGED:
		return;
	}

	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g.
 * useful when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
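	/*
	 * Scan in two phases: purgeable objects first (their phase matches
	 * any shrink flags, hence the ~0u bit mask), then the general shrink
	 * list, which is only walked when BOUND and/or UNBOUND scanning was
	 * requested.
	 */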
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so cannot be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context.
	 */

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!(shrink & I915_SHRINK_BOUND) &&
			    atomic_read(&obj->bind_count))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			if (unsafe_drop_pages(obj, shrink)) {
				/* May arrive from get_pages on another bo */
				mutex_lock(&obj->mm.lock);
				if (!i915_gem_object_has_pages(obj)) {
					try_to_writeback(obj, shrink);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}

			scanned += obj->base.size >> PAGE_SHIFT;
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code that intentionally quiesces the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_ACTIVE);
	}

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

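		/* Blend with the previous batch size; never drop below SHRINK_BATCH. */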
		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
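	/*
	 * The pass above only unbinds bound objects if the device is already
	 * awake. If that did not scan enough pages and we are running on
	 * behalf of kswapd, wake the device and retry more aggressively,
	 * including active objects and writeback of dirty pages to swap.
	 */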
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
					       I915_SHRINK_ACTIVE |
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&i915->ggtt.vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &i915->ggtt.vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

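		/* Only idle vmas that still hold a cached iomap are worth unbinding. */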
		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
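	/* Start with a generous batch; i915_gem_shrinker_count() adapts it later. */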
	i915->mm.shrinker.batch = 4096;
	WARN_ON(register_shrinker(&i915->mm.shrinker));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
#if IS_ENABLED(CONFIG_LOCKDEP)
	bool unlock = false;

	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
		mutex_acquire(&i915->drm.struct_mutex.dep_map,
			      I915_MM_NORMAL, 0, _RET_IP_);
		unlock = true;
	}

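	/*
	 * Fake an acquisition of @mutex inside the reclaim context so that
	 * lockdep records the fs_reclaim -> mutex dependency; any later
	 * GFP_KERNEL allocation made while holding @mutex will then be
	 * flagged as a potential deadlock against the shrinker.
	 */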
	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);

	if (unlock)
		mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
#endif
}

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. Neither of the two may cross.
	 */
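	/* Fast path: already unshrinkable (shrink_pin > 0), just bump the pin. */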
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					      struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	if (!i915_gem_object_is_shrinkable(obj))
		return;

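	/* Fast path: drop one pin locklessly unless this would be the last pin. */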
	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.purge_list);
}