1 /* $NetBSD: i915_vma.c,v 1.12 2021/12/19 12:27:49 riastradh Exp $ */
2
3 /*
4 * Copyright © 2016 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: i915_vma.c,v 1.12 2021/12/19 12:27:49 riastradh Exp $");
29
30 #include <linux/sched/mm.h>
31 #include <drm/drm_gem.h>
32
33 #include "display/intel_frontbuffer.h"
34
35 #include "gt/intel_engine.h"
36 #include "gt/intel_engine_heartbeat.h"
37 #include "gt/intel_gt.h"
38 #include "gt/intel_gt_requests.h"
39
40 #include "i915_drv.h"
41 #include "i915_globals.h"
42 #include "i915_sw_fence_work.h"
43 #include "i915_trace.h"
44 #include "i915_vma.h"
45
46 #include <linux/nbsd-namespace.h>
47
48 static struct i915_global_vma {
49 struct i915_global base;
50 struct kmem_cache *slab_vmas;
51 } global;
52
53 struct i915_vma *i915_vma_alloc(void)
54 {
55 return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
56 }
57
58 void i915_vma_free(struct i915_vma *vma)
59 {
60 mutex_destroy(&vma->pages_mutex);
61 return kmem_cache_free(global.slab_vmas, vma);
62 }
63
64 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
65
66 #include <linux/stackdepot.h>
67
68 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
69 {
70 unsigned long *entries;
71 unsigned int nr_entries;
72 char buf[512];
73
74 if (!vma->node.stack) {
75 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
76 vma->node.start, vma->node.size, reason);
77 return;
78 }
79
80 nr_entries = stack_depot_fetch(vma->node.stack, &entries);
81 stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
82 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
83 vma->node.start, vma->node.size, reason, buf);
84 }
85
86 #else
87
88 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
89 {
90 }
91
92 #endif
93
94 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
95 {
96 return container_of(ref, typeof(struct i915_vma), active);
97 }
98
99 static int __i915_vma_active(struct i915_active *ref)
100 {
101 return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
102 }
103
104 __i915_active_call
105 static void __i915_vma_retire(struct i915_active *ref)
106 {
107 i915_vma_put(active_to_vma(ref));
108 }
109
110 #ifdef __NetBSD__
111 struct i915_vma_key {
112 struct i915_address_space *vm;
113 const struct i915_ggtt_view *view;
114 };
115
116 static int
117 compare_vma(void *cookie, const void *va, const void *vb)
118 {
119 const struct i915_vma *a = va;
120 const struct i915_vma *b = vb;
121 long cmp = i915_vma_compare(__UNCONST(a), b->vm,
122 b->ggtt_view.type == I915_GGTT_VIEW_NORMAL ? NULL : &b->ggtt_view);
123
124 return (cmp < 0 ? -1 : cmp > 0 ? +1 : 0);
125 }
126
127 static int
128 compare_vma_key(void *cookie, const void *vn, const void *vk)
129 {
130 const struct i915_vma *vma = vn;
131 const struct i915_vma_key *key = vk;
132 long cmp = i915_vma_compare(__UNCONST(vma), key->vm, key->view);
133
134 return (cmp < 0 ? -1 : cmp > 0 ? +1 : 0);
135 }
136
137 static const rb_tree_ops_t vma_tree_rb_ops = {
138 .rbto_compare_nodes = compare_vma,
139 .rbto_compare_key = compare_vma_key,
140 .rbto_node_offset = offsetof(struct i915_vma, obj_node),
141 };
142 #endif
143
144 void
145 i915_vma_tree_init(struct drm_i915_gem_object *obj)
146 {
147 #ifdef __NetBSD__
148 rb_tree_init(&obj->vma.tree.rbr_tree, &vma_tree_rb_ops);
149 #else
150 obj->vma.tree = RB_ROOT;
151 #endif
152 }
153
154 static struct i915_vma *
155 vma_create(struct drm_i915_gem_object *obj,
156 struct i915_address_space *vm,
157 const struct i915_ggtt_view *view)
158 {
159 struct i915_vma *vma;
160 struct rb_node *rb, **p;
161
162 /* The aliasing_ppgtt should never be used directly! */
163 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
164
165 vma = i915_vma_alloc();
166 if (vma == NULL)
167 return ERR_PTR(-ENOMEM);
168
169 kref_init(&vma->ref);
170 mutex_init(&vma->pages_mutex);
171 vma->vm = i915_vm_get(vm);
172 vma->ops = &vm->vma_ops;
173 vma->obj = obj;
174 vma->resv = obj->base.resv;
175 vma->size = obj->base.size;
176 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
177
178 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
179
180 /* Declare ourselves safe for use inside shrinkers */
181 if (IS_ENABLED(CONFIG_LOCKDEP)) {
182 fs_reclaim_acquire(GFP_KERNEL);
183 might_lock(&vma->active.mutex);
184 fs_reclaim_release(GFP_KERNEL);
185 }
186
187 INIT_LIST_HEAD(&vma->closed_link);
188
189 if (view && view->type != I915_GGTT_VIEW_NORMAL) {
190 vma->ggtt_view = *view;
191 if (view->type == I915_GGTT_VIEW_PARTIAL) {
192 GEM_BUG_ON(range_overflows_t(u64,
193 view->partial.offset,
194 view->partial.size,
195 obj->base.size >> PAGE_SHIFT));
196 vma->size = view->partial.size;
197 vma->size <<= PAGE_SHIFT;
198 GEM_BUG_ON(vma->size > obj->base.size);
199 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
200 vma->size = intel_rotation_info_size(&view->rotated);
201 vma->size <<= PAGE_SHIFT;
202 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
203 vma->size = intel_remapped_info_size(&view->remapped);
204 vma->size <<= PAGE_SHIFT;
205 }
206 }
207
208 if (unlikely(vma->size > vm->total))
209 goto err_vma;
210
211 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
212
213 if (i915_is_ggtt(vm)) {
214 if (unlikely(overflows_type(vma->size, u32)))
215 goto err_vma;
216
217 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
218 i915_gem_object_get_tiling(obj),
219 i915_gem_object_get_stride(obj));
220 if (unlikely(vma->fence_size < vma->size || /* overflow */
221 vma->fence_size > vm->total))
222 goto err_vma;
223
224 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
225
226 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
227 i915_gem_object_get_tiling(obj),
228 i915_gem_object_get_stride(obj));
229 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
230
231 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
232 }
233
234 spin_lock(&obj->vma.lock);
235
236 #ifdef __NetBSD__
237 __USE(rb);
238 __USE(p);
239 struct i915_vma *collision __diagused;
240 collision = rb_tree_insert_node(&obj->vma.tree.rbr_tree, vma);
241 KASSERT(collision == vma);
242 #else
243 rb = NULL;
244 p = &obj->vma.tree.rb_node;
245 while (*p) {
246 struct i915_vma *pos;
247 long cmp;
248
249 rb = *p;
250 pos = rb_entry(rb, struct i915_vma, obj_node);
251
252 /*
253 * If the view already exists in the tree, another thread
254 * already created a matching vma, so return the older instance
255 * and dispose of ours.
256 */
257 cmp = i915_vma_compare(pos, vm, view);
258 if (cmp == 0) {
259 spin_unlock(&obj->vma.lock);
260 i915_vma_free(vma);
261 return pos;
262 }
263
264 if (cmp < 0)
265 p = &rb->rb_right;
266 else
267 p = &rb->rb_left;
268 }
269 rb_link_node(&vma->obj_node, rb, p);
270 rb_insert_color(&vma->obj_node, &obj->vma.tree);
271 #endif
272
273 if (i915_vma_is_ggtt(vma))
274 /*
275 * We put the GGTT vma at the start of the vma-list, followed
276 * by the ppGGTT vma. This allows us to break early when
277 * iterating over only the GGTT vma for an object, see
278 * for_each_ggtt_vma()
279 */
280 list_add(&vma->obj_link, &obj->vma.list);
281 else
282 list_add_tail(&vma->obj_link, &obj->vma.list);
283
284 spin_unlock(&obj->vma.lock);
285
286 return vma;
287
288 err_vma:
289 i915_vma_free(vma);
290 return ERR_PTR(-E2BIG);
291 }
292
293 static struct i915_vma *
294 vma_lookup(struct drm_i915_gem_object *obj,
295 struct i915_address_space *vm,
296 const struct i915_ggtt_view *view)
297 {
298 #ifdef __NetBSD__
299 const struct i915_vma_key key = { .vm = vm, .view = view };
300
301 return rb_tree_find_node(&obj->vma.tree.rbr_tree, &key);
302 #else
303 struct rb_node *rb;
304
305 rb = obj->vma.tree.rb_node;
306 while (rb) {
307 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
308 long cmp;
309
310 cmp = i915_vma_compare(vma, vm, view);
311 if (cmp == 0)
312 return vma;
313
314 if (cmp < 0)
315 rb = rb->rb_right;
316 else
317 rb = rb->rb_left;
318 }
319
320 return NULL;
321 #endif
322 }
323
324 /**
325 * i915_vma_instance - return the singleton instance of the VMA
326 * @obj: parent &struct drm_i915_gem_object to be mapped
327 * @vm: address space in which the mapping is located
328 * @view: additional mapping requirements
329 *
330 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
331 * the same @view characteristics. If a match is not found, one is created.
332 * Once created, the VMA is kept until either the object is freed, or the
333 * address space is closed.
334 *
335 * Returns the vma, or an error pointer.
336 */
337 struct i915_vma *
338 i915_vma_instance(struct drm_i915_gem_object *obj,
339 struct i915_address_space *vm,
340 const struct i915_ggtt_view *view)
341 {
342 struct i915_vma *vma;
343
344 GEM_BUG_ON(view && !i915_is_ggtt(vm));
345 GEM_BUG_ON(!atomic_read(&vm->open));
346
347 spin_lock(&obj->vma.lock);
348 vma = vma_lookup(obj, vm, view);
349 spin_unlock(&obj->vma.lock);
350
351 /* vma_create() will resolve the race if another creates the vma */
352 if (unlikely(!vma))
353 vma = vma_create(obj, vm, view);
354
355 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
356 return vma;
357 }
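/*
 * Illustrative usage sketch only (not part of the driver): the common
 * caller pattern, assuming "obj" and "ggtt" are the caller's object and
 * global GTT.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */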
358
359 struct i915_vma_work {
360 struct dma_fence_work base;
361 struct i915_vma *vma;
362 struct drm_i915_gem_object *pinned;
363 enum i915_cache_level cache_level;
364 unsigned int flags;
365 };
366
367 static int __vma_bind(struct dma_fence_work *work)
368 {
369 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
370 struct i915_vma *vma = vw->vma;
371 int err;
372
373 err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
374 if (err)
375 atomic_or(I915_VMA_ERROR, &vma->flags);
376
377 return err;
378 }
379
380 static void __vma_release(struct dma_fence_work *work)
381 {
382 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
383
384 if (vw->pinned)
385 __i915_gem_object_unpin_pages(vw->pinned);
386 }
387
388 static const struct dma_fence_work_ops bind_ops = {
389 .name = "bind",
390 .work = __vma_bind,
391 .release = __vma_release,
392 };
393
394 struct i915_vma_work *i915_vma_work(void)
395 {
396 struct i915_vma_work *vw;
397
398 vw = kzalloc(sizeof(*vw), GFP_KERNEL);
399 if (!vw)
400 return NULL;
401
402 dma_fence_work_init(&vw->base, &bind_ops);
403 vw->base.dma.error = -EAGAIN; /* disable the worker by default */
404
405 return vw;
406 }
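/*
 * Illustrative sketch of how the preallocated worker is consumed; this is a
 * condensed view of what i915_vma_pin() does further below, with error
 * handling elided:
 *
 *	work = i915_vma_work();
 *	...
 *	err = i915_vma_bind(vma, cache_level, flags, work);
 *	...
 *	dma_fence_work_commit(&work->base); // runs the bind only if queued
 */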
407
408 /**
409 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
410 * @vma: VMA to map
411 * @cache_level: mapping cache level
412 * @flags: flags like global or local mapping
413 * @work: preallocated worker for allocating and binding the PTE
414 *
415 * DMA addresses are taken from the scatter-gather table of this object (or of
416 * this VMA in case of non-default GGTT views) and PTE entries set up.
417 * Note that DMA addresses are also the only part of the SG table we care about.
418 */
419 int i915_vma_bind(struct i915_vma *vma,
420 enum i915_cache_level cache_level,
421 u32 flags,
422 struct i915_vma_work *work)
423 {
424 u32 bind_flags;
425 u32 vma_flags;
426 int ret;
427
428 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
429 GEM_BUG_ON(vma->size > vma->node.size);
430
431 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
432 vma->node.size,
433 vma->vm->total)))
434 return -ENODEV;
435
436 if (GEM_DEBUG_WARN_ON(!flags))
437 return -EINVAL;
438
439 bind_flags = flags;
440 bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
441
442 vma_flags = atomic_read(&vma->flags);
443 vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
444 if (flags & PIN_UPDATE)
445 bind_flags |= vma_flags;
446 else
447 bind_flags &= ~vma_flags;
448 if (bind_flags == 0)
449 return 0;
450
451 GEM_BUG_ON(!vma->pages);
452
453 trace_i915_vma_bind(vma, bind_flags);
454 if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
455 work->vma = vma;
456 work->cache_level = cache_level;
457 work->flags = bind_flags | I915_VMA_ALLOC;
458
459 /*
460 * Note we only want to chain up to the migration fence on
461 * the pages (not the object itself). As we don't track that,
462 * yet, we have to use the exclusive fence instead.
463 *
464 * Also note that we do not want to track the async vma as
465 * part of the obj->resv->excl_fence as it only affects
466 * execution and not content or object's backing store lifetime.
467 */
468 GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
469 i915_active_set_exclusive(&vma->active, &work->base.dma);
470 work->base.dma.error = 0; /* enable the queue_work() */
471
472 if (vma->obj) {
473 __i915_gem_object_pin_pages(vma->obj);
474 work->pinned = vma->obj;
475 }
476 } else {
477 GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
478 ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
479 if (ret)
480 return ret;
481 }
482
483 atomic_or(bind_flags, &vma->flags);
484 return 0;
485 }
486
487 #ifdef __NetBSD__
488 # define __iomem __i915_vma_iomem
489 #endif
490
491 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
492 {
493 void __iomem *ptr;
494 int err;
495
496 if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
497 err = -ENODEV;
498 goto err;
499 }
500
501 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
502 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
503
504 ptr = READ_ONCE(vma->iomap);
505 if (ptr == NULL) {
506 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
507 vma->node.start,
508 vma->node.size);
509 if (ptr == NULL) {
510 err = -ENOMEM;
511 goto err;
512 }
513
514 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
515 #ifdef __NetBSD__
516 io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, ptr,
517 vma->node.size);
518 #else
519 io_mapping_unmap(ptr);
520 #endif
521 ptr = vma->iomap;
522 }
523 }
524
525 __i915_vma_pin(vma);
526
527 err = i915_vma_pin_fence(vma);
528 if (err)
529 goto err_unpin;
530
531 i915_vma_set_ggtt_write(vma);
532
533 /* NB Access through the GTT requires the device to be awake. */
534 return ptr;
535
536 err_unpin:
537 __i915_vma_unpin(vma);
538 err:
539 return IO_ERR_PTR(err);
540 }
541
542 #ifdef __NetBSD__
543 # undef __iomem
544 #endif
545
546 void i915_vma_flush_writes(struct i915_vma *vma)
547 {
548 if (i915_vma_unset_ggtt_write(vma))
549 intel_gt_flush_ggtt_writes(vma->vm->gt);
550 }
551
552 void i915_vma_unpin_iomap(struct i915_vma *vma)
553 {
554 GEM_BUG_ON(vma->iomap == NULL);
555
556 i915_vma_flush_writes(vma);
557
558 i915_vma_unpin_fence(vma);
559 i915_vma_unpin(vma);
560 }
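/*
 * Illustrative sketch (assumptions: the caller has already pinned the vma
 * into the mappable GGTT aperture, and "src"/"len" are the caller's source
 * buffer and length):
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy_toio(ptr, src, len);
 *	i915_vma_unpin_iomap(vma);
 */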
561
562 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
563 {
564 struct i915_vma *vma;
565 struct drm_i915_gem_object *obj;
566
567 vma = fetch_and_zero(p_vma);
568 if (!vma)
569 return;
570
571 obj = vma->obj;
572 GEM_BUG_ON(!obj);
573
574 i915_vma_unpin(vma);
575 i915_vma_close(vma);
576
577 if (flags & I915_VMA_RELEASE_MAP)
578 i915_gem_object_unpin_map(obj);
579
580 i915_gem_object_put(obj);
581 }
582
583 bool i915_vma_misplaced(const struct i915_vma *vma,
584 u64 size, u64 alignment, u64 flags)
585 {
586 if (!drm_mm_node_allocated(&vma->node))
587 return false;
588
589 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags_const(vma)))
590 return true;
591
592 if (vma->node.size < size)
593 return true;
594
595 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
596 if (alignment && !IS_ALIGNED(vma->node.start, alignment))
597 return true;
598
599 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
600 return true;
601
602 if (flags & PIN_OFFSET_BIAS &&
603 vma->node.start < (flags & PIN_OFFSET_MASK))
604 return true;
605
606 if (flags & PIN_OFFSET_FIXED &&
607 vma->node.start != (flags & PIN_OFFSET_MASK))
608 return true;
609
610 return false;
611 }
612
613 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
614 {
615 bool mappable, fenceable;
616
617 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
618 GEM_BUG_ON(!vma->fence_size);
619
620 fenceable = (vma->node.size >= vma->fence_size &&
621 IS_ALIGNED(vma->node.start, vma->fence_alignment));
622
623 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
624
625 if (mappable && fenceable)
626 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
627 else
628 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
629 }
630
631 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
632 {
633 struct drm_mm_node *node = &vma->node;
634 struct drm_mm_node *other;
635
636 /*
637 * On some machines we have to be careful when putting differing types
638 * of snoopable memory together to avoid the prefetcher crossing memory
639 * domains and dying. During vm initialisation, we decide whether or not
640 * these constraints apply and set the drm_mm.color_adjust
641 * appropriately.
642 */
643 if (!i915_vm_has_cache_coloring(vma->vm))
644 return true;
645
646 /* Only valid to be called on an already inserted vma */
647 GEM_BUG_ON(!drm_mm_node_allocated(node));
648 GEM_BUG_ON(list_empty(&node->node_list));
649
650 other = list_prev_entry(node, node_list);
651 if (i915_node_color_differs(other, color) &&
652 !drm_mm_hole_follows(other))
653 return false;
654
655 other = list_next_entry(node, node_list);
656 if (i915_node_color_differs(other, color) &&
657 !drm_mm_hole_follows(node))
658 return false;
659
660 return true;
661 }
662
663 static void assert_bind_count(const struct drm_i915_gem_object *obj)
664 {
665 /*
666 * Combine the assertion that the object is bound and that we have
667 * pinned its pages. But we should never have bound the object
668 * more than we have pinned its pages. (For complete accuracy, we
669 * assume that no one else is pinning the pages, but as a rough assertion
670 * that we will not run into problems later, this will do!)
671 */
672 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
673 }
674
675 /**
676 * i915_vma_insert - finds a slot for the vma in its address space
677 * @vma: the vma
678 * @size: requested size in bytes (can be larger than the VMA)
679 * @alignment: required alignment
680 * @flags: mask of PIN_* flags to use
681 *
682 * First we try to allocate some free space that meets the requirements for
683 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
684 * preferably the oldest idle entry to make room for the new VMA.
685 *
686 * Returns:
687 * 0 on success, negative error code otherwise.
688 */
689 static int
690 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
691 {
692 unsigned long color;
693 u64 start, end;
694 int ret;
695
696 GEM_BUG_ON(i915_vma_is_closed(vma));
697 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
698 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
699
700 size = max(size, vma->size);
701 alignment = max(alignment, vma->display_alignment);
702 if (flags & PIN_MAPPABLE) {
703 size = max_t(typeof(size), size, vma->fence_size);
704 alignment = max_t(typeof(alignment),
705 alignment, vma->fence_alignment);
706 }
707
708 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
709 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
710 GEM_BUG_ON(!is_power_of_2(alignment));
711
712 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
713 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
714
715 end = vma->vm->total;
716 if (flags & PIN_MAPPABLE)
717 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
718 if (flags & PIN_ZONE_4G)
719 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
720 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
721
722 /* If binding the object/GGTT view requires more space than the entire
723 * aperture has, reject it early before evicting everything in a vain
724 * attempt to find space.
725 */
726 if (size > end) {
727 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%"PRIu64" > %s aperture=%"PRIu64"\n",
728 size, flags & PIN_MAPPABLE ? "mappable" : "total",
729 end);
730 return -ENOSPC;
731 }
732
733 color = 0;
734 if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
735 color = vma->obj->cache_level;
736
737 if (flags & PIN_OFFSET_FIXED) {
738 u64 offset = flags & PIN_OFFSET_MASK;
739 if (!IS_ALIGNED(offset, alignment) ||
740 range_overflows(offset, size, end))
741 return -EINVAL;
742
743 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
744 size, offset, color,
745 flags);
746 if (ret)
747 return ret;
748 } else {
749 /*
750 * We only support huge gtt pages through the 48b PPGTT,
751 * however we also don't want to force any alignment for
752 * objects which need to be tightly packed into the low 32bits.
753 *
754 * Note that we assume that GGTT are limited to 4GiB for the
755 * foreseeable future. See also i915_ggtt_offset().
756 */
757 if (upper_32_bits(end - 1) &&
758 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
759 /*
760 * We can't mix 64K and 4K PTEs in the same page-table
761 * (2M block), and so to avoid the ugliness and
762 * complexity of coloring we opt for just aligning 64K
763 * objects to 2M.
764 */
765 u64 page_alignment =
766 rounddown_pow_of_two(vma->page_sizes.sg |
767 I915_GTT_PAGE_SIZE_2M);
768
769 /*
770 * Check we don't expand for the limited Global GTT
771 * (mappable aperture is even more precious!). This
772 * also checks that we exclude the aliasing-ppgtt.
773 */
774 GEM_BUG_ON(i915_vma_is_ggtt(vma));
775
776 alignment = max(alignment, page_alignment);
777
778 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
779 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
780 }
781
782 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
783 size, alignment, color,
784 start, end, flags);
785 if (ret)
786 return ret;
787
788 GEM_BUG_ON(vma->node.start < start);
789 GEM_BUG_ON(vma->node.start + vma->node.size > end);
790 }
791 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
792 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
793
794 if (vma->obj) {
795 struct drm_i915_gem_object *obj = vma->obj;
796
797 atomic_inc(&obj->bind_count);
798 assert_bind_count(obj);
799 }
800 list_add_tail(&vma->vm_link, &vma->vm->bound_list);
801
802 return 0;
803 }
804
805 static void
806 i915_vma_detach(struct i915_vma *vma)
807 {
808 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
809 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
810
811 /*
812 * And finally now the object is completely decoupled from this
813 * vma, we can drop its hold on the backing storage and allow
814 * it to be reaped by the shrinker.
815 */
816 list_del(&vma->vm_link);
817 if (vma->obj) {
818 struct drm_i915_gem_object *obj = vma->obj;
819
820 assert_bind_count(obj);
821 atomic_dec(&obj->bind_count);
822 }
823 }
824
825 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
826 {
827 unsigned int bound;
828 bool pinned = true;
829
830 bound = atomic_read(&vma->flags);
831 do {
832 if (unlikely(flags & ~bound))
833 return false;
834
835 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
836 return false;
837
838 if (!(bound & I915_VMA_PIN_MASK))
839 goto unpinned;
840
841 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
842 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
843
844 return true;
845
846 unpinned:
847 /*
848 * If pin_count==0, but we are bound, check under the lock to avoid
849 * racing with a concurrent i915_vma_unbind().
850 */
851 mutex_lock(&vma->vm->mutex);
852 do {
853 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
854 pinned = false;
855 break;
856 }
857
858 if (unlikely(flags & ~bound)) {
859 pinned = false;
860 break;
861 }
862 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
863 mutex_unlock(&vma->vm->mutex);
864
865 return pinned;
866 }
867
868 static int vma_get_pages(struct i915_vma *vma)
869 {
870 int err = 0;
871
872 if (atomic_add_unless(&vma->pages_count, 1, 0))
873 return 0;
874
875 /* Allocations ahoy! */
876 if (mutex_lock_interruptible(&vma->pages_mutex))
877 return -EINTR;
878
879 if (!atomic_read(&vma->pages_count)) {
880 if (vma->obj) {
881 err = i915_gem_object_pin_pages(vma->obj);
882 if (err)
883 goto unlock;
884 }
885
886 err = vma->ops->set_pages(vma);
887 if (err) {
888 if (vma->obj)
889 i915_gem_object_unpin_pages(vma->obj);
890 goto unlock;
891 }
892 }
893 atomic_inc(&vma->pages_count);
894
895 unlock:
896 mutex_unlock(&vma->pages_mutex);
897
898 return err;
899 }
900
901 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
902 {
903 /* We allocate under vma_get_pages, so beware the shrinker */
904 mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
905 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
906 if (atomic_sub_return(count, &vma->pages_count) == 0) {
907 vma->ops->clear_pages(vma);
908 GEM_BUG_ON(vma->pages);
909 if (vma->obj)
910 i915_gem_object_unpin_pages(vma->obj);
911 }
912 mutex_unlock(&vma->pages_mutex);
913 }
914
915 static void vma_put_pages(struct i915_vma *vma)
916 {
917 if (atomic_add_unless(&vma->pages_count, -1, 1))
918 return;
919
920 __vma_put_pages(vma, 1);
921 }
922
923 static void vma_unbind_pages(struct i915_vma *vma)
924 {
925 unsigned int count;
926
927 lockdep_assert_held(&vma->vm->mutex);
928
929 /* The upper portion of pages_count is the number of bindings */
930 count = atomic_read(&vma->pages_count);
931 count >>= I915_VMA_PAGES_BIAS;
932 GEM_BUG_ON(!count);
933
934 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
935 }
936
937 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
938 {
939 struct i915_vma_work *work = NULL;
940 intel_wakeref_t wakeref = 0;
941 unsigned int bound;
942 int err;
943
944 BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
945 BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
946
947 GEM_BUG_ON(flags & PIN_UPDATE);
948 GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
949
950 /* First try and grab the pin without rebinding the vma */
951 if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
952 return 0;
953
954 err = vma_get_pages(vma);
955 if (err)
956 return err;
957
958 if (flags & vma->vm->bind_async_flags) {
959 work = i915_vma_work();
960 if (!work) {
961 err = -ENOMEM;
962 goto err_pages;
963 }
964 }
965
966 if (flags & PIN_GLOBAL)
967 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
968
969 /* No more allocations allowed once we hold vm->mutex */
970 err = mutex_lock_interruptible(&vma->vm->mutex);
971 if (err)
972 goto err_fence;
973
974 bound = atomic_read(&vma->flags);
975 if (unlikely(bound & I915_VMA_ERROR)) {
976 err = -ENOMEM;
977 goto err_unlock;
978 }
979
980 if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
981 err = -EAGAIN; /* pins are meant to be fairly temporary */
982 goto err_unlock;
983 }
984
985 if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
986 __i915_vma_pin(vma);
987 goto err_unlock;
988 }
989
990 err = i915_active_acquire(&vma->active);
991 if (err)
992 goto err_unlock;
993
994 if (!(bound & I915_VMA_BIND_MASK)) {
995 err = i915_vma_insert(vma, size, alignment, flags);
996 if (err)
997 goto err_active;
998
999 if (i915_is_ggtt(vma->vm))
1000 __i915_vma_set_map_and_fenceable(vma);
1001 }
1002
1003 GEM_BUG_ON(!vma->pages);
1004 err = i915_vma_bind(vma,
1005 vma->obj ? vma->obj->cache_level : 0,
1006 flags, work);
1007 if (err)
1008 goto err_remove;
1009
1010 /* There should only be at most 2 active bindings (user, global) */
1011 GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
1012 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1013 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1014
1015 __i915_vma_pin(vma);
1016 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1017 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1018 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1019
1020 err_remove:
1021 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1022 i915_vma_detach(vma);
1023 drm_mm_remove_node(&vma->node);
1024 }
1025 err_active:
1026 i915_active_release(&vma->active);
1027 err_unlock:
1028 mutex_unlock(&vma->vm->mutex);
1029 err_fence:
1030 if (work)
1031 dma_fence_work_commit(&work->base);
1032 if (wakeref)
1033 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1034 err_pages:
1035 vma_put_pages(vma);
1036 return err;
1037 }
1038
1039 static void flush_idle_contexts(struct intel_gt *gt)
1040 {
1041 struct intel_engine_cs *engine;
1042 enum intel_engine_id id;
1043
1044 for_each_engine(engine, gt, id)
1045 intel_engine_flush_barriers(engine);
1046
1047 intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1048 }
1049
1050 int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
1051 {
1052 struct i915_address_space *vm = vma->vm;
1053 int err;
1054
1055 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1056
1057 do {
1058 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
1059 if (err != -ENOSPC)
1060 return err;
1061
1062 /* Unlike i915_vma_pin, we don't take no for an answer! */
1063 flush_idle_contexts(vm->gt);
1064 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1065 i915_gem_evict_vm(vm);
1066 mutex_unlock(&vm->mutex);
1067 }
1068 } while (1);
1069 }
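/*
 * Illustrative sketch (assumption: "vma" is a GGTT-only vma for a
 * kernel-internal object such as a ring buffer or context image):
 *
 *	err = i915_ggtt_pin(vma, 0, PIN_HIGH);
 *	if (err)
 *		return err;
 */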
1070
1071 void i915_vma_close(struct i915_vma *vma)
1072 {
1073 struct intel_gt *gt = vma->vm->gt;
1074 unsigned long flags;
1075
1076 GEM_BUG_ON(i915_vma_is_closed(vma));
1077
1078 /*
1079 * We defer actually closing, unbinding and destroying the VMA until
1080 * the next idle point, or if the object is freed in the meantime. By
1081 * postponing the unbind, we allow for it to be resurrected by the
1082 * client, avoiding the work required to rebind the VMA. This is
1083 * advantageous for DRI, where the client/server pass objects
1084 * between themselves, temporarily opening a local VMA to the
1085 * object, and then closing it again. The same object is then reused
1086 * on the next frame (or two, depending on the depth of the swap queue)
1087 * causing us to rebind the VMA once more. This ends up being a lot
1088 * of wasted work for the steady state.
1089 */
1090 spin_lock_irqsave(&gt->closed_lock, flags);
1091 list_add(&vma->closed_link, &gt->closed_vma);
1092 spin_unlock_irqrestore(&gt->closed_lock, flags);
1093 }
1094
1095 static void __i915_vma_remove_closed(struct i915_vma *vma)
1096 {
1097 struct intel_gt *gt = vma->vm->gt;
1098
1099 spin_lock_irq(&gt->closed_lock);
1100 list_del_init(&vma->closed_link);
1101 spin_unlock_irq(&gt->closed_lock);
1102 }
1103
1104 void i915_vma_reopen(struct i915_vma *vma)
1105 {
1106 if (i915_vma_is_closed(vma))
1107 __i915_vma_remove_closed(vma);
1108 }
1109
1110 void i915_vma_release(struct kref *ref)
1111 {
1112 struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1113
1114 if (drm_mm_node_allocated(&vma->node)) {
1115 mutex_lock(&vma->vm->mutex);
1116 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1117 WARN_ON(__i915_vma_unbind(vma));
1118 mutex_unlock(&vma->vm->mutex);
1119 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1120 }
1121 GEM_BUG_ON(i915_vma_is_active(vma));
1122
1123 if (vma->obj) {
1124 struct drm_i915_gem_object *obj = vma->obj;
1125
1126 spin_lock(&obj->vma.lock);
1127 list_del(&vma->obj_link);
1128 rb_erase(&vma->obj_node, &obj->vma.tree);
1129 spin_unlock(&obj->vma.lock);
1130 }
1131
1132 __i915_vma_remove_closed(vma);
1133 i915_vm_put(vma->vm);
1134
1135 i915_active_fini(&vma->active);
1136 i915_vma_free(vma);
1137 }
1138
1139 void i915_vma_parked(struct intel_gt *gt)
1140 {
1141 struct i915_vma *vma, *next;
1142
1143 spin_lock_irq(&gt->closed_lock);
1144 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1145 struct drm_i915_gem_object *obj = vma->obj;
1146 struct i915_address_space *vm = vma->vm;
1147
1148 /* XXX All to avoid keeping a reference on i915_vma itself */
1149
1150 if (!kref_get_unless_zero(&obj->base.refcount))
1151 continue;
1152
1153 if (i915_vm_tryopen(vm)) {
1154 list_del_init(&vma->closed_link);
1155 } else {
1156 i915_gem_object_put(obj);
1157 obj = NULL;
1158 }
1159
1160 spin_unlock_irq(&gt->closed_lock);
1161
1162 if (obj) {
1163 __i915_vma_put(vma);
1164 i915_gem_object_put(obj);
1165 }
1166
1167 i915_vm_close(vm);
1168
1169 /* Restart after dropping lock */
1170 spin_lock_irq(&gt->closed_lock);
1171 next = list_first_entry(&gt->closed_vma,
1172 typeof(*next), closed_link);
1173 }
1174 spin_unlock_irq(&gt->closed_lock);
1175 }
1176
1177 static void __i915_vma_iounmap(struct i915_vma *vma)
1178 {
1179 GEM_BUG_ON(i915_vma_is_pinned(vma));
1180
1181 if (vma->iomap == NULL)
1182 return;
1183
1184 #ifdef __NetBSD__
1185 io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, vma->iomap,
1186 vma->node.size);
1187 #else
1188 io_mapping_unmap(vma->iomap);
1189 #endif
1190 vma->iomap = NULL;
1191 }
1192
1193 void i915_vma_revoke_mmap(struct i915_vma *vma)
1194 {
1195 struct drm_vma_offset_node *node;
1196 u64 vma_offset;
1197
1198 if (!i915_vma_has_userfault(vma))
1199 return;
1200
1201 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1202 GEM_BUG_ON(!vma->obj->userfault_count);
1203
1204 #ifdef __NetBSD__
1205 __USE(vma_offset);
1206 __USE(node);
1207 struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
1208 paddr_t pa = i915->ggtt.gmadr.start + vma->node.start;
1209 vsize_t npgs = vma->size >> PAGE_SHIFT;
1210 while (npgs --> 0)
1211 pmap_pv_protect(pa + (npgs << PAGE_SHIFT), VM_PROT_NONE);
1212 #else
1213 node = &vma->mmo->vma_node;
1214 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1215 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1216 drm_vma_node_offset_addr(node) + vma_offset,
1217 vma->size,
1218 1);
1219 #endif
1220
1221 i915_vma_unset_userfault(vma);
1222 if (!--vma->obj->userfault_count)
1223 list_del(&vma->obj->userfault_link);
1224 }
1225
1226 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1227 {
1228 int err;
1229
1230 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1231
1232 /* Wait for the vma to be bound before we start! */
1233 err = i915_request_await_active(rq, &vma->active);
1234 if (err)
1235 return err;
1236
1237 return i915_active_add_request(&vma->active, rq);
1238 }
1239
1240 int i915_vma_move_to_active(struct i915_vma *vma,
1241 struct i915_request *rq,
1242 unsigned int flags)
1243 {
1244 struct drm_i915_gem_object *obj = vma->obj;
1245 int err;
1246
1247 assert_object_held(obj);
1248
1249 err = __i915_vma_move_to_active(vma, rq);
1250 if (unlikely(err))
1251 return err;
1252
1253 if (flags & EXEC_OBJECT_WRITE) {
1254 struct intel_frontbuffer *front;
1255
1256 front = __intel_frontbuffer_get(obj);
1257 if (unlikely(front)) {
1258 if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1259 i915_active_add_request(&front->write, rq);
1260 intel_frontbuffer_put(front);
1261 }
1262
1263 dma_resv_add_excl_fence(vma->resv, &rq->fence);
1264 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1265 obj->read_domains = 0;
1266 } else {
1267 err = dma_resv_reserve_shared(vma->resv, 1);
1268 if (unlikely(err))
1269 return err;
1270
1271 dma_resv_add_shared_fence(vma->resv, &rq->fence);
1272 obj->write_domain = 0;
1273 }
1274 obj->read_domains |= I915_GEM_GPU_DOMAINS;
1275 obj->mm.dirty = true;
1276
1277 GEM_BUG_ON(!i915_vma_is_active(vma));
1278 return 0;
1279 }
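/*
 * Illustrative sketch (assumptions: "rq" is a request under construction
 * and the object lock is held, as assert_object_held() requires):
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unpin(vma);
 */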
1280
1281 int __i915_vma_unbind(struct i915_vma *vma)
1282 {
1283 int ret;
1284
1285 lockdep_assert_held(&vma->vm->mutex);
1286
1287 /*
1288 * First wait upon any activity as retiring the request may
1289 * have side-effects such as unpinning or even unbinding this vma.
1290 *
1291 * XXX Actually waiting under the vm->mutex is a hindrance and
1292 * should be pipelined wherever possible. In cases where that is
1293 * unavoidable, we should lift the wait to before the mutex.
1294 */
1295 ret = i915_vma_sync(vma);
1296 if (ret)
1297 return ret;
1298
1299 if (i915_vma_is_pinned(vma)) {
1300 vma_print_allocator(vma, "is pinned");
1301 return -EAGAIN;
1302 }
1303
1304 /*
1305 * After confirming that no one else is pinning this vma, wait for
1306 * any laggards who may have crept in during the wait (through
1307 * a residual pin skipping the vm->mutex) to complete.
1308 */
1309 ret = i915_vma_sync(vma);
1310 if (ret)
1311 return ret;
1312
1313 if (!drm_mm_node_allocated(&vma->node))
1314 return 0;
1315
1316 GEM_BUG_ON(i915_vma_is_pinned(vma));
1317 GEM_BUG_ON(i915_vma_is_active(vma));
1318
1319 if (i915_vma_is_map_and_fenceable(vma)) {
1320 /*
1321 * Check that we have flushed all writes through the GGTT
1322 * before the unbind; otherwise, due to the non-strict nature of those
1323 * indirect writes, they may end up referencing the GGTT PTE
1324 * after the unbind.
1325 */
1326 i915_vma_flush_writes(vma);
1327 GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
1328
1329 /* release the fence reg _after_ flushing */
1330 ret = i915_vma_revoke_fence(vma);
1331 if (ret)
1332 return ret;
1333
1334 /* Force a pagefault for domain tracking on next user access */
1335 i915_vma_revoke_mmap(vma);
1336
1337 __i915_vma_iounmap(vma);
1338 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1339 }
1340 GEM_BUG_ON(vma->fence);
1341 GEM_BUG_ON(i915_vma_has_userfault(vma));
1342
1343 if (likely(atomic_read(&vma->vm->open))) {
1344 trace_i915_vma_unbind(vma);
1345 vma->ops->unbind_vma(vma);
1346 }
1347 atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
1348
1349 i915_vma_detach(vma);
1350 vma_unbind_pages(vma);
1351
1352 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1353 return 0;
1354 }
1355
1356 int i915_vma_unbind(struct i915_vma *vma)
1357 {
1358 struct i915_address_space *vm = vma->vm;
1359 intel_wakeref_t wakeref = 0;
1360 int err;
1361
1362 if (!drm_mm_node_allocated(&vma->node))
1363 return 0;
1364
1365 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1366 /* XXX not always required: nop_clear_range */
1367 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1368
1369 err = mutex_lock_interruptible(&vm->mutex);
1370 if (err)
1371 return err;
1372
1373 err = __i915_vma_unbind(vma);
1374 mutex_unlock(&vm->mutex);
1375
1376 if (wakeref)
1377 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1378
1379 return err;
1380 }
1381
1382 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1383 {
1384 i915_gem_object_make_unshrinkable(vma->obj);
1385 return vma;
1386 }
1387
1388 void i915_vma_make_shrinkable(struct i915_vma *vma)
1389 {
1390 i915_gem_object_make_shrinkable(vma->obj);
1391 }
1392
1393 void i915_vma_make_purgeable(struct i915_vma *vma)
1394 {
1395 i915_gem_object_make_purgeable(vma->obj);
1396 }
1397
1398 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1399 #include "selftests/i915_vma.c"
1400 #endif
1401
1402 static void i915_global_vma_shrink(void)
1403 {
1404 kmem_cache_shrink(global.slab_vmas);
1405 }
1406
1407 static void i915_global_vma_exit(void)
1408 {
1409 kmem_cache_destroy(global.slab_vmas);
1410 }
1411
1412 static struct i915_global_vma global = { {
1413 .shrink = i915_global_vma_shrink,
1414 .exit = i915_global_vma_exit,
1415 } };
1416
1417 int __init i915_global_vma_init(void)
1418 {
1419 global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1420 if (!global.slab_vmas)
1421 return -ENOMEM;
1422
1423 i915_global_register(&global.base);
1424 return 0;
1425 }
1426