1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include "xe_pt.h"
7
8 #include "regs/xe_gtt_defs.h"
9 #include "xe_bo.h"
10 #include "xe_device.h"
11 #include "xe_drm_client.h"
12 #include "xe_gt.h"
13 #include "xe_gt_tlb_invalidation.h"
14 #include "xe_migrate.h"
15 #include "xe_pt_types.h"
16 #include "xe_pt_walk.h"
17 #include "xe_res_cursor.h"
18 #include "xe_trace.h"
19 #include "xe_ttm_stolen_mgr.h"
20 #include "xe_vm.h"
21
22 struct xe_pt_dir {
23 struct xe_pt pt;
24 /** @children: Array of page-table child nodes */
25 struct xe_ptw *children[XE_PDES];
26 };
27
28 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
29 #define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
30 #define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
31 #else
32 #define xe_pt_set_addr(__xe_pt, __addr)
33 #define xe_pt_addr(__xe_pt) 0ull
34 #endif
35
36 static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
37 static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};
38
39 #define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)
40
41 static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
42 {
43 return container_of(pt, struct xe_pt_dir, pt);
44 }
45
46 static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
47 {
48 return container_of(pt_dir->children[index], struct xe_pt, base);
49 }
50
51 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
52 unsigned int level)
53 {
54 struct xe_device *xe = tile_to_xe(tile);
55 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
56 u8 id = tile->id;
57
58 if (!xe_vm_has_scratch(vm))
59 return 0;
60
61 if (level > MAX_HUGEPTE_LEVEL)
62 return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
63 0, pat_index);
64
65 return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
66 XE_PTE_NULL;
67 }
68
69 static void xe_pt_free(struct xe_pt *pt)
70 {
71 if (pt->level)
72 kfree(as_xe_pt_dir(pt));
73 else
74 kfree(pt);
75 }
76
77 /**
78 * xe_pt_create() - Create a page-table.
79 * @vm: The vm to create for.
80 * @tile: The tile to create for.
81 * @level: The page-table level.
82 *
83 * Allocate and initialize a single struct xe_pt metadata structure. Also
84 * create the corresponding page-table bo, but don't initialize it. If the
85 * level is greater than zero, then it's assumed to be a directory page-
86 * table and the directory structure is also allocated and initialized to
87 * NULL pointers.
88 *
89 * Return: A valid struct xe_pt pointer on success, Pointer error code on
90 * error.
91 */
92 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
93 unsigned int level)
94 {
95 struct xe_pt *pt;
96 struct xe_bo *bo;
97 int err;
98
99 if (level) {
100 struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);
101
102 pt = (dir) ? &dir->pt : NULL;
103 } else {
104 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
105 }
106 if (!pt)
107 return ERR_PTR(-ENOMEM);
108
109 pt->level = level;
110 bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
111 ttm_bo_type_kernel,
112 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
113 XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
114 XE_BO_FLAG_PINNED |
115 XE_BO_FLAG_NO_RESV_EVICT |
116 XE_BO_FLAG_PAGETABLE);
117 if (IS_ERR(bo)) {
118 err = PTR_ERR(bo);
119 goto err_kfree;
120 }
121 pt->bo = bo;
122 pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
123
124 if (vm->xef)
125 xe_drm_client_add_bo(vm->xef->client, pt->bo);
126 xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);
127
128 return pt;
129
130 err_kfree:
131 xe_pt_free(pt);
132 return ERR_PTR(err);
133 }
134
135 /**
136 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
137 * entries.
138 * @tile: The tile the scratch pagetable of which to use.
139 * @vm: The vm we populate for.
140 * @pt: The pagetable the bo of which to initialize.
141 *
142 * Populate the page-table bo of @pt with entries pointing into the tile's
143 * scratch page-table tree if any. Otherwise populate with zeros.
144 */
145 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
146 struct xe_pt *pt)
147 {
148 struct iosys_map *map = &pt->bo->vmap;
149 u64 empty;
150 int i;
151
152 if (!xe_vm_has_scratch(vm)) {
153 /*
154 * FIXME: Some memory is already allocated to zero?
155 * Find out which memory that is and avoid this memset...
156 */
157 xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
158 } else {
159 empty = __xe_pt_empty_pte(tile, vm, pt->level);
160 for (i = 0; i < XE_PDES; i++)
161 xe_pt_write(vm->xe, map, i, empty);
162 }
163 }
164
165 /**
166 * xe_pt_shift() - Return the ilog2 value of the size of the address range of
167 * a page-table at a certain level.
168 * @level: The level.
169 *
170 * Return: The ilog2 value of the size of the address range of a page-table
171 * at level @level.
172 */
173 unsigned int xe_pt_shift(unsigned int level)
174 {
175 return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
176 }
177
178 /**
179 * xe_pt_destroy() - Destroy a page-table tree.
180 * @pt: The root of the page-table tree to destroy.
181 * @flags: vm flags. Currently unused.
182 * @deferred: List head of lockless list for deferred putting. NULL for
183 * immediate putting.
184 *
185 * Puts the page-table bo, recursively calls xe_pt_destroy on all children
186 * and finally frees @pt. TODO: Can we remove the @flags argument?
187 */
188 void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
189 {
190 int i;
191
192 if (!pt)
193 return;
194
195 XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
196 xe_bo_unpin(pt->bo);
197 xe_bo_put_deferred(pt->bo, deferred);
198
199 if (pt->level > 0 && pt->num_live) {
200 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
201
202 for (i = 0; i < XE_PDES; i++) {
203 if (xe_pt_entry(pt_dir, i))
204 xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
205 deferred);
206 }
207 }
208 xe_pt_free(pt);
209 }
210
211 /**
212 * DOC: Pagetable building
213 *
214 * Below we use the term "page-table" for both page-directories, containing
215 * pointers to lower level page-directories or page-tables, and level 0
216 * page-tables that contain only page-table-entries pointing to memory pages.
217 *
218 * When inserting an address range in an already existing page-table tree
219 * there will typically be a set of page-tables that are shared with other
220 * address ranges, and a set that are private to this address range.
221 * The set of shared page-tables can be at most two per level,
222 * and those can't be updated immediately because the entries of those
223 * page-tables may still be in use by the gpu for other mappings. Therefore
224 * when inserting entries into those, we instead stage those insertions by
225 * adding insertion data into struct xe_vm_pgtable_update structures. This
226 * data (subtrees for the cpu and page-table-entries for the gpu) is then
227 * added in a separate commit step. CPU-data is committed while still under the
228 * vm lock, the object lock and for userptr, the notifier lock in read mode.
229 * The GPU async data is committed either by the GPU or CPU after fulfilling
230 * relevant dependencies.
231 * For non-shared page-tables (and, in fact, for shared ones that don't
232 * exist at the time of staging), we add the data in-place without the
233 * special update structures. This private part of the page-table tree will
234 * remain disconnected from the vm page-table tree until data is committed to
235 * the shared page tables of the vm tree in the commit phase.
236 */
237
238 struct xe_pt_update {
239 /** @update: The update structure we're building for this parent. */
240 struct xe_vm_pgtable_update *update;
241 /** @parent: The parent. Used to detect a parent change. */
242 struct xe_pt *parent;
243 /** @preexisting: Whether the parent was pre-existing or allocated */
244 bool preexisting;
245 };
246
247 struct xe_pt_stage_bind_walk {
248 /** @base: The base class. */
249 struct xe_pt_walk base;
250
251 /* Input parameters for the walk */
252 /** @vm: The vm we're building for. */
253 struct xe_vm *vm;
254 /** @tile: The tile we're building for. */
255 struct xe_tile *tile;
256 /** @default_pte: PTE flag only template. No address is associated */
257 u64 default_pte;
258 /** @dma_offset: DMA offset to add to the PTE. */
259 u64 dma_offset;
260 /**
261 * @needs_64K: This address range enforces 64K alignment and
262 * granularity.
263 */
264 bool needs_64K;
265 /**
266 * @vma: VMA being mapped
267 */
268 struct xe_vma *vma;
269
270 /* Also input, but is updated during the walk */
271 /** @curs: The DMA address cursor. */
272 struct xe_res_cursor *curs;
273 /** @va_curs_start: The virtual address corresponding to @curs->start */
274 u64 va_curs_start;
275
276 /* Output */
277 struct xe_walk_update {
278 /** @wupd.entries: Caller provided storage. */
279 struct xe_vm_pgtable_update *entries;
280 /** @wupd.num_used_entries: Number of update @entries used. */
281 unsigned int num_used_entries;
282 /** @wupd.updates: Tracks the update entry at a given level */
283 struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
284 } wupd;
285
286 /* Walk state */
287 /**
288 * @l0_end_addr: The end address of the current l0 leaf. Used for
289 * 64K granularity detection.
290 */
291 u64 l0_end_addr;
292 /** @addr_64K: The start address of the current 64K chunk. */
293 u64 addr_64K;
294 /** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
295 bool found_64K;
296 };
297
298 static int
299 xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
300 pgoff_t offset, bool alloc_entries)
301 {
302 struct xe_pt_update *upd = &wupd->updates[parent->level];
303 struct xe_vm_pgtable_update *entry;
304
305 /*
306 * For *each level*, we can only have one active
307 * struct xe_pt_update at any one time. Once we move on to a
308 * new parent and page-directory, the old one is complete, and
309 * updates are either already stored in the build tree or in
310 * @wupd->entries.
311 */
312 if (likely(upd->parent == parent))
313 return 0;
314
315 upd->parent = parent;
316 upd->preexisting = true;
317
318 if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
319 return -EINVAL;
320
321 entry = wupd->entries + wupd->num_used_entries++;
322 upd->update = entry;
323 entry->ofs = offset;
324 entry->pt_bo = parent->bo;
325 entry->pt = parent;
326 entry->flags = 0;
327 entry->qwords = 0;
328
329 if (alloc_entries) {
330 entry->pt_entries = kmalloc_array(XE_PDES,
331 sizeof(*entry->pt_entries),
332 GFP_KERNEL);
333 if (!entry->pt_entries)
334 return -ENOMEM;
335 }
336
337 return 0;
338 }
339
340 /*
341 * NOTE: This is a very frequently called function so we allow ourselves
342 * to annotate (using branch prediction hints) the fastpath of updating a
343 * non-pre-existing pagetable with leaf ptes.
344 */
345 static int
346 xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
347 pgoff_t offset, struct xe_pt *xe_child, u64 pte)
348 {
349 struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
350 struct xe_pt_update *child_upd = xe_child ?
351 &xe_walk->wupd.updates[xe_child->level] : NULL;
352 int ret;
353
354 ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
355 if (unlikely(ret))
356 return ret;
357
358 /*
359 * Register this new pagetable so that it won't be recognized as
360 * a shared pagetable by a subsequent insertion.
361 */
362 if (unlikely(child_upd)) {
363 child_upd->update = NULL;
364 child_upd->parent = xe_child;
365 child_upd->preexisting = false;
366 }
367
368 if (likely(!upd->preexisting)) {
369 /* Continue building a non-connected subtree. */
370 struct iosys_map *map = &parent->bo->vmap;
371
372 if (unlikely(xe_child))
373 parent->base.children[offset] = &xe_child->base;
374
375 xe_pt_write(xe_walk->vm->xe, map, offset, pte);
376 parent->num_live++;
377 } else {
378 /* Shared pt. Stage update. */
379 unsigned int idx;
380 struct xe_vm_pgtable_update *entry = upd->update;
381
382 idx = offset - entry->ofs;
383 entry->pt_entries[idx].pt = xe_child;
384 entry->pt_entries[idx].pte = pte;
385 entry->qwords++;
386 }
387
388 return 0;
389 }
390
391 static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
392 struct xe_pt_stage_bind_walk *xe_walk)
393 {
394 u64 size, dma;
395
396 if (level > MAX_HUGEPTE_LEVEL)
397 return false;
398
399 /* Does the virtual range requested cover a huge pte? */
400 if (!xe_pt_covers(addr, next, level, &xe_walk->base))
401 return false;
402
403 /* Does the DMA segment cover the whole pte? */
404 if (next - xe_walk->va_curs_start > xe_walk->curs->size)
405 return false;
406
407 /* null VMAs do not have dma addresses */
408 if (xe_vma_is_null(xe_walk->vma))
409 return true;
410
411 /* Is the DMA address huge PTE size aligned? */
412 size = next - addr;
413 dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
414
415 return IS_ALIGNED(dma, size);
416 }
417
418 /*
419 * Scan the requested mapping to check whether it can be done entirely
420 * with 64K PTEs.
421 */
422 static bool
423 xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
424 {
425 struct xe_res_cursor curs = *xe_walk->curs;
426
427 if (!IS_ALIGNED(addr, SZ_64K))
428 return false;
429
430 if (next > xe_walk->l0_end_addr)
431 return false;
432
433 /* null VMAs do not have dma addresses */
434 if (xe_vma_is_null(xe_walk->vma))
435 return true;
436
437 xe_res_next(&curs, addr - xe_walk->va_curs_start);
438 for (; addr < next; addr += SZ_64K) {
439 if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
440 return false;
441
442 xe_res_next(&curs, SZ_64K);
443 }
444
445 return addr == next;
446 }
447
448 /*
449 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
450 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
451 * device to the PTE.
452 * This function determines whether the address is part of such a
453 * segment. For VRAM in normal pagetables, this is strictly necessary on
454 * some devices.
455 */
456 static bool
457 xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
458 {
459 /* Address is within an already found 64k region */
460 if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
461 return true;
462
463 xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
464 xe_walk->addr_64K = addr;
465
466 return xe_walk->found_64K;
467 }
468
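/*
 * Pagewalk callback for the bind stage: write a leaf PTE (at level 0 or as
 * a huge PTE) when possible, otherwise allocate a child page-table and
 * descend. Writes to pre-existing (shared) page-tables are staged in the
 * update structures; newly allocated page-tables are written directly.
 */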
469 static int
470 xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
471 unsigned int level, u64 addr, u64 next,
472 struct xe_ptw **child,
473 enum page_walk_action *action,
474 struct xe_pt_walk *walk)
475 {
476 struct xe_pt_stage_bind_walk *xe_walk =
477 container_of(walk, typeof(*xe_walk), base);
478 u16 pat_index = xe_walk->vma->pat_index;
479 struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
480 struct xe_vm *vm = xe_walk->vm;
481 struct xe_pt *xe_child;
482 bool covers;
483 int ret = 0;
484 u64 pte;
485
486 /* Is this a leaf entry? */
487 if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
488 struct xe_res_cursor *curs = xe_walk->curs;
489 bool is_null = xe_vma_is_null(xe_walk->vma);
490
491 XE_WARN_ON(xe_walk->va_curs_start != addr);
492
493 pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
494 xe_res_dma(curs) + xe_walk->dma_offset,
495 xe_walk->vma, pat_index, level);
496 pte |= xe_walk->default_pte;
497
498 /*
499 * Set the XE_PTE_PS64 hint if possible, otherwise if
500 * this device *requires* 64K PTE size for VRAM, fail.
501 */
502 if (level == 0 && !xe_parent->is_compact) {
503 if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
504 xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
505 pte |= XE_PTE_PS64;
506 } else if (XE_WARN_ON(xe_walk->needs_64K)) {
507 return -EINVAL;
508 }
509 }
510
511 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
512 if (unlikely(ret))
513 return ret;
514
515 if (!is_null)
516 xe_res_next(curs, next - addr);
517 xe_walk->va_curs_start = next;
518 xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
519 *action = ACTION_CONTINUE;
520
521 return ret;
522 }
523
524 /*
525 * Descending to lower level. Determine if we need to allocate a
526 * new page table or -directory, which we do if there is no
527 * previous one or there is one we can completely replace.
528 */
529 if (level == 1) {
530 walk->shifts = xe_normal_pt_shifts;
531 xe_walk->l0_end_addr = next;
532 }
533
534 covers = xe_pt_covers(addr, next, level, &xe_walk->base);
535 if (covers || !*child) {
536 u64 flags = 0;
537
538 xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
539 if (IS_ERR(xe_child))
540 return PTR_ERR(xe_child);
541
542 xe_pt_set_addr(xe_child,
543 round_down(addr, 1ull << walk->shifts[level]));
544
545 if (!covers)
546 xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);
547
548 *child = &xe_child->base;
549
550 /*
551 * Prefer the compact pagetable layout for L0 if possible. Only
552 * possible if VMA covers entire 2MB region as compact 64k and
553 * 4k pages cannot be mixed within a 2MB region.
554 * TODO: Suballocate the pt bo to avoid wasting a lot of
555 * memory.
556 */
557 if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
558 covers && xe_pt_scan_64K(addr, next, xe_walk)) {
559 walk->shifts = xe_compact_pt_shifts;
560 xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
561 flags |= XE_PDE_64K;
562 xe_child->is_compact = true;
563 }
564
565 pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
566 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
567 pte);
568 }
569
570 *action = ACTION_SUBTREE;
571 return ret;
572 }
573
574 static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
575 .pt_entry = xe_pt_stage_bind_entry,
576 };
577
578 /**
579 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
580 * range.
581 * @tile: The tile we're building for.
582 * @vma: The vma indicating the address range.
583 * @entries: Storage for the update entries used for connecting the tree to
584 * the main tree at commit time.
585 * @num_entries: On output contains the number of @entries used.
586 *
587 * This function builds a disconnected page-table tree for a given address
588 * range. The tree is connected to the main vm tree for the gpu using
589 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
590 * The function builds xe_vm_pgtable_update structures for already existing
591 * shared page-tables, and non-existing shared and non-shared page-tables
592 * are built and populated directly.
593 *
594 * Return: 0 on success, negative error code on error.
595 */
596 static int
597 xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
598 struct xe_vm_pgtable_update *entries, u32 *num_entries)
599 {
600 struct xe_device *xe = tile_to_xe(tile);
601 struct xe_bo *bo = xe_vma_bo(vma);
602 bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
603 (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
604 struct xe_res_cursor curs;
605 struct xe_pt_stage_bind_walk xe_walk = {
606 .base = {
607 .ops = &xe_pt_stage_bind_ops,
608 .shifts = xe_normal_pt_shifts,
609 .max_level = XE_PT_HIGHEST_LEVEL,
610 },
611 .vm = xe_vma_vm(vma),
612 .tile = tile,
613 .curs = &curs,
614 .va_curs_start = xe_vma_start(vma),
615 .vma = vma,
616 .wupd.entries = entries,
617 .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
618 };
619 struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
620 int ret;
621
622 if ((vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
623 (is_devmem || !IS_DGFX(xe)))
624 xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
625
626 if (is_devmem) {
627 xe_walk.default_pte |= XE_PPGTT_PTE_DM;
628 xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
629 }
630
631 if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
632 xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
633
634 xe_bo_assert_held(bo);
635
636 if (!xe_vma_is_null(vma)) {
637 if (xe_vma_is_userptr(vma))
638 xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
639 xe_vma_size(vma), &curs);
640 else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
641 xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
642 xe_vma_size(vma), &curs);
643 else
644 xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
645 xe_vma_size(vma), &curs);
646 } else {
647 curs.size = xe_vma_size(vma);
648 }
649
650 ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
651 xe_vma_end(vma), &xe_walk.base);
652
653 *num_entries = xe_walk.wupd.num_used_entries;
654 return ret;
655 }
656
657 /**
658 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
659 * shared pagetable.
660 * @addr: The start address within the non-shared pagetable.
661 * @end: The end address within the non-shared pagetable.
662 * @level: The level of the non-shared pagetable.
663 * @walk: Walk info. The function adjusts the walk action.
664 * @action: next action to perform (see enum page_walk_action)
665 * @offset: Ignored on input, first non-shared entry on output.
666 * @end_offset: Ignored on input, last non-shared entry + 1 on output.
667 *
668 * A non-shared page-table has some entries that belong to the address range
669 * and others that don't. This function determines the entries that belong
670 * fully to the address range. Depending on level, some entries may
671 * partially belong to the address range (that can't happen at level 0).
672 * The function detects that and adjusts those offsets to not include those
673 * partial entries. Iff it does detect partial entries, we know that there must
674 * be shared page tables also at lower levels, so it adjusts the walk action
675 * accordingly.
676 *
677 * Return: true if there were non-shared entries, false otherwise.
678 */
679 static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
680 struct xe_pt_walk *walk,
681 enum page_walk_action *action,
682 pgoff_t *offset, pgoff_t *end_offset)
683 {
684 u64 size = 1ull << walk->shifts[level];
685
686 *offset = xe_pt_offset(addr, level, walk);
687 *end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;
688
689 if (!level)
690 return true;
691
692 /*
693 * If addr or next are not size aligned, there are shared pts at lower
694 * level, so in that case traverse down the subtree
695 */
696 *action = ACTION_CONTINUE;
697 if (!IS_ALIGNED(addr, size)) {
698 *action = ACTION_SUBTREE;
699 (*offset)++;
700 }
701
702 if (!IS_ALIGNED(end, size)) {
703 *action = ACTION_SUBTREE;
704 (*end_offset)--;
705 }
706
707 return *end_offset > *offset;
708 }
709
710 struct xe_pt_zap_ptes_walk {
711 /** @base: The walk base-class */
712 struct xe_pt_walk base;
713
714 /* Input parameters for the walk */
715 /** @tile: The tile we're building for */
716 struct xe_tile *tile;
717
718 /* Output */
719 /** @needs_invalidate: Whether we need to invalidate TLB */
720 bool needs_invalidate;
721 };
722
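/*
 * Pagewalk callback that zeroes the non-shared ptes of the walked range
 * within a child page-table and records that a TLB invalidation is needed.
 */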
723 static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
724 unsigned int level, u64 addr, u64 next,
725 struct xe_ptw **child,
726 enum page_walk_action *action,
727 struct xe_pt_walk *walk)
728 {
729 struct xe_pt_zap_ptes_walk *xe_walk =
730 container_of(walk, typeof(*xe_walk), base);
731 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
732 pgoff_t end_offset;
733
734 XE_WARN_ON(!*child);
735 XE_WARN_ON(!level && xe_child->is_compact);
736
737 /*
738 * Note that we're called from an entry callback, and we're dealing
739 * with the child of that entry rather than the parent, so need to
740 * adjust level down.
741 */
742 if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
743 &end_offset)) {
744 xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
745 offset * sizeof(u64), 0,
746 (end_offset - offset) * sizeof(u64));
747 xe_walk->needs_invalidate = true;
748 }
749
750 return 0;
751 }
752
753 static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
754 .pt_entry = xe_pt_zap_ptes_entry,
755 };
756
757 /**
758 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
759 * @tile: The tile we're zapping for.
760 * @vma: GPU VMA detailing address range.
761 *
762 * Eviction and Userptr invalidation need to be able to zap the
763 * gpu ptes of a given address range in pagefaulting mode.
764 * In order to be able to do that, this function needs access to the shared
765 * page-table entries so it can either clear the leaf PTEs or
766 * clear the pointers to lower-level page-tables. The caller is required
767 * to hold the necessary locks to ensure neither the page-table connectivity
768 * nor the page-table entries of the range is updated from under us.
769 *
770 * Return: Whether ptes were actually updated and a TLB invalidation is
771 * required.
772 */
773 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
774 {
775 struct xe_pt_zap_ptes_walk xe_walk = {
776 .base = {
777 .ops = &xe_pt_zap_ptes_ops,
778 .shifts = xe_normal_pt_shifts,
779 .max_level = XE_PT_HIGHEST_LEVEL,
780 },
781 .tile = tile,
782 };
783 struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
784
785 if (!(vma->tile_present & BIT(tile->id)))
786 return false;
787
788 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
789 xe_vma_end(vma), &xe_walk.base);
790
791 return xe_walk.needs_invalidate;
792 }
793
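/*
 * Populate callback used by the migrate code: copy the staged ptes of an
 * update entry either into the destination iosys_map (@map) or into a plain
 * CPU buffer (@data).
 */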
794 static void
795 xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
796 struct iosys_map *map, void *data,
797 u32 qword_ofs, u32 num_qwords,
798 const struct xe_vm_pgtable_update *update)
799 {
800 struct xe_pt_entry *ptes = update->pt_entries;
801 u64 *ptr = data;
802 u32 i;
803
804 for (i = 0; i < num_qwords; i++) {
805 if (map)
806 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
807 sizeof(u64), u64, ptes[i].pte);
808 else
809 ptr[i] = ptes[i].pte;
810 }
811 }
812
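/*
 * Roll back a failed bind staging: destroy the page-tables created for the
 * staged entries and free the per-entry pte arrays.
 */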
813 static void xe_pt_abort_bind(struct xe_vma *vma,
814 struct xe_vm_pgtable_update *entries,
815 u32 num_entries)
816 {
817 u32 i, j;
818
819 for (i = 0; i < num_entries; i++) {
820 if (!entries[i].pt_entries)
821 continue;
822
823 for (j = 0; j < entries[i].qwords; j++)
824 xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
825 kfree(entries[i].pt_entries);
826 }
827 }
828
829 static void xe_pt_commit_locks_assert(struct xe_vma *vma)
830 {
831 struct xe_vm *vm = xe_vma_vm(vma);
832
833 lockdep_assert_held(&vm->lock);
834
835 if (xe_vma_is_userptr(vma))
836 lockdep_assert_held_read(&vm->userptr.notifier_lock);
837 else if (!xe_vma_is_null(vma))
838 dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
839
840 xe_vm_assert_held(vm);
841 }
842
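/*
 * CPU-side commit of a staged bind: bump the live-entry counts and connect
 * the newly built subtrees into the shared page-tables, deferring
 * destruction of any page-tables they replace.
 */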
843 static void xe_pt_commit_bind(struct xe_vma *vma,
844 struct xe_vm_pgtable_update *entries,
845 u32 num_entries, bool rebind,
846 struct llist_head *deferred)
847 {
848 u32 i, j;
849
850 xe_pt_commit_locks_assert(vma);
851
852 for (i = 0; i < num_entries; i++) {
853 struct xe_pt *pt = entries[i].pt;
854 struct xe_pt_dir *pt_dir;
855
856 if (!rebind)
857 pt->num_live += entries[i].qwords;
858
859 if (!pt->level) {
860 kfree(entries[i].pt_entries);
861 continue;
862 }
863
864 pt_dir = as_xe_pt_dir(pt);
865 for (j = 0; j < entries[i].qwords; j++) {
866 u32 j_ = j + entries[i].ofs;
867 struct xe_pt *newpte = entries[i].pt_entries[j].pt;
868
869 if (xe_pt_entry(pt_dir, j_))
870 xe_pt_destroy(xe_pt_entry(pt_dir, j_),
871 xe_vma_vm(vma)->flags, deferred);
872
873 pt_dir->children[j_] = &newpte->base;
874 }
875 kfree(entries[i].pt_entries);
876 }
877 }
878
879 static int
880 xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
881 struct xe_vm_pgtable_update *entries, u32 *num_entries)
882 {
883 int err;
884
885 *num_entries = 0;
886 err = xe_pt_stage_bind(tile, vma, entries, num_entries);
887 if (!err)
888 xe_tile_assert(tile, *num_entries);
889 else /* abort! */
890 xe_pt_abort_bind(vma, entries, *num_entries);
891
892 return err;
893 }
894
895 static void xe_vm_dbg_print_entries(struct xe_device *xe,
896 const struct xe_vm_pgtable_update *entries,
897 unsigned int num_entries)
898 #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
899 {
900 unsigned int i;
901
902 vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
903 for (i = 0; i < num_entries; i++) {
904 const struct xe_vm_pgtable_update *entry = &entries[i];
905 struct xe_pt *xe_pt = entry->pt;
906 u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
907 u64 end;
908 u64 start;
909
910 xe_assert(xe, !entry->pt->is_compact);
911 start = entry->ofs * page_size;
912 end = start + page_size * entry->qwords;
913 vm_dbg(&xe->drm,
914 "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
915 i, xe_pt->level, entry->ofs, entry->qwords,
916 xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
917 }
918 }
919 #else
920 {}
921 #endif
922
923 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
924
925 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
926 {
927 u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
928 static u32 count;
929
930 if (count++ % divisor == divisor - 1) {
931 struct xe_vm *vm = xe_vma_vm(&uvma->vma);
932
933 uvma->userptr.divisor = divisor << 1;
934 spin_lock(&vm->userptr.invalidated_lock);
935 list_move_tail(&uvma->userptr.invalidate_link,
936 &vm->userptr.invalidated);
937 spin_unlock(&vm->userptr.invalidated_lock);
938 return true;
939 }
940
941 return false;
942 }
943
944 #else
945
946 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
947 {
948 return false;
949 }
950
951 #endif
952
953 /**
954 * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
955 * @base: Base we derive from.
956 * @bind: Whether this is a bind or an unbind operation. A bind operation
957 * makes the pre-commit callback error with -EAGAIN if it detects a
958 * pending invalidation.
959 * @locked: Whether the pre-commit callback locked the userptr notifier lock
960 * and it needs unlocking.
961 */
962 struct xe_pt_migrate_pt_update {
963 struct xe_migrate_pt_update base;
964 bool bind;
965 bool locked;
966 };
967
968 /*
969 * This function adds the needed dependencies to a page-table update job
970 * to make sure racing jobs for separate bind engines don't race writing
971 * to the same page-table range, wreaking havoc. Initially use a single
972 * fence for the entire VM. An optimization would use smaller granularity.
973 */
974 static int xe_pt_vm_dependencies(struct xe_sched_job *job,
975 struct xe_range_fence_tree *rftree,
976 u64 start, u64 last)
977 {
978 struct xe_range_fence *rtfence;
979 struct dma_fence *fence;
980 int err;
981
982 rtfence = xe_range_fence_tree_first(rftree, start, last);
983 while (rtfence) {
984 fence = rtfence->fence;
985
986 if (!dma_fence_is_signaled(fence)) {
987 /*
988 * Is this a CPU update? GPU is busy updating, so return
989 * an error
990 */
991 if (!job)
992 return -ETIME;
993
994 dma_fence_get(fence);
995 err = drm_sched_job_add_dependency(&job->drm, fence);
996 if (err)
997 return err;
998 }
999
1000 rtfence = xe_range_fence_tree_next(rtfence, start, last);
1001 }
1002
1003 return 0;
1004 }
1005
1006 static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
1007 {
1008 struct xe_range_fence_tree *rftree =
1009 &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
1010
1011 return xe_pt_vm_dependencies(pt_update->job, rftree,
1012 pt_update->start, pt_update->last);
1013 }
1014
1015 static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
1016 {
1017 struct xe_pt_migrate_pt_update *userptr_update =
1018 container_of(pt_update, typeof(*userptr_update), base);
1019 struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
1020 unsigned long notifier_seq = uvma->userptr.notifier_seq;
1021 struct xe_vm *vm = xe_vma_vm(&uvma->vma);
1022 int err = xe_pt_vm_dependencies(pt_update->job,
1023 &vm->rftree[pt_update->tile_id],
1024 pt_update->start,
1025 pt_update->last);
1026
1027 if (err)
1028 return err;
1029
1030 userptr_update->locked = false;
1031
1032 /*
1033 * Wait until nobody is running the invalidation notifier, and
1034 * since we're exiting the loop holding the notifier lock,
1035 * nobody can proceed invalidating either.
1036 *
1037 * Note that we don't update the vma->userptr.notifier_seq since
1038 * we don't update the userptr pages.
1039 */
1040 do {
1041 down_read(&vm->userptr.notifier_lock);
1042 if (!mmu_interval_read_retry(&uvma->userptr.notifier,
1043 notifier_seq))
1044 break;
1045
1046 up_read(&vm->userptr.notifier_lock);
1047
1048 if (userptr_update->bind)
1049 return -EAGAIN;
1050
1051 notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
1052 } while (true);
1053
1054 /* Inject errors to test whether they are handled correctly */
1055 if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
1056 up_read(&vm->userptr.notifier_lock);
1057 return -EAGAIN;
1058 }
1059
1060 userptr_update->locked = true;
1061
1062 return 0;
1063 }
1064
1065 static const struct xe_migrate_pt_update_ops bind_ops = {
1066 .populate = xe_vm_populate_pgtable,
1067 .pre_commit = xe_pt_pre_commit,
1068 };
1069
1070 static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
1071 .populate = xe_vm_populate_pgtable,
1072 .pre_commit = xe_pt_userptr_pre_commit,
1073 };
1074
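/*
 * Fence wrapping a bind/unbind fence (@fence): once @fence signals without
 * error, a worker issues the GT TLB invalidation and the base fence signals
 * on its completion; if @fence signaled with an error, the base fence is
 * signaled immediately with that error.
 */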
1075 struct invalidation_fence {
1076 struct xe_gt_tlb_invalidation_fence base;
1077 struct xe_gt *gt;
1078 struct xe_vma *vma;
1079 struct dma_fence *fence;
1080 struct dma_fence_cb cb;
1081 struct work_struct work;
1082 };
1083
1084 static const char *
1085 invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
1086 {
1087 return "xe";
1088 }
1089
1090 static const char *
1091 invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
1092 {
1093 return "invalidation_fence";
1094 }
1095
1096 static const struct dma_fence_ops invalidation_fence_ops = {
1097 .get_driver_name = invalidation_fence_get_driver_name,
1098 .get_timeline_name = invalidation_fence_get_timeline_name,
1099 };
1100
1101 static void invalidation_fence_cb(struct dma_fence *fence,
1102 struct dma_fence_cb *cb)
1103 {
1104 struct invalidation_fence *ifence =
1105 container_of(cb, struct invalidation_fence, cb);
1106
1107 trace_xe_gt_tlb_invalidation_fence_cb(&ifence->base);
1108 if (!ifence->fence->error) {
1109 queue_work(system_wq, &ifence->work);
1110 } else {
1111 ifence->base.base.error = ifence->fence->error;
1112 dma_fence_signal(&ifence->base.base);
1113 dma_fence_put(&ifence->base.base);
1114 }
1115 dma_fence_put(ifence->fence);
1116 }
1117
1118 static void invalidation_fence_work_func(struct work_struct *w)
1119 {
1120 struct invalidation_fence *ifence =
1121 container_of(w, struct invalidation_fence, work);
1122
1123 trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
1124 xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
1125 }
1126
1127 static int invalidation_fence_init(struct xe_gt *gt,
1128 struct invalidation_fence *ifence,
1129 struct dma_fence *fence,
1130 struct xe_vma *vma)
1131 {
1132 int ret;
1133
1134 trace_xe_gt_tlb_invalidation_fence_create(&ifence->base);
1135
1136 spin_lock_irq(&gt->tlb_invalidation.lock);
1137 dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
1138 &gt->tlb_invalidation.lock,
1139 dma_fence_context_alloc(1), 1);
1140 spin_unlock_irq(&gt->tlb_invalidation.lock);
1141
1142 INIT_LIST_HEAD(&ifence->base.link);
1143
1144 dma_fence_get(&ifence->base.base); /* Ref for caller */
1145 ifence->fence = fence;
1146 ifence->gt = gt;
1147 ifence->vma = vma;
1148
1149 INIT_WORK(&ifence->work, invalidation_fence_work_func);
1150 ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
1151 if (ret == -ENOENT) {
1152 dma_fence_put(ifence->fence); /* Usually dropped in CB */
1153 invalidation_fence_work_func(&ifence->work);
1154 } else if (ret) {
1155 dma_fence_put(&ifence->base.base); /* Caller ref */
1156 dma_fence_put(&ifence->base.base); /* Creation ref */
1157 }
1158
1159 xe_gt_assert(gt, !ret || ret == -ENOENT);
1160
1161 return ret && ret != -ENOENT ? ret : 0;
1162 }
1163
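/*
 * Compute the range-fence interval for this update: the vma range aligned
 * out to the highest page-table level touched by the entries.
 */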
1164 static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
1165 struct xe_pt_migrate_pt_update *update,
1166 struct xe_vm_pgtable_update *entries,
1167 u32 num_entries)
1168 {
1169 int i, level = 0;
1170
1171 for (i = 0; i < num_entries; i++) {
1172 const struct xe_vm_pgtable_update *entry = &entries[i];
1173
1174 if (entry->pt->level > level)
1175 level = entry->pt->level;
1176 }
1177
1178 /* Greedy (non-optimal) calculation but simple */
1179 update->base.start = ALIGN_DOWN(xe_vma_start(vma),
1180 0x1ull << xe_pt_shift(level));
1181 update->base.last = ALIGN(xe_vma_end(vma),
1182 0x1ull << xe_pt_shift(level)) - 1;
1183 }
1184
1185 /**
1186 * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
1187 * address range.
1188 * @tile: The tile to bind for.
1189 * @vma: The vma to bind.
1190 * @q: The exec_queue with which to do pipelined page-table updates.
1191 * @syncs: Entries to sync on before binding the built tree to the live vm tree.
1192 * @num_syncs: Number of @sync entries.
1193 * @rebind: Whether we're rebinding this vma to the same address range without
1194 * an unbind in-between.
1195 *
1196 * This function builds a page-table tree (see xe_pt_stage_bind() for more
1197 * information on page-table building), and the xe_vm_pgtable_update entries
1198 * abstracting the operations needed to attach it to the main vm tree. It
1199 * then takes the relevant locks and updates the metadata side of the main
1200 * vm tree and submits the operations for pipelined attachment of the
1201 * gpu page-table to the vm main tree (which can be done either by the
1202 * cpu or the GPU).
1203 *
1204 * Return: A valid dma-fence representing the pipelined attachment operation
1205 * on success, an error pointer on error.
1206 */
1207 struct dma_fence *
1208 __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
1209 struct xe_sync_entry *syncs, u32 num_syncs,
1210 bool rebind)
1211 {
1212 struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
1213 struct xe_pt_migrate_pt_update bind_pt_update = {
1214 .base = {
1215 .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
1216 .vma = vma,
1217 .tile_id = tile->id,
1218 },
1219 .bind = true,
1220 };
1221 struct xe_vm *vm = xe_vma_vm(vma);
1222 u32 num_entries;
1223 struct dma_fence *fence;
1224 struct invalidation_fence *ifence = NULL;
1225 struct xe_range_fence *rfence;
1226 int err;
1227
1228 bind_pt_update.locked = false;
1229 xe_bo_assert_held(xe_vma_bo(vma));
1230 xe_vm_assert_held(vm);
1231
1232 vm_dbg(&xe_vma_vm(vma)->xe->drm,
1233 "Preparing bind, with range [%llx...%llx) engine %p.\n",
1234 xe_vma_start(vma), xe_vma_end(vma), q);
1235
1236 err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
1237 if (err)
1238 goto err;
1239
1240 err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
1241 if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1242 err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
1243 if (err)
1244 goto err;
1245
1246 xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
1247
1248 xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
1249 xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
1250 num_entries);
1251
1252 /*
1253 * If rebind, we have to invalidate TLB on !LR vms to invalidate
1254 * cached PTEs that point to freed memory. On LR vms this is done
1255 * automatically when the context is re-enabled by the rebind worker,
1256 * or in fault mode it was invalidated on PTE zapping.
1257 *
1258 * If !rebind, and scratch is enabled on the VM, there is a chance the scratch
1259 * PTE is already cached in the TLB so it needs to be invalidated.
1260 * On !LR VMs this is done in the ring ops preceding a batch, but on
1261 * non-faulting LR, in particular on user-space batch buffer chaining,
1262 * it needs to be done here.
1263 */
1264 if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
1265 ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
1266 if (!ifence)
1267 return ERR_PTR(-ENOMEM);
1268 } else if (rebind && !xe_vm_in_lr_mode(vm)) {
1269 /* We bump also if batch_invalidate_tlb is true */
1270 vm->tlb_flush_seqno++;
1271 }
1272
1273 rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
1274 if (!rfence) {
1275 kfree(ifence);
1276 return ERR_PTR(-ENOMEM);
1277 }
1278
1279 fence = xe_migrate_update_pgtables(tile->migrate,
1280 vm, xe_vma_bo(vma), q,
1281 entries, num_entries,
1282 syncs, num_syncs,
1283 &bind_pt_update.base);
1284 if (!IS_ERR(fence)) {
1285 bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
1286 LLIST_HEAD(deferred);
1287 int err;
1288
1289 err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
1290 &xe_range_fence_kfree_ops,
1291 bind_pt_update.base.start,
1292 bind_pt_update.base.last, fence);
1293 if (err)
1294 dma_fence_wait(fence, false);
1295
1296 /* TLB invalidation must be done before signaling rebind */
1297 if (ifence) {
1298 int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
1299 vma);
1300 if (err) {
1301 dma_fence_put(fence);
1302 kfree(ifence);
1303 return ERR_PTR(err);
1304 }
1305 fence = &ifence->base.base;
1306 }
1307
1308 /* add shared fence now for pagetable delayed destroy */
1309 dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
1310 last_munmap_rebind ?
1311 DMA_RESV_USAGE_KERNEL :
1312 DMA_RESV_USAGE_BOOKKEEP);
1313
1314 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1315 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
1316 DMA_RESV_USAGE_BOOKKEEP);
1317 xe_pt_commit_bind(vma, entries, num_entries, rebind,
1318 bind_pt_update.locked ? &deferred : NULL);
1319
1320 /* This vma is live (again?) now */
1321 vma->tile_present |= BIT(tile->id);
1322
1323 if (bind_pt_update.locked) {
1324 to_userptr_vma(vma)->userptr.initial_bind = true;
1325 up_read(&vm->userptr.notifier_lock);
1326 xe_bo_put_commit(&deferred);
1327 }
1328 if (!rebind && last_munmap_rebind &&
1329 xe_vm_in_preempt_fence_mode(vm))
1330 xe_vm_queue_rebind_worker(vm);
1331 } else {
1332 kfree(rfence);
1333 kfree(ifence);
1334 if (bind_pt_update.locked)
1335 up_read(&vm->userptr.notifier_lock);
1336 xe_pt_abort_bind(vma, entries, num_entries);
1337 }
1338
1339 return fence;
1340
1341 err:
1342 return ERR_PTR(err);
1343 }
1344
1345 struct xe_pt_stage_unbind_walk {
1346 /** @base: The pagewalk base-class. */
1347 struct xe_pt_walk base;
1348
1349 /* Input parameters for the walk */
1350 /** @tile: The tile we're unbinding from. */
1351 struct xe_tile *tile;
1352
1353 /**
1354 * @modified_start: Walk range start, modified to include any
1355 * shared pagetables that we're the only user of and can thus
1356 * treat as private.
1357 */
1358 u64 modified_start;
1359 /** @modified_end: Walk range end, modified like @modified_start. */
1360 u64 modified_end;
1361
1362 /* Output */
1363 /** @wupd: Structure to track the page-table updates we're building */
1364 struct xe_walk_update wupd;
1365 };
1366
1367 /*
1368 * Check whether this range is the only one populating this pagetable,
1369 * and in that case, update the walk range checks so that higher levels don't
1370 * view us as a shared pagetable.
1371 */
1372 static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
1373 const struct xe_pt *child,
1374 enum page_walk_action *action,
1375 struct xe_pt_walk *walk)
1376 {
1377 struct xe_pt_stage_unbind_walk *xe_walk =
1378 container_of(walk, typeof(*xe_walk), base);
1379 unsigned int shift = walk->shifts[level];
1380 u64 size = 1ull << shift;
1381
1382 if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
1383 ((next - addr) >> shift) == child->num_live) {
1384 u64 size = 1ull << walk->shifts[level + 1];
1385
1386 *action = ACTION_CONTINUE;
1387
1388 if (xe_walk->modified_start >= addr)
1389 xe_walk->modified_start = round_down(addr, size);
1390 if (xe_walk->modified_end <= next)
1391 xe_walk->modified_end = round_up(next, size);
1392
1393 return true;
1394 }
1395
1396 return false;
1397 }
1398
1399 static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
1400 unsigned int level, u64 addr, u64 next,
1401 struct xe_ptw **child,
1402 enum page_walk_action *action,
1403 struct xe_pt_walk *walk)
1404 {
1405 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
1406
1407 XE_WARN_ON(!*child);
1408 XE_WARN_ON(!level && xe_child->is_compact);
1409
1410 xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);
1411
1412 return 0;
1413 }
1414
1415 static int
1416 xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
1417 unsigned int level, u64 addr, u64 next,
1418 struct xe_ptw **child,
1419 enum page_walk_action *action,
1420 struct xe_pt_walk *walk)
1421 {
1422 struct xe_pt_stage_unbind_walk *xe_walk =
1423 container_of(walk, typeof(*xe_walk), base);
1424 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
1425 pgoff_t end_offset;
1426 u64 size = 1ull << walk->shifts[--level];
1427
1428 if (!IS_ALIGNED(addr, size))
1429 addr = xe_walk->modified_start;
1430 if (!IS_ALIGNED(next, size))
1431 next = xe_walk->modified_end;
1432
1433 /* Parent == *child is the root pt. Don't kill it. */
1434 if (parent != *child &&
1435 xe_pt_check_kill(addr, next, level, xe_child, action, walk))
1436 return 0;
1437
1438 if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
1439 &end_offset))
1440 return 0;
1441
1442 (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
1443 xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
1444
1445 return 0;
1446 }
1447
1448 static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
1449 .pt_entry = xe_pt_stage_unbind_entry,
1450 .pt_post_descend = xe_pt_stage_unbind_post_descend,
1451 };
1452
1453 /**
1454 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
1455 * operation
1456 * @tile: The tile we're unbinding for.
1457 * @vma: The vma we're unbinding.
1458 * @entries: Caller-provided storage for the update structures.
1459 *
1460 * Builds page-table update structures for an unbind operation. The function
1461 * will attempt to remove all page-tables that we're the only user
1462 * of, and for that to work, the unbind operation must be committed in the
1463 * same critical section that blocks racing binds to the same page-table tree.
1464 *
1465 * Return: The number of entries used.
1466 */
1467 static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
1468 struct xe_vm_pgtable_update *entries)
1469 {
1470 struct xe_pt_stage_unbind_walk xe_walk = {
1471 .base = {
1472 .ops = &xe_pt_stage_unbind_ops,
1473 .shifts = xe_normal_pt_shifts,
1474 .max_level = XE_PT_HIGHEST_LEVEL,
1475 },
1476 .tile = tile,
1477 .modified_start = xe_vma_start(vma),
1478 .modified_end = xe_vma_end(vma),
1479 .wupd.entries = entries,
1480 };
1481 struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
1482
1483 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
1484 xe_vma_end(vma), &xe_walk.base);
1485
1486 return xe_walk.wupd.num_used_entries;
1487 }
1488
1489 static void
1490 xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
1491 struct xe_tile *tile, struct iosys_map *map,
1492 void *ptr, u32 qword_ofs, u32 num_qwords,
1493 const struct xe_vm_pgtable_update *update)
1494 {
1495 struct xe_vma *vma = pt_update->vma;
1496 u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
1497 int i;
1498
1499 if (map && map->is_iomem)
1500 for (i = 0; i < num_qwords; ++i)
1501 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
1502 sizeof(u64), u64, empty);
1503 else if (map)
1504 memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
1505 num_qwords);
1506 else
1507 memset64(ptr, empty, num_qwords);
1508 }
1509
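/*
 * CPU-side commit of a staged unbind: drop the live-entry counts and
 * destroy (possibly deferred) the child page-tables that were disconnected.
 */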
1510 static void
1511 xe_pt_commit_unbind(struct xe_vma *vma,
1512 struct xe_vm_pgtable_update *entries, u32 num_entries,
1513 struct llist_head *deferred)
1514 {
1515 u32 j;
1516
1517 xe_pt_commit_locks_assert(vma);
1518
1519 for (j = 0; j < num_entries; ++j) {
1520 struct xe_vm_pgtable_update *entry = &entries[j];
1521 struct xe_pt *pt = entry->pt;
1522
1523 pt->num_live -= entry->qwords;
1524 if (pt->level) {
1525 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
1526 u32 i;
1527
1528 for (i = entry->ofs; i < entry->ofs + entry->qwords;
1529 i++) {
1530 if (xe_pt_entry(pt_dir, i))
1531 xe_pt_destroy(xe_pt_entry(pt_dir, i),
1532 xe_vma_vm(vma)->flags, deferred);
1533
1534 pt_dir->children[i] = NULL;
1535 }
1536 }
1537 }
1538 }
1539
1540 static const struct xe_migrate_pt_update_ops unbind_ops = {
1541 .populate = xe_migrate_clear_pgtable_callback,
1542 .pre_commit = xe_pt_pre_commit,
1543 };
1544
1545 static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
1546 .populate = xe_migrate_clear_pgtable_callback,
1547 .pre_commit = xe_pt_userptr_pre_commit,
1548 };
1549
1550 /**
1551 * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
1552 * address range.
1553 * @tile: The tile to unbind for.
1554 * @vma: The vma to unbind.
1555 * @q: The exec_queue with which to do pipelined page-table updates.
1556 * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
1557 * @num_syncs: Number of @sync entries.
1558 *
1559 * This function builds the xe_vm_pgtable_update entries abstracting the
1560 * operations needed to detach the page-table tree to be destroyed from the
1561 * main vm tree.
1562 * It then takes the relevant locks and submits the operations for
1563 * pipelined detachment of the gpu page-table from the vm main tree
1564 * (which can be done either by the cpu or the GPU). Finally it frees the
1565 * detached page-table tree.
1566 *
1567 * Return: A valid dma-fence representing the pipelined detachment operation
1568 * on success, an error pointer on error.
1569 */
1570 struct dma_fence *
1571 __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
1572 struct xe_sync_entry *syncs, u32 num_syncs)
1573 {
1574 struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
1575 struct xe_pt_migrate_pt_update unbind_pt_update = {
1576 .base = {
1577 .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
1578 &unbind_ops,
1579 .vma = vma,
1580 .tile_id = tile->id,
1581 },
1582 };
1583 struct xe_vm *vm = xe_vma_vm(vma);
1584 u32 num_entries;
1585 struct dma_fence *fence = NULL;
1586 struct invalidation_fence *ifence;
1587 struct xe_range_fence *rfence;
1588 int err;
1589
1590 LLIST_HEAD(deferred);
1591
1592 xe_bo_assert_held(xe_vma_bo(vma));
1593 xe_vm_assert_held(vm);
1594
1595 vm_dbg(&xe_vma_vm(vma)->xe->drm,
1596 "Preparing unbind, with range [%llx...%llx) engine %p.\n",
1597 xe_vma_start(vma), xe_vma_end(vma), q);
1598
1599 num_entries = xe_pt_stage_unbind(tile, vma, entries);
1600 xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
1601
1602 xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
1603 xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
1604 num_entries);
1605
1606 err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
1607 if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1608 err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
1609 if (err)
1610 return ERR_PTR(err);
1611
1612 ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
1613 if (!ifence)
1614 return ERR_PTR(-ENOMEM);
1615
1616 rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
1617 if (!rfence) {
1618 kfree(ifence);
1619 return ERR_PTR(-ENOMEM);
1620 }
1621
1622 /*
1623 * Even if we were already evicted and unbind to destroy, we need to
1624 * clear again here. The eviction may have updated pagetables at a
1625 * lower level, because it needs to be more conservative.
1626 */
1627 fence = xe_migrate_update_pgtables(tile->migrate,
1628 vm, NULL, q ? q :
1629 vm->q[tile->id],
1630 entries, num_entries,
1631 syncs, num_syncs,
1632 &unbind_pt_update.base);
1633 if (!IS_ERR(fence)) {
1634 int err;
1635
1636 err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
1637 &xe_range_fence_kfree_ops,
1638 unbind_pt_update.base.start,
1639 unbind_pt_update.base.last, fence);
1640 if (err)
1641 dma_fence_wait(fence, false);
1642
1643 /* TLB invalidation must be done before signaling unbind */
1644 err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
1645 if (err) {
1646 dma_fence_put(fence);
1647 kfree(ifence);
1648 return ERR_PTR(err);
1649 }
1650 fence = &ifence->base.base;
1651
1652 /* add shared fence now for pagetable delayed destroy */
1653 dma_resv_add_fence(xe_vm_resv(vm), fence,
1654 DMA_RESV_USAGE_BOOKKEEP);
1655
1656 /* This fence will be installed by caller when doing eviction */
1657 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1658 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
1659 DMA_RESV_USAGE_BOOKKEEP);
1660 xe_pt_commit_unbind(vma, entries, num_entries,
1661 unbind_pt_update.locked ? &deferred : NULL);
1662 vma->tile_present &= ~BIT(tile->id);
1663 } else {
1664 kfree(rfence);
1665 kfree(ifence);
1666 }
1667
1668 if (!vma->tile_present)
1669 list_del_init(&vma->combined_links.rebind);
1670
1671 if (unbind_pt_update.locked) {
1672 xe_tile_assert(tile, xe_vma_is_userptr(vma));
1673
1674 if (!vma->tile_present) {
1675 spin_lock(&vm->userptr.invalidated_lock);
1676 list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
1677 spin_unlock(&vm->userptr.invalidated_lock);
1678 }
1679 up_read(&vm->userptr.notifier_lock);
1680 xe_bo_put_commit(&deferred);
1681 }
1682
1683 return fence;
1684 }
1685