/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
struct amdgpu_mem_stats;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev)	(1 << (adev)->vm_manager.block_size)
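
/*
 * Usage sketch (illustrative, not part of the driver): a page table block
 * holds 2^block_size entries, so with a block_size of 9 a single PTB has
 * 512 entries and covers 512 * 4KiB = 2MiB of address space:
 *
 *	unsigned int nptes = AMDGPU_VM_PTE_COUNT(adev);
 *	uint64_t covered = (uint64_t)nptes << PAGE_SHIFT;
 */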

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)
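
/*
 * Illustrative note (an assumption about the fragment semantics, not text
 * from this header): a fragment value f marks a naturally aligned run of
 * 2^f contiguous pages, i.e. an interval of 2^(f + 12) bytes with 4KiB
 * pages. For example, requesting 2MiB fragments:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_FRAG(9);
 */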

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS	(AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
				AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
				    AMDGPU_PTE_PRT)
/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)
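
/*
 * Sketch of assumed usage (not driver code): MTYPE is a multi-bit field, so
 * it has to be cleared with the mask before a new memory type is or-ed in
 * (AMDGPU_MTYPE_CC is defined just below):
 *
 *	flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
 *	flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC);
 */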

#define AMDGPU_MTYPE_NC	0
#define AMDGPU_MTYPE_CC	2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
/* Reserve 8MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS	13
#define AMDGPU_GFXHUB(x)	(x)
#define AMDGPU_MMHUB0(x)	(8 + x)
#define AMDGPU_MMHUB1(x)	(8 + 4 + x)
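
/*
 * Illustrative example (follows directly from the macros above): hub
 * indices are laid out back to back, so the first MMHUB0 instance lands
 * right after the eight possible GFXHUB slots:
 *
 *	AMDGPU_GFXHUB(0) == 0
 *	AMDGPU_MMHUB0(0) == 8
 *	AMDGPU_MMHUB1(0) == 12
 */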

/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE		(2ULL << 20)

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
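
/*
 * Semantics sketch (illustrative, not driver code; write_u64() is a
 * hypothetical stand-in for the backend's entry write): set_pte_pde()
 * writes count consecutive 8-byte entries starting at GPU address pe,
 * mapping entry i to (addr + i * incr) | flags, roughly:
 *
 *	for (i = 0; i < count; ++i)
 *		write_u64(pe + i * 8, (addr + (uint64_t)i * incr) | flags);
 */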

struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: set to true if a page table was freed during the
	 * update
	 */
	bool table_freed;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
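
/*
 * Call-flow sketch (assumed typical sequence, simplified from the CPU/SDMA
 * backends declared further below): an update pass prepares once, issues
 * one or more update calls, then commits to obtain the resulting fence:
 *
 *	struct amdgpu_vm_update_params p = { .adev = adev, .vm = vm };
 *	r = vm->update_funcs->prepare(&p, resv, AMDGPU_SYNC_EQ_OWNER);
 *	if (!r)
 *		r = vm->update_funcs->update(&p, bo, pe, addr, count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&p, &fence);
 */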

struct amdgpu_vm_fault {
	SIMPLEQ_ENTRY(amdgpu_vm_fault) vm_fault_entry;
	uint64_t val;
};
SIMPLEQ_HEAD(amdgpu_vm_faults, amdgpu_vm_fault);

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables;
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct rwlock		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* Lock to protect vm_bo add/del/move on all lists of vm */
	spinlock_t		status_lock;

	/* BOs that need a validation */
	struct list_head	evicted;

	/* PT BOs which were relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* BOs which were invalidated and have been updated in the PTs */
	struct list_head	done;

	/* PT BOs scheduled to be freed and zero-filled when vm_resv is not held */
	struct list_head	pt_freed;
	struct work_struct	pt_free_work;

	/* contains the page directory */
	struct amdgpu_vm_bo_base	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last finished delayed update */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;

	/* How many times we had to re-generate the page tables */
	uint64_t		generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	bool			reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

#ifdef __linux__
	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);
#else
	struct amdgpu_vm_faults	faults;
#endif

	/* Points to the KFD process VM info */
	struct amdkfd_process_info	*process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info	task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;

	/* Memory partition number, -1 means any partition */
	int8_t			mem_id;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr	id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int		first_kfd_vmid;
	bool			concurrent_flush;

	/* Handling of VM fences */
	u64			fence_context;
	unsigned		seqno[AMDGPU_MAX_RINGS];

	uint64_t		max_pfn;
	uint32_t		num_level;
	uint32_t		block_size;
	uint32_t		fragment_size;
	enum amdgpu_vm_level	root_level;
	/* vram base address for page table entry */
	u64			vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler	*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned			vm_pte_num_scheds;
	struct amdgpu_ring		*page_fault;

	/* partial resident texture handling */
	spinlock_t		prt_lock;
	atomic_t		num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int			vm_update_mode;
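
	/*
	 * Illustrative decoding (not driver code): with vm_update_mode == 3
	 * both bits are set, so both graphics and compute page tables are
	 * updated by the CPU:
	 *
	 *	bool gfx_cpu  = vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX;
	 *	bool comp_cpu = vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE;
	 */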

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct xarray		pasids;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
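
/*
 * Example (hypothetical call, shown only to illustrate the dispatch): the
 * wrappers above forward to the per-ASIC pte funcs registered in
 * vm_manager, so
 *
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 16, 4096, AMDGPU_PTE_VALID);
 *
 * would write 16 entries, each advancing the mapped address by 4KiB.
 */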

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number which indicates that the VM TLBs need
 * to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}
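
/*
 * Usage sketch (illustrative, not part of this header): callers typically
 * cache the sequence number at map time and flush the TLB when it changed:
 *
 *	if (cached_seq != amdgpu_vm_tlb_seq(vm)) {
 *		cached_seq = amdgpu_vm_tlb_seq(vm);
 *		... flush the VM TLBs ...
 *	}
 */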

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
#ifdef notyet
	vm->saved_flags = memalloc_noreclaim_save();
#endif
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
#ifdef notyet
		vm->saved_flags = memalloc_noreclaim_save();
#endif
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
#ifdef notyet
	memalloc_noreclaim_restore(vm->saved_flags);
#endif
	mutex_unlock(&vm->eviction_lock);
}
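
/*
 * Pairing sketch (illustrative): the helpers above are used as a matched
 * pair around page table updates, e.g.
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	... update page tables, check vm->evicting ...
 *	amdgpu_vm_eviction_unlock(vm);
 */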

#endif