/*	$NetBSD: amdgpu_vm.h,v 1.3 2021/12/19 12:22:37 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev)	(1 << (adev)->vm_manager.block_size)

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC	0
#define AMDGPU_MTYPE_CC	2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)
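/*
 * Example (illustrative sketch only, not part of the driver): PTE flags are
 * composed by OR'ing the bit definitions above into one 64-bit value.  The
 * hypothetical helper below builds the flags for a snooped, read/write
 * system-memory page; by the amdgpu convention the FRAG field carries log2
 * of the fragment size in 4KB pages, so a 2MB fragment is encoded as 9.
 */
static inline uint64_t
amdgpu_vm_example_system_pte_flags(void)
{
	return AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	    AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_FRAG(9);
}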
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(4ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS	3
#define AMDGPU_GFXHUB_0		0
#define AMDGPU_MMHUB_0		1
#define AMDGPU_MMHUB_1		2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(1ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base *next;

	/* protected by spinlock */
	struct list_head vm_status;

	/* protected by the BO being reserved */
	bool moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt *entries;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
	char process_name[TASK_COMM_LEN];
	char task_name[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @direct: if changes should be made directly
	 */
	bool direct;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
#ifdef __NetBSD__
	bus_dma_segment_t *pages_addr;
#else
	dma_addr_t *pages_addr;
#endif

	/**
	 * @job: job used for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
		       struct dma_fence *exclusive);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
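/*
 * Example (illustrative sketch only): the update_funcs vtable is driven in
 * the order map_table -> prepare -> update (possibly repeated) -> commit,
 * with a backend such as amdgpu_vm_cpu_funcs or amdgpu_vm_sdma_funcs
 * (declared further below).  The helper and its parameter values are
 * placeholders, not driver API.
 */
static inline int
amdgpu_vm_example_update_one(struct amdgpu_vm_update_params *p,
    const struct amdgpu_vm_update_funcs *funcs, struct amdgpu_bo *table,
    uint64_t pe, uint64_t addr, uint64_t flags, struct dma_fence **fence)
{
	int r;

	/* make the page-table BO accessible to the chosen backend */
	r = funcs->map_table(table);
	if (r)
		return r;
	/* reserve update space and sync to prior fences (no owner here) */
	r = funcs->prepare(p, NULL, NULL);
	if (r)
		return r;
	/* write one entry at pe, no address increment */
	r = funcs->update(p, table, pe, addr, 1, 0, flags);
	if (r)
		return r;
	/* submit the work and hand back the resulting fence */
	return funcs->commit(p, fence);
}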
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached va;

	/* Lock to prevent eviction while we are updating page tables;
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex eviction_lock;
	bool evicting;
	unsigned int saved_flags;

	/* BOs which need a validation */
	struct list_head evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head invalidated;
	spinlock_t invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_vm_pt root;
	struct dma_fence *last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity direct;
	struct drm_sched_entity delayed;

	/* Last submission to the scheduler entities */
	struct dma_fence *last_direct;
	struct dma_fence *last_delayed;

	unsigned int pasid;
	/* dedicated to vm */
	struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs *update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* mark whether the bulk move is possible */
	bool bulk_moveable;
	/* Flag to indicate if VM is used for compute */
	bool is_compute_context;
};
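/*
 * Example (illustrative sketch only): BOs travel through the per-VM state
 * lists above (evicted -> relocated/moved/invalidated -> idle) via the
 * vm_status node embedded in their struct amdgpu_vm_bo_base.  The
 * hypothetical helper below counts the BOs still awaiting a PT update on
 * the invalidated list, which is the list guarded by invalidated_lock;
 * list_for_each_entry/spin_lock come in via the Linux headers above.
 */
static inline unsigned
amdgpu_vm_example_count_invalidated(struct amdgpu_vm *vm)
{
	struct amdgpu_vm_bo_base *bo_base;
	unsigned n = 0;

	spin_lock(&vm->invalidated_lock);
	list_for_each_entry(bo_base, &vm->invalidated, vm_status)
		n++;
	spin_unlock(&vm->invalidated_lock);
	return n;
}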
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64 fence_context;
	unsigned seqno[AMDGPU_MAX_RINGS];

	uint64_t max_pfn;
	uint32_t num_level;
	uint32_t block_size;
	uint32_t fragment_size;
	enum amdgpu_vm_level root_level;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_scheds;
	struct amdgpu_ring *page_fault;

	/* partial resident texture handling */
	spinlock_t prt_lock;
	atomic_t num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr pasid_idr;
	spinlock_t pasid_lock;

	/* counter of mapped memory through xgmi */
	uint32_t xgmi_map_counter;
	struct mutex lock_pstate;
};

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
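/*
 * Example (illustrative sketch only): vm_update_mode is decoded with the
 * AMDGPU_VM_USE_CPU_FOR_* bits defined earlier in this header.  The helpers
 * are hypothetical, but the bit convention matches the comment in
 * struct amdgpu_vm_manager above.
 */
static inline bool
amdgpu_vm_example_gfx_uses_cpu(const struct amdgpu_vm_manager *mgr)
{
	return (mgr->vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX) != 0;
}

static inline bool
amdgpu_vm_example_compute_uses_cpu(const struct amdgpu_vm_manager *mgr)
{
	return (mgr->vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE) != 0;
}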
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
#ifdef __NetBSD__
uint64_t amdgpu_vm_map_gart(const bus_dma_segment_t *pages_addr, uint64_t addr);
#else
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
#endif
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
			    uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif