/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
struct amdgpu_mem_stats;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
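
/*
 * For illustration only (the concrete block_size is a runtime value, not
 * fixed by this header): a block_size of 9 gives 1 << 9 = 512 PTEs per
 * page table, i.e. 512 * 4 KiB = 2 MiB of address space covered by one
 * fully populated PTB.
 */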

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS	(AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
				AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
				    AMDGPU_PTE_PRT)
/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)
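
/*
 * Illustrative sketch (an assumed usage pattern, not an API defined here):
 * replacing the memory type in an existing gfx10 PTE means clearing the old
 * field before OR-ing in the new one, e.g.:
 *
 *	pte &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
 *	pte |= AMDGPU_PTE_MTYPE_NV10(mtype);
 */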

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* Reserve 8MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS		13
#define AMDGPU_GFXHUB(x)		(x)
#define AMDGPU_MMHUB0(x)		(8 + x)
#define AMDGPU_MMHUB1(x)		(8 + 4 + x)
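
/*
 * Worked example: the hubs are laid out back to back, so AMDGPU_GFXHUB(0)
 * is index 0, AMDGPU_MMHUB0(1) is index 8 + 1 = 9 and AMDGPU_MMHUB1(0) is
 * index 8 + 4 + 0 = 12, the last of the AMDGPU_MAX_VMHUBS (13) slots.
 */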

/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE		(2ULL << 20)

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
/* VMPT level enumeration, and the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
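
/*
 * A minimal sketch of the semantics the set_pte_pde() callback implements;
 * the real hw-specific versions emit packets into the IB instead of writing
 * memory directly, and "pte" as a plain array is an assumption made purely
 * for illustration:
 *
 *	for (i = 0; i < count; ++i)
 *		pte[i] = (addr + (uint64_t)i * incr) | flags;
 */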

struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for HW submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: set to true if a page table was freed during the update
	 */
	bool table_freed;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
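
/*
 * Hedged sketch of the intended call flow, derived from the callback names
 * rather than copied from the driver: a table update maps the PT BO, makes
 * the needed fences known, writes the entries and finally commits:
 *
 *	vm->update_funcs->map_table(bo);
 *	vm->update_funcs->prepare(&params, resv, sync_mode);
 *	vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
 *	vm->update_funcs->commit(&params, &fence);
 */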

struct amdgpu_vm_fault {
	SIMPLEQ_ENTRY(amdgpu_vm_fault) vm_fault_entry;
	uint64_t val;
};
SIMPLEQ_HEAD(amdgpu_vm_faults, amdgpu_vm_fault);

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct rwlock		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* Lock to protect vm_bo add/del/move on all lists of vm */
	spinlock_t		status_lock;

	/* BOs which need validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* BOs which were invalidated and have been updated in the PTs */
	struct list_head	done;

	/* PT BOs scheduled to be freed and zero-filled when the VM resv is not held */
	struct list_head	pt_freed;
	struct work_struct	pt_free_work;

	/* contains the page directory */
	struct amdgpu_vm_bo_base	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last finished delayed update */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;

	/* How many times we had to re-generate the page tables */
	uint64_t		generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	bool			reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

#ifdef __linux__
	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);
#else
	struct amdgpu_vm_faults faults;
#endif

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;

	/* Memory partition number, -1 means any partition */
	int8_t			mem_id;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;
	bool					concurrent_flush;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* Controls how VM page tables are updated for graphics and compute.
	 * BIT0 [= 0] graphics updated by SDMA, [= 1] by CPU
	 * BIT1 [= 0] compute updated by SDMA, [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct xarray				pasids;
};
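
/*
 * Illustrative check of vm_update_mode (a sketch under the bit layout
 * documented above, not verbatim driver code): whether compute page tables
 * are CPU updated follows from BIT1, e.g.:
 *
 *	if (adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE)
 *		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 *	else
 *		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 */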

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
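
/*
 * Example use of the wrappers above (illustrative only; "ib", "pe" and
 * friends are assumed to come from the surrounding update code):
 *
 *	amdgpu_vm_write_pte(adev, ib, pe, value, count, incr);
 *
 * dispatches to the hw-specific vm_pte_funcs->write_pte() callback.
 */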

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the TLB flush sequence number, which indicates that the VM's TLBs
 * need to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}
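
/*
 * Typical use, as a sketch under the assumption that the caller cached an
 * earlier sequence number: compare against the current value to decide
 * whether a TLB flush is still required, e.g.:
 *
 *	if (cached_seq != amdgpu_vm_tlb_seq(vm))
 *		flush_tlb = true;
 */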

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
#ifdef notyet
	vm->saved_flags = memalloc_noreclaim_save();
#endif
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
#ifdef notyet
		vm->saved_flags = memalloc_noreclaim_save();
#endif
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
#ifdef notyet
	memalloc_noreclaim_restore(vm->saved_flags);
#endif
	mutex_unlock(&vm->eviction_lock);
}
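
/*
 * Usage sketch for the helpers above (illustrative, not copied from the
 * driver): page table updates bracket their critical section with the
 * wrappers instead of taking eviction_lock directly, so the reclaim-FS
 * annotation is never forgotten:
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	if (!vm->evicting)
 *		... update page tables ...
 *	amdgpu_vm_eviction_unlock(vm);
 */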

#endif