1 /*	$NetBSD: amdgpu_amdkfd_gpuvm.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2014-2018 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: amdgpu_amdkfd_gpuvm.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");
26 
27 #include <linux/dma-buf.h>
28 #include <linux/list.h>
29 #include <linux/pagemap.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/task.h>
32 
33 #include "amdgpu_object.h"
34 #include "amdgpu_vm.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_dma_buf.h"
37 
38 /* BO flag to indicate a KFD userptr BO */
39 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
40 
41 /* Userptr restore delay, just long enough to allow consecutive VM
42  * changes to accumulate
43  */
44 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
45 
46 /* Impose limit on how much memory KFD can use */
47 static struct {
48 	uint64_t max_system_mem_limit;
49 	uint64_t max_ttm_mem_limit;
50 	int64_t system_mem_used;
51 	int64_t ttm_mem_used;
52 	spinlock_t mem_limit_lock;
53 } kfd_mem_limit;
54 
55 /* Struct used for amdgpu_amdkfd_bo_validate */
56 struct amdgpu_vm_parser {
57 	uint32_t        domain;
58 	bool            wait;
59 };
60 
61 static const char * const domain_bit_to_string[] = {
62 		"CPU",
63 		"GTT",
64 		"VRAM",
65 		"GDS",
66 		"GWS",
67 		"OA"
68 };
69 
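/* Illustrative note (added for clarity, not in the original driver): ffs()
 * returns the 1-based index of the lowest set bit, so a single-bit domain
 * mask indexes the table above, e.g. AMDGPU_GEM_DOMAIN_VRAM (0x4) gives
 * ffs() == 3 and therefore "VRAM".
 */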
70 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
71 
72 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
73 
74 
75 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
76 {
77 	return (struct amdgpu_device *)kgd;
78 }
79 
80 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
81 		struct kgd_mem *mem)
82 {
83 	struct kfd_bo_va_list *entry;
84 
85 	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
86 		if (entry->bo_va->base.vm == avm)
87 			return false;
88 
89 	return true;
90 }
91 
92 /* Set memory usage limits. Currently, the limits are:
93  *  System (TTM + userptr) memory - 15/16th System RAM
94  *  TTM memory - 3/8th System RAM
95  */
96 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
97 {
98 	struct sysinfo si;
99 	uint64_t mem;
100 
101 	si_meminfo(&si);
102 	mem = si.totalram - si.totalhigh;
103 	mem *= si.mem_unit;
104 
105 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
106 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
107 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
108 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
109 		(kfd_mem_limit.max_system_mem_limit >> 20),
110 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
111 }
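/* Worked example (illustrative, added for clarity): with 16 GiB of usable
 * system RAM the function above computes a system memory limit of 15 GiB
 * (mem - mem/16) and a TTM memory limit of 6 GiB (mem/2 - mem/8).
 */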
112 
113 /* Estimate page table size needed to represent a given memory size
114  *
115  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
116  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
117  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
118  * for 2MB pages for TLB efficiency. However, small allocations and
119  * fragmented system memory still need some 4KB pages. We choose a
120  * compromise that should work in most cases without reserving too
121  * much memory for page tables unnecessarily (factor 16K, >> 14).
122  */
123 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
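/* Worked example (illustrative): for 64 GiB of memory this reserves
 * 64 GiB >> 14 = 4 MiB for page tables, a compromise between the pure
 * 4 KiB-page case (64 GiB >> 9 = 128 MiB) and the pure 2 MiB-page case
 * (64 GiB >> 18 = 256 KiB).
 */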
124 
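/* Summary of the accounting done below (added for clarity):
 *   GTT BO:       system_mem += size + acc_size, ttm_mem += size + acc_size
 *   userptr BO:   system_mem += size + acc_size, ttm_mem += acc_size
 *   VRAM/SG BO:   system_mem += acc_size,        ttm_mem += acc_size
 *                 (VRAM additionally charges adev->kfd.vram_used += size)
 */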
125 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
126 		uint64_t size, u32 domain, bool sg)
127 {
128 	uint64_t reserved_for_pt =
129 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
130 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
131 	int ret = 0;
132 
133 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
134 				       sizeof(struct amdgpu_bo));
135 
136 	vram_needed = 0;
137 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
138 		/* TTM GTT memory */
139 		system_mem_needed = acc_size + size;
140 		ttm_mem_needed = acc_size + size;
141 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
142 		/* Userptr */
143 		system_mem_needed = acc_size + size;
144 		ttm_mem_needed = acc_size;
145 	} else {
146 		/* VRAM and SG */
147 		system_mem_needed = acc_size;
148 		ttm_mem_needed = acc_size;
149 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
150 			vram_needed = size;
151 	}
152 
153 	spin_lock(&kfd_mem_limit.mem_limit_lock);
154 
155 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
156 	     kfd_mem_limit.max_system_mem_limit) ||
157 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
158 	     kfd_mem_limit.max_ttm_mem_limit) ||
159 	    (adev->kfd.vram_used + vram_needed >
160 	     adev->gmc.real_vram_size - reserved_for_pt)) {
161 		ret = -ENOMEM;
162 	} else {
163 		kfd_mem_limit.system_mem_used += system_mem_needed;
164 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
165 		adev->kfd.vram_used += vram_needed;
166 	}
167 
168 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
169 	return ret;
170 }
171 
172 static void unreserve_mem_limit(struct amdgpu_device *adev,
173 		uint64_t size, u32 domain, bool sg)
174 {
175 	size_t acc_size;
176 
177 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
178 				       sizeof(struct amdgpu_bo));
179 
180 	spin_lock(&kfd_mem_limit.mem_limit_lock);
181 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
182 		kfd_mem_limit.system_mem_used -= (acc_size + size);
183 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
184 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
185 		kfd_mem_limit.system_mem_used -= (acc_size + size);
186 		kfd_mem_limit.ttm_mem_used -= acc_size;
187 	} else {
188 		kfd_mem_limit.system_mem_used -= acc_size;
189 		kfd_mem_limit.ttm_mem_used -= acc_size;
190 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
191 			adev->kfd.vram_used -= size;
192 			WARN_ONCE(adev->kfd.vram_used < 0,
193 				  "kfd VRAM memory accounting unbalanced");
194 		}
195 	}
196 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
197 		  "kfd system memory accounting unbalanced");
198 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
199 		  "kfd TTM memory accounting unbalanced");
200 
201 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
202 }
203 
204 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
205 {
206 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
207 	u32 domain = bo->preferred_domains;
208 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
209 
210 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
211 		domain = AMDGPU_GEM_DOMAIN_CPU;
212 		sg = false;
213 	}
214 
215 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
216 }
217 
218 
219 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
220  *  reservation object.
221  *
222  * @bo: [IN] Remove eviction fence(s) from this BO
223  * @ef: [IN] This eviction fence is removed if it
224  *  is present in the shared list.
225  *
226  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
227  */
228 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
229 					struct amdgpu_amdkfd_fence *ef)
230 {
231 	struct dma_resv *resv = bo->tbo.base.resv;
232 	struct dma_resv_list *old, *new;
233 	unsigned int i, j, k;
234 
235 	if (!ef)
236 		return -EINVAL;
237 
238 	old = dma_resv_get_list(resv);
239 	if (!old)
240 		return 0;
241 
242 	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
243 		      GFP_KERNEL);
244 	if (!new)
245 		return -ENOMEM;
246 
247 	/* Go through all the shared fences in the reservation object and sort
248 	 * the interesting ones to the end of the list.
249 	 */
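	/* Illustrative example (added for clarity): with old->shared = [F0, E, F1]
	 * and E belonging to ef's fence context, the loop below builds
	 * new->shared = [F0, F1, E] with shared_count = 2; the trailing E is then
	 * released in the second loop further down.
	 */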
250 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
251 		struct dma_fence *f;
252 
253 		f = rcu_dereference_protected(old->shared[i],
254 					      dma_resv_held(resv));
255 
256 		if (f->context == ef->base.context)
257 			RCU_INIT_POINTER(new->shared[--j], f);
258 		else
259 			RCU_INIT_POINTER(new->shared[k++], f);
260 	}
261 	new->shared_max = old->shared_max;
262 	new->shared_count = k;
263 
264 	/* Install the new fence list, seqcount provides the barriers */
265 	preempt_disable();
266 	write_seqcount_begin(&resv->seq);
267 	RCU_INIT_POINTER(resv->fence, new);
268 	write_seqcount_end(&resv->seq);
269 	preempt_enable();
270 
271 	/* Drop the references to the removed fences or move them to ef_list */
272 	for (i = j, k = 0; i < old->shared_count; ++i) {
273 		struct dma_fence *f;
274 
275 		f = rcu_dereference_protected(new->shared[i],
276 					      dma_resv_held(resv));
277 		dma_fence_put(f);
278 	}
279 	kfree_rcu(old, rcu);
280 
281 	return 0;
282 }
283 
284 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
285 				     bool wait)
286 {
287 	struct ttm_operation_ctx ctx = { false, false };
288 	int ret;
289 
290 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
291 		 "Called with userptr BO"))
292 		return -EINVAL;
293 
294 	amdgpu_bo_placement_from_domain(bo, domain);
295 
296 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
297 	if (ret)
298 		goto validate_fail;
299 	if (wait)
300 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
301 
302 validate_fail:
303 	return ret;
304 }
305 
306 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
307 {
308 	struct amdgpu_vm_parser *p = param;
309 
310 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
311 }
312 
313 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
314  *
315  * Page directories are not updated here because huge page handling
316  * during page table updates can invalidate page directory entries
317  * again. Page directories are only updated after updating page
318  * tables.
319  */
320 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
321 {
322 	struct amdgpu_bo *pd = vm->root.base.bo;
323 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
324 	struct amdgpu_vm_parser param;
325 	int ret;
326 
327 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
328 	param.wait = false;
329 
330 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
331 					&param);
332 	if (ret) {
333 		pr_err("amdgpu: failed to validate PT BOs\n");
334 		return ret;
335 	}
336 
337 	ret = amdgpu_amdkfd_validate(&param, pd);
338 	if (ret) {
339 		pr_err("amdgpu: failed to validate PD\n");
340 		return ret;
341 	}
342 
343 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
344 
345 	if (vm->use_cpu_for_update) {
346 		ret = amdgpu_bo_kmap(pd, NULL);
347 		if (ret) {
348 			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
349 			return ret;
350 		}
351 	}
352 
353 	return 0;
354 }
355 
356 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
357 {
358 	struct amdgpu_bo *pd = vm->root.base.bo;
359 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
360 	int ret;
361 
362 	ret = amdgpu_vm_update_pdes(adev, vm, false);
363 	if (ret)
364 		return ret;
365 
366 	return amdgpu_sync_fence(sync, vm->last_update, false);
367 }
368 
369 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
370 {
371 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
372 	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
373 	uint32_t mapping_flags;
374 
375 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
376 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
377 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
378 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
379 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
380 
381 	switch (adev->asic_type) {
382 	case CHIP_ARCTURUS:
383 		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
384 			if (bo_adev == adev)
385 				mapping_flags |= coherent ?
386 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
387 			else
388 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
389 		} else {
390 			mapping_flags |= coherent ?
391 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
392 		}
393 		break;
394 	default:
395 		mapping_flags |= coherent ?
396 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
397 	}
398 
399 	return amdgpu_gem_va_map_flags(adev, mapping_flags);
400 }
401 
402 /* add_bo_to_vm - Add a BO to a VM
403  *
404  * Everything that needs to be done only once when a BO is first added
405  * to a VM. It can later be mapped and unmapped many times without
406  * repeating these steps.
407  *
408  * 1. Allocate and initialize BO VA entry data structure
409  * 2. Add BO to the VM
410  * 3. Determine ASIC-specific PTE flags
411  * 4. Alloc page tables and directories if needed
412  * 4a.  Validate new page tables and directories
413  */
414 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
415 		struct amdgpu_vm *vm, bool is_aql,
416 		struct kfd_bo_va_list **p_bo_va_entry)
417 {
418 	int ret;
419 	struct kfd_bo_va_list *bo_va_entry;
420 	struct amdgpu_bo *bo = mem->bo;
421 	uint64_t va = mem->va;
422 	struct list_head *list_bo_va = &mem->bo_va_list;
423 	unsigned long bo_size = bo->tbo.mem.size;
424 
425 	if (!va) {
426 		pr_err("Invalid VA when adding BO to VM\n");
427 		return -EINVAL;
428 	}
429 
430 	if (is_aql)
431 		va += bo_size;
432 
433 	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
434 	if (!bo_va_entry)
435 		return -ENOMEM;
436 
437 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
438 			va + bo_size, vm);
439 
440 	/* Add BO to VM internal data structures */
441 	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
442 	if (!bo_va_entry->bo_va) {
443 		ret = -EINVAL;
444 		pr_err("Failed to add BO object to VM. ret == %d\n",
445 				ret);
446 		goto err_vmadd;
447 	}
448 
449 	bo_va_entry->va = va;
450 	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
451 	bo_va_entry->kgd_dev = (void *)adev;
452 	list_add(&bo_va_entry->bo_list, list_bo_va);
453 
454 	if (p_bo_va_entry)
455 		*p_bo_va_entry = bo_va_entry;
456 
457 	/* Allocate and validate page tables if needed */
458 	ret = vm_validate_pt_pd_bos(vm);
459 	if (ret) {
460 		pr_err("validate_pt_pd_bos() failed\n");
461 		goto err_alloc_pts;
462 	}
463 
464 	return 0;
465 
466 err_alloc_pts:
467 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
468 	list_del(&bo_va_entry->bo_list);
469 err_vmadd:
470 	kfree(bo_va_entry);
471 	return ret;
472 }
473 
474 static void remove_bo_from_vm(struct amdgpu_device *adev,
475 		struct kfd_bo_va_list *entry, unsigned long size)
476 {
477 	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
478 			entry->va,
479 			entry->va + size, entry);
480 	amdgpu_vm_bo_rmv(adev, entry->bo_va);
481 	list_del(&entry->bo_list);
482 	kfree(entry);
483 }
484 
485 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
486 				struct amdkfd_process_info *process_info,
487 				bool userptr)
488 {
489 	struct ttm_validate_buffer *entry = &mem->validate_list;
490 	struct amdgpu_bo *bo = mem->bo;
491 
492 	INIT_LIST_HEAD(&entry->head);
493 	entry->num_shared = 1;
494 	entry->bo = &bo->tbo;
495 	mutex_lock(&process_info->lock);
496 	if (userptr)
497 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
498 	else
499 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
500 	mutex_unlock(&process_info->lock);
501 }
502 
503 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
504 		struct amdkfd_process_info *process_info)
505 {
506 	struct ttm_validate_buffer *bo_list_entry;
507 
508 	bo_list_entry = &mem->validate_list;
509 	mutex_lock(&process_info->lock);
510 	list_del(&bo_list_entry->head);
511 	mutex_unlock(&process_info->lock);
512 }
513 
514 /* Initializes user pages. It registers the MMU notifier and validates
515  * the userptr BO in the GTT domain.
516  *
517  * The BO must already be on the userptr_valid_list. Otherwise an
518  * eviction and restore may happen that leaves the new BO unmapped
519  * with the user mode queues running.
520  *
521  * Takes the process_info->lock to protect against concurrent restore
522  * workers.
523  *
524  * Returns 0 for success, negative errno for errors.
525  */
526 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
527 {
528 	struct amdkfd_process_info *process_info = mem->process_info;
529 	struct amdgpu_bo *bo = mem->bo;
530 	struct ttm_operation_ctx ctx = { true, false };
531 	int ret = 0;
532 
533 	mutex_lock(&process_info->lock);
534 
535 	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
536 	if (ret) {
537 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
538 		goto out;
539 	}
540 
541 	ret = amdgpu_mn_register(bo, user_addr);
542 	if (ret) {
543 		pr_err("%s: Failed to register MMU notifier: %d\n",
544 		       __func__, ret);
545 		goto out;
546 	}
547 
548 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
549 	if (ret) {
550 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
551 		goto unregister_out;
552 	}
553 
554 	ret = amdgpu_bo_reserve(bo, true);
555 	if (ret) {
556 		pr_err("%s: Failed to reserve BO\n", __func__);
557 		goto release_out;
558 	}
559 	amdgpu_bo_placement_from_domain(bo, mem->domain);
560 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
561 	if (ret)
562 		pr_err("%s: failed to validate BO\n", __func__);
563 	amdgpu_bo_unreserve(bo);
564 
565 release_out:
566 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
567 unregister_out:
568 	if (ret)
569 		amdgpu_mn_unregister(bo);
570 out:
571 	mutex_unlock(&process_info->lock);
572 	return ret;
573 }
574 
575 /* Reserving a BO and its page table BOs must happen atomically to
576  * avoid deadlocks. Some operations update multiple VMs at once. Track
577  * all the reservation info in a context structure. Optionally a sync
578  * object can track VM updates.
579  */
580 struct bo_vm_reservation_context {
581 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
582 	unsigned int n_vms;		    /* Number of VMs reserved	    */
583 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
584 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
585 	struct list_head list, duplicates;  /* BO lists			    */
586 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
587 	bool reserved;			    /* Whether BOs are reserved	    */
588 };
589 
590 enum bo_vm_match {
591 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
592 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
593 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
594 };
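
/* Typical usage of the reservation context (added for clarity; a sketch of
 * the pattern used by the map/unmap functions below, e.g.
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu()):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);  // or reserve_bo_and_cond_vms()
 *	if (!ret) {
 *		// map/unmap/validate under the reservation
 *		unreserve_bo_and_vms(&ctx, false, false);
 *	}
 */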
595 
596 /**
597  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
598  * @mem: KFD BO structure.
599  * @vm: the VM to reserve.
600  * @ctx: the struct that will be used in unreserve_bo_and_vms().
601  */
602 static int reserve_bo_and_vm(struct kgd_mem *mem,
603 			      struct amdgpu_vm *vm,
604 			      struct bo_vm_reservation_context *ctx)
605 {
606 	struct amdgpu_bo *bo = mem->bo;
607 	int ret;
608 
609 	WARN_ON(!vm);
610 
611 	ctx->reserved = false;
612 	ctx->n_vms = 1;
613 	ctx->sync = &mem->sync;
614 
615 	INIT_LIST_HEAD(&ctx->list);
616 	INIT_LIST_HEAD(&ctx->duplicates);
617 
618 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
619 	if (!ctx->vm_pd)
620 		return -ENOMEM;
621 
622 	ctx->kfd_bo.priority = 0;
623 	ctx->kfd_bo.tv.bo = &bo->tbo;
624 	ctx->kfd_bo.tv.num_shared = 1;
625 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
626 
627 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
628 
629 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
630 				     false, &ctx->duplicates);
631 	if (!ret)
632 		ctx->reserved = true;
633 	else {
634 		pr_err("Failed to reserve buffers in ttm\n");
635 		kfree(ctx->vm_pd);
636 		ctx->vm_pd = NULL;
637 	}
638 
639 	return ret;
640 }
641 
642 /**
643  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
644  * @mem: KFD BO structure.
645  * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
646  * reserved. Otherwise, only the given VM associated with the BO is used.
647  * @map_type: the mapping status that will be used to filter the VMs.
648  * @ctx: the struct that will be used in unreserve_bo_and_vms().
649  *
650  * Returns 0 for success, negative for failure.
651  */
652 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
653 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
654 				struct bo_vm_reservation_context *ctx)
655 {
656 	struct amdgpu_bo *bo = mem->bo;
657 	struct kfd_bo_va_list *entry;
658 	unsigned int i;
659 	int ret;
660 
661 	ctx->reserved = false;
662 	ctx->n_vms = 0;
663 	ctx->vm_pd = NULL;
664 	ctx->sync = &mem->sync;
665 
666 	INIT_LIST_HEAD(&ctx->list);
667 	INIT_LIST_HEAD(&ctx->duplicates);
668 
669 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
670 		if ((vm && vm != entry->bo_va->base.vm) ||
671 			(entry->is_mapped != map_type
672 			&& map_type != BO_VM_ALL))
673 			continue;
674 
675 		ctx->n_vms++;
676 	}
677 
678 	if (ctx->n_vms != 0) {
679 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
680 				     GFP_KERNEL);
681 		if (!ctx->vm_pd)
682 			return -ENOMEM;
683 	}
684 
685 	ctx->kfd_bo.priority = 0;
686 	ctx->kfd_bo.tv.bo = &bo->tbo;
687 	ctx->kfd_bo.tv.num_shared = 1;
688 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
689 
690 	i = 0;
691 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
692 		if ((vm && vm != entry->bo_va->base.vm) ||
693 			(entry->is_mapped != map_type
694 			&& map_type != BO_VM_ALL))
695 			continue;
696 
697 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
698 				&ctx->vm_pd[i]);
699 		i++;
700 	}
701 
702 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
703 				     false, &ctx->duplicates);
704 	if (!ret)
705 		ctx->reserved = true;
706 	else
707 		pr_err("Failed to reserve buffers in ttm.\n");
708 
709 	if (ret) {
710 		kfree(ctx->vm_pd);
711 		ctx->vm_pd = NULL;
712 	}
713 
714 	return ret;
715 }
716 
717 /**
718  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
719  * @ctx: Reservation context to unreserve
720  * @wait: Optionally wait for a sync object representing pending VM updates
721  * @intr: Whether the wait is interruptible
722  *
723  * Also frees any resources allocated in
724  * reserve_bo_and_(cond_)vm(s). Returns the status from
725  * amdgpu_sync_wait.
726  */
727 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
728 				 bool wait, bool intr)
729 {
730 	int ret = 0;
731 
732 	if (wait)
733 		ret = amdgpu_sync_wait(ctx->sync, intr);
734 
735 	if (ctx->reserved)
736 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
737 	kfree(ctx->vm_pd);
738 
739 	ctx->sync = NULL;
740 
741 	ctx->reserved = false;
742 	ctx->vm_pd = NULL;
743 
744 	return ret;
745 }
746 
747 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
748 				struct kfd_bo_va_list *entry,
749 				struct amdgpu_sync *sync)
750 {
751 	struct amdgpu_bo_va *bo_va = entry->bo_va;
752 	struct amdgpu_vm *vm = bo_va->base.vm;
753 
754 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
755 
756 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
757 
758 	amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
759 
760 	return 0;
761 }
762 
763 static int update_gpuvm_pte(struct amdgpu_device *adev,
764 		struct kfd_bo_va_list *entry,
765 		struct amdgpu_sync *sync)
766 {
767 	int ret;
768 	struct amdgpu_bo_va *bo_va = entry->bo_va;
769 
770 	/* Update the page tables  */
771 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
772 	if (ret) {
773 		pr_err("amdgpu_vm_bo_update failed\n");
774 		return ret;
775 	}
776 
777 	return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
778 }
779 
780 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
781 		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
782 		bool no_update_pte)
783 {
784 	int ret;
785 
786 	/* Set virtual address for the allocation */
787 	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
788 			       amdgpu_bo_size(entry->bo_va->base.bo),
789 			       entry->pte_flags);
790 	if (ret) {
791 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
792 				entry->va, ret);
793 		return ret;
794 	}
795 
796 	if (no_update_pte)
797 		return 0;
798 
799 	ret = update_gpuvm_pte(adev, entry, sync);
800 	if (ret) {
801 		pr_err("update_gpuvm_pte() failed\n");
802 		goto update_gpuvm_pte_failed;
803 	}
804 
805 	return 0;
806 
807 update_gpuvm_pte_failed:
808 	unmap_bo_from_gpuvm(adev, entry, sync);
809 	return ret;
810 }
811 
812 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
813 {
814 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
815 
816 	if (!sg)
817 		return NULL;
818 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
819 		kfree(sg);
820 		return NULL;
821 	}
822 	sg->sgl->dma_address = addr;
823 	sg->sgl->length = size;
824 #ifdef CONFIG_NEED_SG_DMA_LENGTH
825 	sg->sgl->dma_length = size;
826 #endif
827 	return sg;
828 }
829 
830 static int process_validate_vms(struct amdkfd_process_info *process_info)
831 {
832 	struct amdgpu_vm *peer_vm;
833 	int ret;
834 
835 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
836 			    vm_list_node) {
837 		ret = vm_validate_pt_pd_bos(peer_vm);
838 		if (ret)
839 			return ret;
840 	}
841 
842 	return 0;
843 }
844 
845 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
846 				 struct amdgpu_sync *sync)
847 {
848 	struct amdgpu_vm *peer_vm;
849 	int ret;
850 
851 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
852 			    vm_list_node) {
853 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
854 
855 		ret = amdgpu_sync_resv(NULL,
856 					sync, pd->tbo.base.resv,
857 					AMDGPU_FENCE_OWNER_KFD, false);
858 		if (ret)
859 			return ret;
860 	}
861 
862 	return 0;
863 }
864 
865 static int process_update_pds(struct amdkfd_process_info *process_info,
866 			      struct amdgpu_sync *sync)
867 {
868 	struct amdgpu_vm *peer_vm;
869 	int ret;
870 
871 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
872 			    vm_list_node) {
873 		ret = vm_update_pds(peer_vm, sync);
874 		if (ret)
875 			return ret;
876 	}
877 
878 	return 0;
879 }
880 
881 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
882 		       struct dma_fence **ef)
883 {
884 	struct amdkfd_process_info *info = NULL;
885 	int ret;
886 
887 	if (!*process_info) {
888 		info = kzalloc(sizeof(*info), GFP_KERNEL);
889 		if (!info)
890 			return -ENOMEM;
891 
892 		mutex_init(&info->lock);
893 		INIT_LIST_HEAD(&info->vm_list_head);
894 		INIT_LIST_HEAD(&info->kfd_bo_list);
895 		INIT_LIST_HEAD(&info->userptr_valid_list);
896 		INIT_LIST_HEAD(&info->userptr_inval_list);
897 
898 		info->eviction_fence =
899 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
900 						   current->mm);
901 		if (!info->eviction_fence) {
902 			pr_err("Failed to create eviction fence\n");
903 			ret = -ENOMEM;
904 			goto create_evict_fence_fail;
905 		}
906 
907 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
908 		atomic_set(&info->evicted_bos, 0);
909 		INIT_DELAYED_WORK(&info->restore_userptr_work,
910 				  amdgpu_amdkfd_restore_userptr_worker);
911 
912 		*process_info = info;
913 		*ef = dma_fence_get(&info->eviction_fence->base);
914 	}
915 
916 	vm->process_info = *process_info;
917 
918 	/* Validate page directory and attach eviction fence */
919 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
920 	if (ret)
921 		goto reserve_pd_fail;
922 	ret = vm_validate_pt_pd_bos(vm);
923 	if (ret) {
924 		pr_err("validate_pt_pd_bos() failed\n");
925 		goto validate_pd_fail;
926 	}
927 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
928 				  AMDGPU_FENCE_OWNER_KFD, false);
929 	if (ret)
930 		goto wait_pd_fail;
931 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
932 	if (ret)
933 		goto reserve_shared_fail;
934 	amdgpu_bo_fence(vm->root.base.bo,
935 			&vm->process_info->eviction_fence->base, true);
936 	amdgpu_bo_unreserve(vm->root.base.bo);
937 
938 	/* Update process info */
939 	mutex_lock(&vm->process_info->lock);
940 	list_add_tail(&vm->vm_list_node,
941 			&(vm->process_info->vm_list_head));
942 	vm->process_info->n_vms++;
943 	mutex_unlock(&vm->process_info->lock);
944 
945 	return 0;
946 
947 reserve_shared_fail:
948 wait_pd_fail:
949 validate_pd_fail:
950 	amdgpu_bo_unreserve(vm->root.base.bo);
951 reserve_pd_fail:
952 	vm->process_info = NULL;
953 	if (info) {
954 		/* Two fence references: one in info and one in *ef */
955 		dma_fence_put(&info->eviction_fence->base);
956 		dma_fence_put(*ef);
957 		*ef = NULL;
958 		*process_info = NULL;
959 		put_pid(info->pid);
960 create_evict_fence_fail:
961 		mutex_destroy(&info->lock);
962 		kfree(info);
963 	}
964 	return ret;
965 }
966 
967 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
968 					  void **vm, void **process_info,
969 					  struct dma_fence **ef)
970 {
971 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
972 	struct amdgpu_vm *new_vm;
973 	int ret;
974 
975 	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
976 	if (!new_vm)
977 		return -ENOMEM;
978 
979 	/* Initialize AMDGPU part of the VM */
980 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
981 	if (ret) {
982 		pr_err("Failed init vm ret %d\n", ret);
983 		goto amdgpu_vm_init_fail;
984 	}
985 
986 	/* Initialize KFD part of the VM and process info */
987 	ret = init_kfd_vm(new_vm, process_info, ef);
988 	if (ret)
989 		goto init_kfd_vm_fail;
990 
991 	*vm = (void *) new_vm;
992 
993 	return 0;
994 
995 init_kfd_vm_fail:
996 	amdgpu_vm_fini(adev, new_vm);
997 amdgpu_vm_init_fail:
998 	kfree(new_vm);
999 	return ret;
1000 }
1001 
1002 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1003 					   struct file *filp, unsigned int pasid,
1004 					   void **vm, void **process_info,
1005 					   struct dma_fence **ef)
1006 {
1007 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1008 	struct drm_file *drm_priv = filp->private_data;
1009 	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1010 	struct amdgpu_vm *avm = &drv_priv->vm;
1011 	int ret;
1012 
1013 	/* Already a compute VM? */
1014 	if (avm->process_info)
1015 		return -EINVAL;
1016 
1017 	/* Convert VM into a compute VM */
1018 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1019 	if (ret)
1020 		return ret;
1021 
1022 	/* Initialize KFD part of the VM and process info */
1023 	ret = init_kfd_vm(avm, process_info, ef);
1024 	if (ret)
1025 		return ret;
1026 
1027 	*vm = (void *)avm;
1028 
1029 	return 0;
1030 }
1031 
1032 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1033 				    struct amdgpu_vm *vm)
1034 {
1035 	struct amdkfd_process_info *process_info = vm->process_info;
1036 	struct amdgpu_bo *pd = vm->root.base.bo;
1037 
1038 	if (!process_info)
1039 		return;
1040 
1041 	/* Release eviction fence from PD */
1042 	amdgpu_bo_reserve(pd, false);
1043 	amdgpu_bo_fence(pd, NULL, false);
1044 	amdgpu_bo_unreserve(pd);
1045 
1046 	/* Update process info */
1047 	mutex_lock(&process_info->lock);
1048 	process_info->n_vms--;
1049 	list_del(&vm->vm_list_node);
1050 	mutex_unlock(&process_info->lock);
1051 
1052 	/* Release per-process resources when last compute VM is destroyed */
1053 	if (!process_info->n_vms) {
1054 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1055 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1056 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1057 
1058 		dma_fence_put(&process_info->eviction_fence->base);
1059 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1060 		put_pid(process_info->pid);
1061 		mutex_destroy(&process_info->lock);
1062 		kfree(process_info);
1063 	}
1064 }
1065 
1066 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1067 {
1068 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1069 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1070 
1071 	if (WARN_ON(!kgd || !vm))
1072 		return;
1073 
1074 	pr_debug("Destroying process vm %p\n", vm);
1075 
1076 	/* Release the VM context */
1077 	amdgpu_vm_fini(adev, avm);
1078 	kfree(vm);
1079 }
1080 
1081 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1082 {
1083 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1084 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1085 
1086 	if (WARN_ON(!kgd || !vm))
1087 		return;
1088 
1089 	pr_debug("Releasing process vm %p\n", vm);
1090 
1091 	/* The original pasid of the amdgpu vm was already released
1092 	 * when the amdgpu vm was converted into a compute vm. The
1093 	 * current pasid is managed by kfd and will be released on
1094 	 * kfd process destroy. Set the amdgpu pasid to 0 to avoid a
1095 	 * duplicate release.
1096 	 */
1097 	amdgpu_vm_release_compute(adev, avm);
1098 }
1099 
1100 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1101 {
1102 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1103 	struct amdgpu_bo *pd = avm->root.base.bo;
1104 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1105 
1106 	if (adev->asic_type < CHIP_VEGA10)
1107 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1108 	return avm->pd_phys_addr;
1109 }
1110 
1111 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1112 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1113 		void *vm, struct kgd_mem **mem,
1114 		uint64_t *offset, uint32_t flags)
1115 {
1116 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1117 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1118 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1119 	struct sg_table *sg = NULL;
1120 	uint64_t user_addr = 0;
1121 	struct amdgpu_bo *bo;
1122 	struct amdgpu_bo_param bp;
1123 	u32 domain, alloc_domain;
1124 	u64 alloc_flags;
1125 	int ret;
1126 
1127 	/*
1128 	 * Check on which domain to allocate BO
1129 	 */
1130 	if (flags & ALLOC_MEM_FLAGS_VRAM) {
1131 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1132 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1133 		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1134 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1135 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1136 	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
1137 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1138 		alloc_flags = 0;
1139 	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1140 		domain = AMDGPU_GEM_DOMAIN_GTT;
1141 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1142 		alloc_flags = 0;
1143 		if (!offset || !*offset)
1144 			return -EINVAL;
1145 		user_addr = untagged_addr(*offset);
1146 	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
1147 			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1148 		domain = AMDGPU_GEM_DOMAIN_GTT;
1149 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1150 		bo_type = ttm_bo_type_sg;
1151 		alloc_flags = 0;
1152 		if (size > UINT_MAX)
1153 			return -EINVAL;
1154 		sg = create_doorbell_sg(*offset, size);
1155 		if (!sg)
1156 			return -ENOMEM;
1157 	} else {
1158 		return -EINVAL;
1159 	}
1160 
1161 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1162 	if (!*mem) {
1163 		ret = -ENOMEM;
1164 		goto err;
1165 	}
1166 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1167 	mutex_init(&(*mem)->lock);
1168 	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1169 
1170 	/* Workaround for AQL queue wraparound bug. Map the same
1171 	 * memory twice. That means we only actually allocate half
1172 	 * the memory.
1173 	 */
1174 	if ((*mem)->aql_queue)
1175 		size = size >> 1;
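	/* Note (added for clarity): with aql_queue set, add_bo_to_vm() is later
	 * called a second time with is_aql = true, which maps the same BO again
	 * at va + bo_size (the halved size), so the full requested VA range is
	 * still covered by the single, half-sized allocation.
	 */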
1176 
1177 	(*mem)->alloc_flags = flags;
1178 
1179 	amdgpu_sync_create(&(*mem)->sync);
1180 
1181 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1182 	if (ret) {
1183 		pr_debug("Insufficient system memory\n");
1184 		goto err_reserve_limit;
1185 	}
1186 
1187 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1188 			va, size, domain_string(alloc_domain));
1189 
1190 	memset(&bp, 0, sizeof(bp));
1191 	bp.size = size;
1192 	bp.byte_align = 1;
1193 	bp.domain = alloc_domain;
1194 	bp.flags = alloc_flags;
1195 	bp.type = bo_type;
1196 	bp.resv = NULL;
1197 	ret = amdgpu_bo_create(adev, &bp, &bo);
1198 	if (ret) {
1199 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1200 				domain_string(alloc_domain), ret);
1201 		goto err_bo_create;
1202 	}
1203 	if (bo_type == ttm_bo_type_sg) {
1204 		bo->tbo.sg = sg;
1205 		bo->tbo.ttm->sg = sg;
1206 	}
1207 	bo->kfd_bo = *mem;
1208 	(*mem)->bo = bo;
1209 	if (user_addr)
1210 		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1211 
1212 	(*mem)->va = va;
1213 	(*mem)->domain = domain;
1214 	(*mem)->mapped_to_gpu_memory = 0;
1215 	(*mem)->process_info = avm->process_info;
1216 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1217 
1218 	if (user_addr) {
1219 		ret = init_user_pages(*mem, user_addr);
1220 		if (ret)
1221 			goto allocate_init_user_pages_failed;
1222 	}
1223 
1224 	if (offset)
1225 		*offset = amdgpu_bo_mmap_offset(bo);
1226 
1227 	return 0;
1228 
1229 allocate_init_user_pages_failed:
1230 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1231 	amdgpu_bo_unref(&bo);
1232 	/* Don't unreserve system mem limit twice */
1233 	goto err_reserve_limit;
1234 err_bo_create:
1235 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1236 err_reserve_limit:
1237 	mutex_destroy(&(*mem)->lock);
1238 	kfree(*mem);
1239 err:
1240 	if (sg) {
1241 		sg_free_table(sg);
1242 		kfree(sg);
1243 	}
1244 	return ret;
1245 }
1246 
1247 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1248 		struct kgd_dev *kgd, struct kgd_mem *mem)
1249 {
1250 	struct amdkfd_process_info *process_info = mem->process_info;
1251 	unsigned long bo_size = mem->bo->tbo.mem.size;
1252 	struct kfd_bo_va_list *entry, *tmp;
1253 	struct bo_vm_reservation_context ctx;
1254 	struct ttm_validate_buffer *bo_list_entry;
1255 	int ret;
1256 
1257 	mutex_lock(&mem->lock);
1258 
1259 	if (mem->mapped_to_gpu_memory > 0) {
1260 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1261 				mem->va, bo_size);
1262 		mutex_unlock(&mem->lock);
1263 		return -EBUSY;
1264 	}
1265 
1266 	mutex_unlock(&mem->lock);
1267 	/* lock is not needed after this, since mem is unused and will
1268 	 * be freed anyway
1269 	 */
1270 
1271 	/* No more MMU notifiers */
1272 	amdgpu_mn_unregister(mem->bo);
1273 
1274 	/* Make sure restore workers don't access the BO any more */
1275 	bo_list_entry = &mem->validate_list;
1276 	mutex_lock(&process_info->lock);
1277 	list_del(&bo_list_entry->head);
1278 	mutex_unlock(&process_info->lock);
1279 
1280 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1281 	if (unlikely(ret))
1282 		return ret;
1283 
1284 	/* The eviction fence should be removed by the last unmap.
1285 	 * TODO: Log an error condition if the bo still has the eviction fence
1286 	 * attached
1287 	 */
1288 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1289 					process_info->eviction_fence);
1290 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1291 		mem->va + bo_size * (1 + mem->aql_queue));
1292 
1293 	/* Remove from VM internal data structures */
1294 	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1295 		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1296 				entry, bo_size);
1297 
1298 	ret = unreserve_bo_and_vms(&ctx, false, false);
1299 
1300 	/* Free the sync object */
1301 	amdgpu_sync_free(&mem->sync);
1302 
1303 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1304 	 * remap BO. We need to free it.
1305 	 */
1306 	if (mem->bo->tbo.sg) {
1307 		sg_free_table(mem->bo->tbo.sg);
1308 		kfree(mem->bo->tbo.sg);
1309 	}
1310 
1311 	/* Free the BO */
1312 	amdgpu_bo_unref(&mem->bo);
1313 	mutex_destroy(&mem->lock);
1314 	kfree(mem);
1315 
1316 	return ret;
1317 }
1318 
1319 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1320 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1321 {
1322 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1323 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1324 	int ret;
1325 	struct amdgpu_bo *bo;
1326 	uint32_t domain;
1327 	struct kfd_bo_va_list *entry;
1328 	struct bo_vm_reservation_context ctx;
1329 	struct kfd_bo_va_list *bo_va_entry = NULL;
1330 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1331 	unsigned long bo_size;
1332 	bool is_invalid_userptr = false;
1333 
1334 	bo = mem->bo;
1335 	if (!bo) {
1336 		pr_err("Invalid BO when mapping memory to GPU\n");
1337 		return -EINVAL;
1338 	}
1339 
1340 	/* Make sure restore is not running concurrently. Since we
1341 	 * don't map invalid userptr BOs, we rely on the next restore
1342 	 * worker to do the mapping
1343 	 */
1344 	mutex_lock(&mem->process_info->lock);
1345 
1346 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1347 	 * sure that the MMU notifier is no longer running
1348 	 * concurrently and the queues are actually stopped
1349 	 */
1350 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1351 		down_write(&current->mm->mmap_sem);
1352 		is_invalid_userptr = atomic_read(&mem->invalid);
1353 		up_write(&current->mm->mmap_sem);
1354 	}
1355 
1356 	mutex_lock(&mem->lock);
1357 
1358 	domain = mem->domain;
1359 	bo_size = bo->tbo.mem.size;
1360 
1361 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1362 			mem->va,
1363 			mem->va + bo_size * (1 + mem->aql_queue),
1364 			vm, domain_string(domain));
1365 
1366 	ret = reserve_bo_and_vm(mem, vm, &ctx);
1367 	if (unlikely(ret))
1368 		goto out;
1369 
1370 	/* Userptr can be marked as "not invalid", but not actually be
1371 	 * validated yet (still in the system domain). In that case
1372 	 * the queues are still stopped and we can leave mapping for
1373 	 * the next restore worker
1374 	 */
1375 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1376 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1377 		is_invalid_userptr = true;
1378 
1379 	if (check_if_add_bo_to_vm(avm, mem)) {
1380 		ret = add_bo_to_vm(adev, mem, avm, false,
1381 				&bo_va_entry);
1382 		if (ret)
1383 			goto add_bo_to_vm_failed;
1384 		if (mem->aql_queue) {
1385 			ret = add_bo_to_vm(adev, mem, avm,
1386 					true, &bo_va_entry_aql);
1387 			if (ret)
1388 				goto add_bo_to_vm_failed_aql;
1389 		}
1390 	} else {
1391 		ret = vm_validate_pt_pd_bos(avm);
1392 		if (unlikely(ret))
1393 			goto add_bo_to_vm_failed;
1394 	}
1395 
1396 	if (mem->mapped_to_gpu_memory == 0 &&
1397 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1398 		/* Validate BO only once. The eviction fence gets added to BO
1399 		 * the first time it is mapped. Validate will wait for all
1400 		 * background evictions to complete.
1401 		 */
1402 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1403 		if (ret) {
1404 			pr_debug("Validate failed\n");
1405 			goto map_bo_to_gpuvm_failed;
1406 		}
1407 	}
1408 
1409 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1410 		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1411 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1412 					entry->va, entry->va + bo_size,
1413 					entry);
1414 
1415 			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1416 					      is_invalid_userptr);
1417 			if (ret) {
1418 				pr_err("Failed to map bo to gpuvm\n");
1419 				goto map_bo_to_gpuvm_failed;
1420 			}
1421 
1422 			ret = vm_update_pds(vm, ctx.sync);
1423 			if (ret) {
1424 				pr_err("Failed to update page directories\n");
1425 				goto map_bo_to_gpuvm_failed;
1426 			}
1427 
1428 			entry->is_mapped = true;
1429 			mem->mapped_to_gpu_memory++;
1430 			pr_debug("\t INC mapping count %d\n",
1431 					mem->mapped_to_gpu_memory);
1432 		}
1433 	}
1434 
1435 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1436 		amdgpu_bo_fence(bo,
1437 				&avm->process_info->eviction_fence->base,
1438 				true);
1439 	ret = unreserve_bo_and_vms(&ctx, false, false);
1440 
1441 	goto out;
1442 
1443 map_bo_to_gpuvm_failed:
1444 	if (bo_va_entry_aql)
1445 		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1446 add_bo_to_vm_failed_aql:
1447 	if (bo_va_entry)
1448 		remove_bo_from_vm(adev, bo_va_entry, bo_size);
1449 add_bo_to_vm_failed:
1450 	unreserve_bo_and_vms(&ctx, false, false);
1451 out:
1452 	mutex_unlock(&mem->process_info->lock);
1453 	mutex_unlock(&mem->lock);
1454 	return ret;
1455 }
1456 
1457 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1458 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1459 {
1460 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1461 	struct amdkfd_process_info *process_info =
1462 		((struct amdgpu_vm *)vm)->process_info;
1463 	unsigned long bo_size = mem->bo->tbo.mem.size;
1464 	struct kfd_bo_va_list *entry;
1465 	struct bo_vm_reservation_context ctx;
1466 	int ret;
1467 
1468 	mutex_lock(&mem->lock);
1469 
1470 	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1471 	if (unlikely(ret))
1472 		goto out;
1473 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1474 	if (ctx.n_vms == 0) {
1475 		ret = -EINVAL;
1476 		goto unreserve_out;
1477 	}
1478 
1479 	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1480 	if (unlikely(ret))
1481 		goto unreserve_out;
1482 
1483 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1484 		mem->va,
1485 		mem->va + bo_size * (1 + mem->aql_queue),
1486 		vm);
1487 
1488 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1489 		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1490 			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1491 					entry->va,
1492 					entry->va + bo_size,
1493 					entry);
1494 
1495 			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1496 			if (ret == 0) {
1497 				entry->is_mapped = false;
1498 			} else {
1499 				pr_err("failed to unmap VA 0x%llx\n",
1500 						mem->va);
1501 				goto unreserve_out;
1502 			}
1503 
1504 			mem->mapped_to_gpu_memory--;
1505 			pr_debug("\t DEC mapping count %d\n",
1506 					mem->mapped_to_gpu_memory);
1507 		}
1508 	}
1509 
1510 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1511 	 * required.
1512 	 */
1513 	if (mem->mapped_to_gpu_memory == 0 &&
1514 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1515 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1516 						process_info->eviction_fence);
1517 
1518 unreserve_out:
1519 	unreserve_bo_and_vms(&ctx, false, false);
1520 out:
1521 	mutex_unlock(&mem->lock);
1522 	return ret;
1523 }
1524 
1525 int amdgpu_amdkfd_gpuvm_sync_memory(
1526 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1527 {
1528 	struct amdgpu_sync sync;
1529 	int ret;
1530 
1531 	amdgpu_sync_create(&sync);
1532 
1533 	mutex_lock(&mem->lock);
1534 	amdgpu_sync_clone(&mem->sync, &sync);
1535 	mutex_unlock(&mem->lock);
1536 
1537 	ret = amdgpu_sync_wait(&sync, intr);
1538 	amdgpu_sync_free(&sync);
1539 	return ret;
1540 }
1541 
1542 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1543 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1544 {
1545 	int ret;
1546 	struct amdgpu_bo *bo = mem->bo;
1547 
1548 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1549 		pr_err("userptr can't be mapped to kernel\n");
1550 		return -EINVAL;
1551 	}
1552 
1553 	/* Delete kgd_mem from kfd_bo_list to avoid re-validating
1554 	 * this BO when it is restored after an eviction.
1555 	 */
1556 	mutex_lock(&mem->process_info->lock);
1557 
1558 	ret = amdgpu_bo_reserve(bo, true);
1559 	if (ret) {
1560 		pr_err("Failed to reserve bo. ret %d\n", ret);
1561 		goto bo_reserve_failed;
1562 	}
1563 
1564 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1565 	if (ret) {
1566 		pr_err("Failed to pin bo. ret %d\n", ret);
1567 		goto pin_failed;
1568 	}
1569 
1570 	ret = amdgpu_bo_kmap(bo, kptr);
1571 	if (ret) {
1572 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1573 		goto kmap_failed;
1574 	}
1575 
1576 	amdgpu_amdkfd_remove_eviction_fence(
1577 		bo, mem->process_info->eviction_fence);
1578 	list_del_init(&mem->validate_list.head);
1579 
1580 	if (size)
1581 		*size = amdgpu_bo_size(bo);
1582 
1583 	amdgpu_bo_unreserve(bo);
1584 
1585 	mutex_unlock(&mem->process_info->lock);
1586 	return 0;
1587 
1588 kmap_failed:
1589 	amdgpu_bo_unpin(bo);
1590 pin_failed:
1591 	amdgpu_bo_unreserve(bo);
1592 bo_reserve_failed:
1593 	mutex_unlock(&mem->process_info->lock);
1594 
1595 	return ret;
1596 }
1597 
1598 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1599 					      struct kfd_vm_fault_info *mem)
1600 {
1601 	struct amdgpu_device *adev;
1602 
1603 	adev = (struct amdgpu_device *)kgd;
1604 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1605 		*mem = *adev->gmc.vm_fault_info;
1606 		mb();
1607 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1608 	}
1609 	return 0;
1610 }
1611 
1612 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1613 				      struct dma_buf *dma_buf,
1614 				      uint64_t va, void *vm,
1615 				      struct kgd_mem **mem, uint64_t *size,
1616 				      uint64_t *mmap_offset)
1617 {
1618 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1619 	struct drm_gem_object *obj;
1620 	struct amdgpu_bo *bo;
1621 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1622 
1623 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1624 		/* Can't handle non-graphics buffers */
1625 		return -EINVAL;
1626 
1627 	obj = dma_buf->priv;
1628 	if (obj->dev->dev_private != adev)
1629 		/* Can't handle buffers from other devices */
1630 		return -EINVAL;
1631 
1632 	bo = gem_to_amdgpu_bo(obj);
1633 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1634 				    AMDGPU_GEM_DOMAIN_GTT)))
1635 		/* Only VRAM and GTT BOs are supported */
1636 		return -EINVAL;
1637 
1638 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1639 	if (!*mem)
1640 		return -ENOMEM;
1641 
1642 	if (size)
1643 		*size = amdgpu_bo_size(bo);
1644 
1645 	if (mmap_offset)
1646 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1647 
1648 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1649 	mutex_init(&(*mem)->lock);
1650 	(*mem)->alloc_flags =
1651 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1652 		 ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
1653 		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
1654 
1655 	(*mem)->bo = amdgpu_bo_ref(bo);
1656 	(*mem)->va = va;
1657 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1658 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1659 	(*mem)->mapped_to_gpu_memory = 0;
1660 	(*mem)->process_info = avm->process_info;
1661 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1662 	amdgpu_sync_create(&(*mem)->sync);
1663 
1664 	return 0;
1665 }
1666 
1667 /* Evict a userptr BO by stopping the queues if necessary
1668  *
1669  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1670  * cannot do any memory allocations, and cannot take any locks that
1671  * are held elsewhere while allocating memory. Therefore this is as
1672  * simple as possible, using atomic counters.
1673  *
1674  * It doesn't do anything to the BO itself. The real work happens in
1675  * restore, where we get updated page addresses. This function only
1676  * ensures that GPU access to the BO is stopped.
1677  */
1678 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1679 				struct mm_struct *mm)
1680 {
1681 	struct amdkfd_process_info *process_info = mem->process_info;
1682 	int evicted_bos;
1683 	int r = 0;
1684 
1685 	atomic_inc(&mem->invalid);
1686 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1687 	if (evicted_bos == 1) {
1688 		/* First eviction, stop the queues */
1689 		r = kgd2kfd_quiesce_mm(mm);
1690 		if (r)
1691 			pr_err("Failed to quiesce KFD\n");
1692 		schedule_delayed_work(&process_info->restore_userptr_work,
1693 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1694 	}
1695 
1696 	return r;
1697 }
1698 
1699 /* Update invalid userptr BOs
1700  *
1701  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1702  * userptr_inval_list and updates user pages for all BOs that have
1703  * been invalidated since their last update.
1704  */
1705 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1706 				     struct mm_struct *mm)
1707 {
1708 	struct kgd_mem *mem, *tmp_mem;
1709 	struct amdgpu_bo *bo;
1710 	struct ttm_operation_ctx ctx = { false, false };
1711 	int invalid, ret;
1712 
1713 	/* Move all invalidated BOs to the userptr_inval_list and
1714 	 * release their user pages by migration to the CPU domain
1715 	 */
1716 	list_for_each_entry_safe(mem, tmp_mem,
1717 				 &process_info->userptr_valid_list,
1718 				 validate_list.head) {
1719 		if (!atomic_read(&mem->invalid))
1720 			continue; /* BO is still valid */
1721 
1722 		bo = mem->bo;
1723 
1724 		if (amdgpu_bo_reserve(bo, true))
1725 			return -EAGAIN;
1726 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1727 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1728 		amdgpu_bo_unreserve(bo);
1729 		if (ret) {
1730 			pr_err("%s: Failed to invalidate userptr BO\n",
1731 			       __func__);
1732 			return -EAGAIN;
1733 		}
1734 
1735 		list_move_tail(&mem->validate_list.head,
1736 			       &process_info->userptr_inval_list);
1737 	}
1738 
1739 	if (list_empty(&process_info->userptr_inval_list))
1740 		return 0; /* All evicted userptr BOs were freed */
1741 
1742 	/* Go through userptr_inval_list and update any invalid user_pages */
1743 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1744 			    validate_list.head) {
1745 		invalid = atomic_read(&mem->invalid);
1746 		if (!invalid)
1747 			/* BO hasn't been invalidated since the last
1748 			 * revalidation attempt. Keep its BO list.
1749 			 */
1750 			continue;
1751 
1752 		bo = mem->bo;
1753 
1754 		/* Get updated user pages */
1755 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1756 		if (ret) {
1757 			pr_debug("%s: Failed to get user pages: %d\n",
1758 				__func__, ret);
1759 
1760 			/* Return the error (-EBUSY or -ENOMEM); the restore will be retried */
1761 			return ret;
1762 		}
1763 
1764 		/*
1765 		 * FIXME: Cannot ignore the return code, must hold
1766 		 * notifier_lock
1767 		 */
1768 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1769 
1770 		/* Mark the BO as valid unless it was invalidated
1771 		 * again concurrently.
1772 		 */
1773 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1774 			return -EAGAIN;
1775 	}
1776 
1777 	return 0;
1778 }
1779 
1780 /* Validate invalid userptr BOs
1781  *
1782  * Validates BOs on the userptr_inval_list, and moves them back to the
1783  * userptr_valid_list. Also updates GPUVM page tables with new page
1784  * addresses and waits for the page table updates to complete.
1785  */
1786 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1787 {
1788 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1789 	struct list_head resv_list, duplicates;
1790 	struct ww_acquire_ctx ticket;
1791 	struct amdgpu_sync sync;
1792 
1793 	struct amdgpu_vm *peer_vm;
1794 	struct kgd_mem *mem, *tmp_mem;
1795 	struct amdgpu_bo *bo;
1796 	struct ttm_operation_ctx ctx = { false, false };
1797 	int i, ret;
1798 
1799 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1800 				     sizeof(struct amdgpu_bo_list_entry),
1801 				     GFP_KERNEL);
1802 	if (!pd_bo_list_entries) {
1803 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1804 		ret = -ENOMEM;
1805 		goto out_no_mem;
1806 	}
1807 
1808 	INIT_LIST_HEAD(&resv_list);
1809 	INIT_LIST_HEAD(&duplicates);
1810 
1811 	/* Get all the page directory BOs that need to be reserved */
1812 	i = 0;
1813 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1814 			    vm_list_node)
1815 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1816 				    &pd_bo_list_entries[i++]);
1817 	/* Add the userptr_inval_list entries to resv_list */
1818 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1819 			    validate_list.head) {
1820 		list_add_tail(&mem->resv_list.head, &resv_list);
1821 		mem->resv_list.bo = mem->validate_list.bo;
1822 		mem->resv_list.num_shared = mem->validate_list.num_shared;
1823 	}
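	/* Each kgd_mem carries a separate resv_list entry that mirrors its
	 * validate_list entry: a single list_head can only be linked into
	 * one list at a time, so the BO stays on the per-process userptr
	 * list while also being placed on the reservation list here.
	 */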
1824 
1825 	/* Reserve all BOs and page tables for validation */
1826 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1827 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1828 	if (ret)
1829 		goto out_free;
1830 
1831 	amdgpu_sync_create(&sync);
1832 
1833 	ret = process_validate_vms(process_info);
1834 	if (ret)
1835 		goto unreserve_out;
1836 
1837 	/* Validate BOs and update GPUVM page tables */
1838 	list_for_each_entry_safe(mem, tmp_mem,
1839 				 &process_info->userptr_inval_list,
1840 				 validate_list.head) {
1841 		struct kfd_bo_va_list *bo_va_entry;
1842 
1843 		bo = mem->bo;
1844 
1845 		/* Validate the BO if we got user pages */
1846 		if (bo->tbo.ttm->pages[0]) {
1847 			amdgpu_bo_placement_from_domain(bo, mem->domain);
1848 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1849 			if (ret) {
1850 				pr_err("%s: failed to validate BO\n", __func__);
1851 				goto unreserve_out;
1852 			}
1853 		}
1854 
1855 		list_move_tail(&mem->validate_list.head,
1856 			       &process_info->userptr_valid_list);
1857 
1858 		/* Update mapping. If the BO was not validated
1859 		 * (because we couldn't get user pages), this will
1860 		 * clear the page table entries, which will result in
1861 		 * VM faults if the GPU tries to access the invalid
1862 		 * memory.
1863 		 */
1864 		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1865 			if (!bo_va_entry->is_mapped)
1866 				continue;
1867 
1868 			ret = update_gpuvm_pte((struct amdgpu_device *)
1869 					       bo_va_entry->kgd_dev,
1870 					       bo_va_entry, &sync);
1871 			if (ret) {
1872 				pr_err("%s: update PTE failed\n", __func__);
1873 				/* make sure this gets validated again */
1874 				atomic_inc(&mem->invalid);
1875 				goto unreserve_out;
1876 			}
1877 		}
1878 	}
1879 
1880 	/* Update page directories */
1881 	ret = process_update_pds(process_info, &sync);
1882 
1883 unreserve_out:
1884 	ttm_eu_backoff_reservation(&ticket, &resv_list);
1885 	amdgpu_sync_wait(&sync, false);
1886 	amdgpu_sync_free(&sync);
1887 out_free:
1888 	kfree(pd_bo_list_entries);
1889 out_no_mem:
1890 
1891 	return ret;
1892 }
1893 
1894 /* Worker callback to restore evicted userptr BOs
1895  *
1896  * Tries to update and validate all userptr BOs. If successful and no
1897  * concurrent evictions happened, the queues are restarted. Otherwise,
1898  * reschedule for another attempt later.
1899  */
1900 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1901 {
1902 	struct delayed_work *dwork = to_delayed_work(work);
1903 	struct amdkfd_process_info *process_info =
1904 		container_of(dwork, struct amdkfd_process_info,
1905 			     restore_userptr_work);
1906 	struct task_struct *usertask;
1907 	struct mm_struct *mm;
1908 	int evicted_bos;
1909 
1910 	evicted_bos = atomic_read(&process_info->evicted_bos);
1911 	if (!evicted_bos)
1912 		return;
1913 
1914 	/* Reference task and mm in case of concurrent process termination */
1915 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1916 	if (!usertask)
1917 		return;
1918 	mm = get_task_mm(usertask);
1919 	if (!mm) {
1920 		put_task_struct(usertask);
1921 		return;
1922 	}
1923 
1924 	mutex_lock(&process_info->lock);
1925 
1926 	if (update_invalid_user_pages(process_info, mm))
1927 		goto unlock_out;
1928 	/* userptr_inval_list can be empty if all evicted userptr BOs
1929 	 * have been freed. In that case there is nothing to validate
1930 	 * and we can just restart the queues.
1931 	 */
1932 	if (!list_empty(&process_info->userptr_inval_list)) {
1933 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1934 			goto unlock_out; /* Concurrent eviction, try again */
1935 
1936 		if (validate_invalid_user_pages(process_info))
1937 			goto unlock_out;
1938 	}
1939 	/* Final check for concurrent eviction and atomic update. If
1940 	 * another eviction happens after successful update, it will
1941 	 * be a first eviction that calls quiesce_mm. The eviction
1942 	 * reference counting inside KFD will handle this case.
1943 	 */
1944 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1945 	    evicted_bos)
1946 		goto unlock_out;
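	/* Restore succeeded: clear the local count so no retry is
	 * scheduled at the end of this function.
	 */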
1947 	evicted_bos = 0;
1948 	if (kgd2kfd_resume_mm(mm)) {
1949 		pr_err("%s: Failed to resume KFD\n", __func__);
1950 		/* No recovery from this failure. Probably the CP is
1951 		 * hanging. No point trying again.
1952 		 */
1953 	}
1954 
1955 unlock_out:
1956 	mutex_unlock(&process_info->lock);
1957 	mmput(mm);
1958 	put_task_struct(usertask);
1959 
1960 	/* If validation failed, reschedule another attempt */
1961 	if (evicted_bos)
1962 		schedule_delayed_work(&process_info->restore_userptr_work,
1963 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1964 }
1965 
1966 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1967  *   KFD process identified by process_info
1968  *
1969  * @process_info: amdkfd_process_info of the KFD process
1970  *
1971  * After memory eviction, the restore thread calls this function. It must be
1972  * called while the process is still valid. BO restore involves:
1973  *
1974  * 1.  Release the old eviction fence and create a new one
1975  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
1976  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1977  *     BOs that need to be reserved.
1978  * 4.  Reserve all the BOs
1979  * 5.  Validate PD and PT BOs.
1980  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
1981  * 7.  Add the fence to all PD and PT BOs.
1982  * 8.  Unreserve all BOs
1983  */
1984 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1985 {
1986 	struct amdgpu_bo_list_entry *pd_bo_list;
1987 	struct amdkfd_process_info *process_info = info;
1988 	struct amdgpu_vm *peer_vm;
1989 	struct kgd_mem *mem;
1990 	struct bo_vm_reservation_context ctx;
1991 	struct amdgpu_amdkfd_fence *new_fence;
1992 	int ret = 0, i;
1993 	struct list_head duplicate_save;
1994 	struct amdgpu_sync sync_obj;
1995 
1996 	INIT_LIST_HEAD(&duplicate_save);
1997 	INIT_LIST_HEAD(&ctx.list);
1998 	INIT_LIST_HEAD(&ctx.duplicates);
1999 
2000 	pd_bo_list = kcalloc(process_info->n_vms,
2001 			     sizeof(struct amdgpu_bo_list_entry),
2002 			     GFP_KERNEL);
2003 	if (!pd_bo_list)
2004 		return -ENOMEM;
2005 
2006 	i = 0;
2007 	mutex_lock(&process_info->lock);
2008 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2009 			vm_list_node)
2010 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2011 
2012 	/* Reserve all BOs and page tables/directory. Add all BOs from
2013 	 * kfd_bo_list to ctx.list
2014 	 */
2015 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2016 			    validate_list.head) {
2017 
2018 		list_add_tail(&mem->resv_list.head, &ctx.list);
2019 		mem->resv_list.bo = mem->validate_list.bo;
2020 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2021 	}
2022 
2023 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2024 				     false, &duplicate_save);
2025 	if (ret) {
2026 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2027 		goto ttm_reserve_fail;
2028 	}
2029 
2030 	amdgpu_sync_create(&sync_obj);
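	/* sync_obj collects the fences from validation and page-table
	 * updates below; they are waited on before the new eviction fence
	 * is created and attached to the BOs.
	 */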
2031 
2032 	/* Validate PDs and PTs */
2033 	ret = process_validate_vms(process_info);
2034 	if (ret)
2035 		goto validate_map_fail;
2036 
2037 	ret = process_sync_pds_resv(process_info, &sync_obj);
2038 	if (ret) {
2039 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2040 		goto validate_map_fail;
2041 	}
2042 
2043 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2044 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2045 			    validate_list.head) {
2046 
2047 		struct amdgpu_bo *bo = mem->bo;
2048 		uint32_t domain = mem->domain;
2049 		struct kfd_bo_va_list *bo_va_entry;
2050 
2051 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2052 		if (ret) {
2053 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2054 			goto validate_map_fail;
2055 		}
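		/* Add the BO's pending move fence (if any) to sync_obj so the
		 * amdgpu_sync_wait() below also covers in-flight buffer moves.
		 */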
2056 		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
2057 		if (ret) {
2058 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2059 			goto validate_map_fail;
2060 		}
2061 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2062 				    bo_list) {
2063 			ret = update_gpuvm_pte((struct amdgpu_device *)
2064 					      bo_va_entry->kgd_dev,
2065 					      bo_va_entry,
2066 					      &sync_obj);
2067 			if (ret) {
2068 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2069 				goto validate_map_fail;
2070 			}
2071 		}
2072 	}
2073 
2074 	/* Update page directories */
2075 	ret = process_update_pds(process_info, &sync_obj);
2076 	if (ret) {
2077 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2078 		goto validate_map_fail;
2079 	}
2080 
2081 	/* Wait for validate and PT updates to finish */
2082 	amdgpu_sync_wait(&sync_obj, false);
2083 
2084 	/* Release the old eviction fence and create a new one: a fence only
2085 	 * goes from unsignaled to signaled, so it cannot be reused.
2086 	 * Use the context and mm from the old fence.
2087 	 */
2088 	new_fence = amdgpu_amdkfd_fence_create(
2089 				process_info->eviction_fence->base.context,
2090 				process_info->eviction_fence->mm);
2091 	if (!new_fence) {
2092 		pr_err("Failed to create eviction fence\n");
2093 		ret = -ENOMEM;
2094 		goto validate_map_fail;
2095 	}
2096 	dma_fence_put(&process_info->eviction_fence->base);
2097 	process_info->eviction_fence = new_fence;
2098 	*ef = dma_fence_get(&new_fence->base);
2099 
2100 	/* Attach new eviction fence to all BOs */
2101 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2102 		validate_list.head)
2103 		amdgpu_bo_fence(mem->bo,
2104 			&process_info->eviction_fence->base, true);
2105 
2106 	/* Attach eviction fence to PD / PT BOs */
2107 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2108 			    vm_list_node) {
2109 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2110 
2111 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2112 	}
2113 
2114 validate_map_fail:
2115 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2116 	amdgpu_sync_free(&sync_obj);
2117 ttm_reserve_fail:
2118 	mutex_unlock(&process_info->lock);
2119 	kfree(pd_bo_list);
2120 	return ret;
2121 }
2122 
2123 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2124 {
2125 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2126 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2127 	int ret;
2128 
2129 	if (!info || !gws)
2130 		return -EINVAL;
2131 
2132 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2133 	if (!*mem)
2134 		return -ENOMEM;
2135 
2136 	mutex_init(&(*mem)->lock);
2137 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
2138 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2139 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2140 	(*mem)->process_info = process_info;
2141 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2142 	amdgpu_sync_create(&(*mem)->sync);
2143 
2144 
2145 	/* Validate gws bo the first time it is added to process */
2146 	mutex_lock(&(*mem)->process_info->lock);
2147 	ret = amdgpu_bo_reserve(gws_bo, false);
2148 	if (unlikely(ret)) {
2149 		pr_err("Reserve gws bo failed %d\n", ret);
2150 		goto bo_reservation_failure;
2151 	}
2152 
2153 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2154 	if (ret) {
2155 		pr_err("GWS BO validate failed %d\n", ret);
2156 		goto bo_validation_failure;
2157 	}
2158 	/* The GWS resource is shared between amdgpu and amdkfd.
2159 	 * Add the process eviction fence to the BO so they can
2160 	 * evict each other.
2161 	 */
2162 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2163 	if (ret)
2164 		goto reserve_shared_fail;
2165 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2166 	amdgpu_bo_unreserve(gws_bo);
2167 	mutex_unlock(&(*mem)->process_info->lock);
2168 
2169 	return ret;
2170 
2171 reserve_shared_fail:
2172 bo_validation_failure:
2173 	amdgpu_bo_unreserve(gws_bo);
2174 bo_reservation_failure:
2175 	mutex_unlock(&(*mem)->process_info->lock);
2176 	amdgpu_sync_free(&(*mem)->sync);
2177 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2178 	amdgpu_bo_unref(&gws_bo);
2179 	mutex_destroy(&(*mem)->lock);
2180 	kfree(*mem);
2181 	*mem = NULL;
2182 	return ret;
2183 }
2184 
2185 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2186 {
2187 	int ret;
2188 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2189 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2190 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2191 
2192 	/* Remove BO from process's validate list so restore worker won't touch
2193 	 * it anymore
2194 	 */
2195 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2196 
2197 	ret = amdgpu_bo_reserve(gws_bo, false);
2198 	if (unlikely(ret)) {
2199 		pr_err("Reserve gws bo failed %d\n", ret);
2200 		//TODO add BO back to validate_list?
2201 		return ret;
2202 	}
2203 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2204 			process_info->eviction_fence);
2205 	amdgpu_bo_unreserve(gws_bo);
2206 	amdgpu_sync_free(&kgd_mem->sync);
2207 	amdgpu_bo_unref(&gws_bo);
2208 	mutex_destroy(&kgd_mem->lock);
2209 	kfree(mem);
2210 	return 0;
2211 }
2212