xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c (revision 2da68a77)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects, which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_vm *vmbo;

	vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&vmbo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&vmbo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object is an &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy ||
	    bo->destroy == &amdgpu_bo_vm_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

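/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that wants VRAM with a GTT fallback passes both domain bits; the order of
 * the if-blocks above fixes the priority, so placements[0] becomes
 * TTM_PL_VRAM and placements[1] becomes TTM_PL_TT, and TTM tries them in
 * that order when validating the BO:
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					     AMDGPU_GEM_DOMAIN_GTT);
 */
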
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: for bo_ptr, a new BO is created only if it points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

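/*
 * Example (illustrative sketch, not from this file): callers that need to
 * touch the BO while it is still reserved, e.g. to initialize it before
 * anyone else can see it, use the _reserved variant and unreserve
 * themselves; "my_bo" and "my_cpu_ptr" are hypothetical names:
 *
 *	struct amdgpu_bo *my_bo = NULL;
 *	void *my_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &my_bo,
 *				      NULL, &my_cpu_ptr);
 *	if (!r) {
 *		memset(my_cpu_ptr, 0, PAGE_SIZE);
 *		amdgpu_bo_unreserve(my_bo);
 *	}
 */
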
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: for bo_ptr, a new BO is created only if it points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

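/*
 * Example (illustrative sketch, not from this file): the usual lifecycle of
 * a kernel-internal scratch buffer, paired with amdgpu_bo_free_kernel()
 * further below; "my_bo" and friends are hypothetical names:
 *
 *	struct amdgpu_bo *my_bo = NULL;
 *	u64 my_gpu_addr;
 *	void *my_cpu_ptr;
 *
 *	if (!amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				     AMDGPU_GEM_DOMAIN_GTT, &my_bo,
 *				     &my_gpu_addr, &my_cpu_ptr)) {
 *		... use my_cpu_ptr / my_gpu_addr ...
 *		amdgpu_bo_free_kernel(&my_bo, &my_gpu_addr, &my_cpu_ptr);
 *	}
 */
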
/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

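/*
 * Example (illustrative sketch, not from this file): this is how firmware-
 * reserved regions are typically claimed, by pinning a BO over a fixed VRAM
 * range so the memory manager never hands it out; the offset and size values
 * here are made up:
 *
 *	struct amdgpu_bo *stolen = NULL;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, 0x100000, 0x10000,
 *				       AMDGPU_GEM_DOMAIN_VRAM,
 *				       &stolen, NULL);
 */
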
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size fits in the total memory of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (size < man->size)
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (size < man->size)
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocate 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (adev->ras_enabled)
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

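/*
 * Example (illustrative sketch, not from this file): a minimal direct use of
 * amdgpu_bo_create() for a 1 MiB, page-aligned, CPU-accessible GTT buffer;
 * in practice most callers go through the amdgpu_bo_create_{kernel,user,vm}
 * wrappers instead:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 1 << 20;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
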
/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * the number of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
	/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
	 * is initialized.
	 */
	bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
	return r;
}

/**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
 * @vmbo: BO that will be inserted into the shadow list
 *
 * Inserts a BO into the shadow list.
 */
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
	mutex_unlock(&adev->shadow_list_lock);
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a GPU
 * reset where VRAM context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

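/*
 * Example (illustrative sketch, not from this file): CPU access to a
 * reserved BO is bracketed by kmap/kunmap; the mapping is cached in
 * bo->kmap, so repeated amdgpu_bo_kmap() calls on an already mapped BO are
 * cheap. "data" and "len" are hypothetical:
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */
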
/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

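/*
 * Example (illustrative sketch, not from this file): refcounting follows the
 * usual get/put pattern; amdgpu_bo_unref() takes a double pointer so it can
 * clear the caller's reference (ref is NULL afterwards):
 *
 *	struct amdgpu_bo *ref = amdgpu_bo_ref(bo);
 *
 *	... hand "ref" to some longer-lived structure ...
 *
 *	amdgpu_bo_unref(&ref);
 */
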
/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

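/*
 * Example (illustrative sketch, not from this file): pinning a scanout
 * buffer into the first 256 MiB of VRAM while the BO is reserved; the range
 * is made up. With min_offset == max_offset == 0 the call behaves like
 * amdgpu_bo_pin():
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
 *					     0, 256ULL << 20);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
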
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize the amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
				adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		if (!adev->gmc.xgmi.connected_to_cpu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by the GEM ioctl or kernel
 * driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets the buffer object's tiling flags. Used by the GEM ioctl or kernel
 * driver to query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->resource->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem)
{
	unsigned int domain;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		*vram_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		*gtt_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		*cpu_mem += amdgpu_bo_size(bo);
		break;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || adev->shutdown)
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a vm_fault_t code otherwise.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset;
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	offset = bo->resource->start << PAGE_SHIFT;
	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	offset = bo->resource->start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}

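/*
 * Example (illustrative sketch, not from this file): after submitting a GPU
 * job that writes the BO, its fence is attached to the BO's reservation
 * object so later users wait for the write to finish; "job" is a
 * hypothetical amdgpu_job:
 *
 *	amdgpu_bo_fence(bo, &job->base.s_fence->finished, false);
 *
 * Passing shared == true would add the fence with DMA_RESV_USAGE_READ
 * instead of DMA_RESV_USAGE_WRITE.
 */
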
/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

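/*
 * Example (illustrative sketch, not from this file): interruptibly waiting
 * for all fences on a BO that were not emitted by the given owner, here the
 * VM manager:
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_VM, true);
 *	if (r)
 *		return r;
 */
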
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned int domain;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
			id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif