/*	$NetBSD: amdgpu_object.c,v 1.7 2021/12/19 12:33:19 riastradh Exp $	*/

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_object.c,v 1.7 2021/12/19 12:33:19 riastradh Exp $");

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include <linux/nbsd-namespace.h>

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */
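
/*
 * Illustrative only, not part of the original file: a typical
 * kernel-internal allocation pairs amdgpu_bo_create_kernel() with
 * amdgpu_bo_free_kernel().  The names "adev" and "vram_bo" below are
 * placeholders.
 *
 *	struct amdgpu_bo *vram_bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *	    AMDGPU_GEM_DOMAIN_VRAM, &vram_bo, &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&vram_bo, &gpu_addr, &cpu_addr);
 */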

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
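
/*
 * A minimal sketch (not from the original file): callers usually OR
 * domains together to express a preferred placement with a fallback,
 * e.g. VRAM falling back to GTT.  TTM tries the placements in the
 * order this function fills them in.
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *	    AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */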

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
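
/*
 * Illustrative usage (an assumption, not from the original file): the
 * BO comes back still reserved, so the caller can initialize the
 * mapping before unreserving it:
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *	    AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	memset(cpu_addr, 0, size);
 *	amdgpu_bo_unreserve(bo);
 */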

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested domain has enough total memory for the BO */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %"PRIu64"\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		.resv = bp->resv,
		.flags = bp->type != ttm_bo_type_kernel ?
			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
					      NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
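
/*
 * Hedged sketch of caller-side parameter setup (names are placeholders,
 * not from the original file).  A userspace-visible BO might be
 * requested like this:
 *
 *	struct amdgpu_bo_param bp;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */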

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow bos. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
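
/*
 * Example (illustrative only): a CPU-accessible BO can be mapped,
 * written through the returned kernel virtual address, and unmapped
 * again with amdgpu_bo_kunmap():
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *	memcpy(ptr, data, len);
 *	amdgpu_bo_kunmap(bo);
 */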

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
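
/*
 * Pinning requires the BO to be reserved; a minimal sketch (not from
 * the original file):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 */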

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
#ifdef __NetBSD__
	if (adev->gmc.aper_base)
		pmap_pv_track(adev->gmc.aper_base, adev->gmc.aper_size);
#endif
	DRM_INFO("Detected VRAM RAM=%"PRIu64"M, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
#ifdef __NetBSD__
	if (adev->gmc.aper_base)
		pmap_pv_untrack(adev->gmc.aper_base, adev->gmc.aper_size);
#endif
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

#ifndef __NetBSD__		/* XXX unused? */
/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_bo_mmap_obj() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	return ttm_bo_mmap_obj(vma, &bo->tbo);
}
#endif

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags of a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
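
/*
 * Illustrative two-step query (an assumption, not from the original
 * file): first ask for the size, then fetch the payload:
 *
 *	uint32_t size;
 *	uint64_t flags;
 *	void *buf;
 *
 *	amdgpu_bo_get_metadata(bo, NULL, 0, &size, NULL);
 *	buf = size ? kmalloc(size, GFP_KERNEL) : NULL;
 *	if (buf)
 *		amdgpu_bo_get_metadata(bo, buf, size, NULL, &flags);
 */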

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}
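
/*
 * Example (hedged sketch, not from the original file): after a GPU
 * fill, the resulting fence is attached to the BO so later users wait
 * for completion; this mirrors the pattern used in
 * amdgpu_bo_do_create() above.
 *
 *	struct dma_fence *fence;
 *
 *	r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 *	if (!r) {
 *		amdgpu_bo_fence(bo, fence, false);
 *		dma_fence_put(fence);
 *	}
 */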

/**
 * amdgpu_bo_sync_wait - Wait for BO reservation fences
 *
 * @bo: buffer object
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}