1 /*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26 /*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32 #include <drm/ttm/ttm_bo_api.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_placement.h>
35 #include <drm/ttm/ttm_module.h>
36 #include <drm/ttm/ttm_page_alloc.h>
37 #include <drm/drmP.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swiotlb.h>
42 #include <linux/swap.h>
43 #include <linux/pagemap.h>
44 #include <linux/debugfs.h>
45 #if 0
46 #include <linux/iommu.h>
47 #endif
48 #include "amdgpu.h"
49 #include "amdgpu_object.h"
50 #include "amdgpu_trace.h"
51 #include "amdgpu_amdkfd.h"
52 #include "bif/bif_4_1_d.h"
53
54 #include <linux/pfn_t.h>
55
56 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
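/*
 * Illustrative note (added, not from the original source): with 4 KiB
 * pages (PAGE_SHIFT == 12) this works out to 0x100000000ULL >> 12 ==
 * 0x100000, i.e. GEM mmap offsets handed to userspace start 4 GiB into
 * the fake file offset space so they cannot collide with ordinary
 * offsets below that mark.
 */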
57
58 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
59 struct ttm_mem_reg *mem, unsigned num_pages,
60 uint64_t offset, unsigned window,
61 struct amdgpu_ring *ring,
62 uint64_t *addr);
63
64 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
65 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
66
67 /*
68 * Global memory.
69 */
70
71 /**
72 * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
73 * memory object
74 *
75 * @ref: Object for initialization.
76 *
77 * This is called by drm_global_item_ref() when an object is being
78 * initialized.
79 */
80 static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
81 {
82 return ttm_mem_global_init(ref->object);
83 }
84
85 /**
86 * amdgpu_ttm_mem_global_release - Drop reference to a memory object
87 *
88 * @ref: Object being removed
89 *
90 * This is called by drm_global_item_unref() when an object is being
91 * released.
92 */
93 static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
94 {
95 ttm_mem_global_release(ref->object);
96 }
97
98 /**
99 * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
100 *
101 * @adev: AMDGPU device for which the global structures need to be registered.
102 *
103 * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
104 * during bring up.
105 */
106 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
107 {
108 struct drm_global_reference *global_ref;
109 int r;
110
111 /* ensure reference is false in case init fails */
112 adev->mman.mem_global_referenced = false;
113
114 global_ref = &adev->mman.mem_global_ref;
115 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
116 global_ref->size = sizeof(struct ttm_mem_global);
117 global_ref->init = &amdgpu_ttm_mem_global_init;
118 global_ref->release = &amdgpu_ttm_mem_global_release;
119 r = drm_global_item_ref(global_ref);
120 if (r) {
121 DRM_ERROR("Failed setting up TTM memory accounting "
122 "subsystem.\n");
123 goto error_mem;
124 }
125
126 adev->mman.bo_global_ref.mem_glob =
127 adev->mman.mem_global_ref.object;
128 global_ref = &adev->mman.bo_global_ref.ref;
129 global_ref->global_type = DRM_GLOBAL_TTM_BO;
130 global_ref->size = sizeof(struct ttm_bo_global);
131 global_ref->init = &ttm_bo_global_init;
132 global_ref->release = &ttm_bo_global_release;
133 r = drm_global_item_ref(global_ref);
134 if (r) {
135 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
136 goto error_bo;
137 }
138
139 lockinit(&adev->mman.gtt_window_lock, "agmgtwl", 0, LK_CANRECURSE);
140
141 adev->mman.mem_global_referenced = true;
142
143 return 0;
144
145 error_bo:
146 drm_global_item_unref(&adev->mman.mem_global_ref);
147 error_mem:
148 return r;
149 }
150
151 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
152 {
153 if (adev->mman.mem_global_referenced) {
154 mutex_destroy(&adev->mman.gtt_window_lock);
155 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
156 drm_global_item_unref(&adev->mman.mem_global_ref);
157 adev->mman.mem_global_referenced = false;
158 }
159 }
160
161 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
162 {
163 return 0;
164 }
165
166 /**
167 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
168 * memory request.
169 *
170 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
171 * @type: The type of memory requested
172 * @man: The memory type manager for each domain
173 *
174 * This is called by ttm_bo_init_mm() when a buffer object is being
175 * initialized.
176 */
177 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
178 struct ttm_mem_type_manager *man)
179 {
180 struct amdgpu_device *adev;
181
182 adev = amdgpu_ttm_adev(bdev);
183
184 switch (type) {
185 case TTM_PL_SYSTEM:
186 /* System memory */
187 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
188 man->available_caching = TTM_PL_MASK_CACHING;
189 man->default_caching = TTM_PL_FLAG_CACHED;
190 break;
191 case TTM_PL_TT:
192 /* GTT memory */
193 man->func = &amdgpu_gtt_mgr_func;
194 man->gpu_offset = adev->gmc.gart_start;
195 man->available_caching = TTM_PL_MASK_CACHING;
196 man->default_caching = TTM_PL_FLAG_CACHED;
197 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
198 break;
199 case TTM_PL_VRAM:
200 /* "On-card" video ram */
201 man->func = &amdgpu_vram_mgr_func;
202 man->gpu_offset = adev->gmc.vram_start;
203 man->flags = TTM_MEMTYPE_FLAG_FIXED |
204 TTM_MEMTYPE_FLAG_MAPPABLE;
205 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
206 man->default_caching = TTM_PL_FLAG_WC;
207 break;
208 case AMDGPU_PL_GDS:
209 case AMDGPU_PL_GWS:
210 case AMDGPU_PL_OA:
211 /* On-chip GDS memory */
212 man->func = &ttm_bo_manager_func;
213 man->gpu_offset = 0;
214 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
215 man->available_caching = TTM_PL_FLAG_UNCACHED;
216 man->default_caching = TTM_PL_FLAG_UNCACHED;
217 break;
218 default:
219 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
220 return -EINVAL;
221 }
222 return 0;
223 }
224
225 /**
226 * amdgpu_evict_flags - Compute placement flags
227 *
228 * @bo: The buffer object to evict
229 * @placement: Possible destination(s) for evicted BO
230 *
231 * Fill in placement data when ttm_bo_evict() is called
232 */
233 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
234 struct ttm_placement *placement)
235 {
236 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
237 struct amdgpu_bo *abo;
238 static const struct ttm_place placements = {
239 .fpfn = 0,
240 .lpfn = 0,
241 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
242 };
243
244 /* Don't handle scatter gather BOs */
245 if (bo->type == ttm_bo_type_sg) {
246 placement->num_placement = 0;
247 placement->num_busy_placement = 0;
248 return;
249 }
250
251 /* Object isn't an AMDGPU object so ignore */
252 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
253 placement->placement = &placements;
254 placement->busy_placement = &placements;
255 placement->num_placement = 1;
256 placement->num_busy_placement = 1;
257 return;
258 }
259
260 abo = ttm_to_amdgpu_bo(bo);
261 switch (bo->mem.mem_type) {
262 case TTM_PL_VRAM:
263 if (!adev->mman.buffer_funcs_enabled) {
264 /* Move to system memory */
265 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
266 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
267 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
268 amdgpu_bo_in_cpu_visible_vram(abo)) {
269
270 /* Try evicting to the CPU inaccessible part of VRAM
271 * first, but only set GTT as busy placement, so this
272 * BO will be evicted to GTT rather than causing other
273 * BOs to be evicted from VRAM
274 */
275 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
276 AMDGPU_GEM_DOMAIN_GTT);
277 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
278 abo->placements[0].lpfn = 0;
279 abo->placement.busy_placement = &abo->placements[1];
280 abo->placement.num_busy_placement = 1;
281 } else {
282 /* Move to GTT memory */
283 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
284 }
285 break;
286 case TTM_PL_TT:
287 default:
288 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
289 }
290 *placement = abo->placement;
291 }
292
293 /**
294 * amdgpu_verify_access - Verify access for a mmap call
295 *
296 * @bo: The buffer object to map
297 * @filp: The file pointer from the process performing the mmap
298 *
299 * This is called by ttm_bo_mmap() to verify whether a process
300 * has the right to mmap a BO to their process space.
301 */
302 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
303 {
304 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
305
306 /*
307 * Don't verify access for KFD BOs. They don't have a GEM
308 * object associated with them.
309 */
310 if (abo->kfd_bo)
311 return 0;
312
313 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
314 return -EPERM;
315 return drm_vma_node_verify_access(&abo->gem_base.vma_node,
316 filp->private_data);
317 }
318
319 /**
320 * amdgpu_move_null - Register memory for a buffer object
321 *
322 * @bo: The bo to assign the memory to
323 * @new_mem: The memory to be assigned.
324 *
325 * Assign the memory from new_mem to the memory of the buffer object bo.
326 */
327 static void amdgpu_move_null(struct ttm_buffer_object *bo,
328 struct ttm_mem_reg *new_mem)
329 {
330 struct ttm_mem_reg *old_mem = &bo->mem;
331
332 BUG_ON(old_mem->mm_node != NULL);
333 *old_mem = *new_mem;
334 new_mem->mm_node = NULL;
335 }
336
337 /**
338 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
339 *
340 * @bo: The bo to assign the memory to.
341 * @mm_node: Memory manager node for drm allocator.
342 * @mem: The region where the bo resides.
343 *
344 */
345 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
346 struct drm_mm_node *mm_node,
347 struct ttm_mem_reg *mem)
348 {
349 uint64_t addr = 0;
350
351 if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
352 addr = mm_node->start << PAGE_SHIFT;
353 addr += bo->bdev->man[mem->mem_type].gpu_offset;
354 }
355 return addr;
356 }
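/*
 * Worked example (illustrative sketch, not part of the original code):
 * for a node whose drm_mm_node.start is 0x200 pages in a domain whose
 * man->gpu_offset equals adev->gmc.gart_start, the result is
 * (0x200 << PAGE_SHIFT) + gart_start. A TTM_PL_TT region that has no
 * GART address assigned yet yields 0 instead.
 */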
357
358 /**
359 * amdgpu_find_mm_node - Helper function that finds the drm_mm_node corresponding to
360 * @offset. It also adjusts @offset so that it falls within the returned drm_mm_node.
361 *
362 * @mem: The region where the bo resides.
363 * @offset: The offset that drm_mm_node is used for finding.
364 *
365 */
366 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
367 unsigned long *offset)
368 {
369 struct drm_mm_node *mm_node = mem->mm_node;
370
371 while (*offset >= (mm_node->size << PAGE_SHIFT)) {
372 *offset -= (mm_node->size << PAGE_SHIFT);
373 ++mm_node;
374 }
375 return mm_node;
376 }
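/*
 * Illustrative walk-through (assumption, not original code): with two
 * nodes of 256 pages each and *offset equal to 300 pages worth of
 * bytes, the loop skips the first node, reduces *offset to 44 pages
 * worth of bytes and returns the second node.
 */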
377
378 /**
379 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
380 *
381 * The function copies @size bytes from {src->mem + src->offset} to
382 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
383 * move and different for a BO to BO copy.
384 *
385 * @f: Returns the last fence if multiple jobs are submitted.
386 */
387 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
388 struct amdgpu_copy_mem *src,
389 struct amdgpu_copy_mem *dst,
390 uint64_t size,
391 struct reservation_object *resv,
392 struct dma_fence **f)
393 {
394 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
395 struct drm_mm_node *src_mm, *dst_mm;
396 uint64_t src_node_start, dst_node_start, src_node_size,
397 dst_node_size, src_page_offset, dst_page_offset;
398 struct dma_fence *fence = NULL;
399 int r = 0;
400 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
401 AMDGPU_GPU_PAGE_SIZE);
402
403 if (!adev->mman.buffer_funcs_enabled) {
404 DRM_ERROR("Trying to move memory with ring turned off.\n");
405 return -EINVAL;
406 }
407
408 src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
409 src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
410 src->offset;
411 src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
412 src_page_offset = src_node_start & (PAGE_SIZE - 1);
413
414 dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
415 dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
416 dst->offset;
417 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
418 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
419
420 mutex_lock(&adev->mman.gtt_window_lock);
421
422 while (size) {
423 unsigned long cur_size;
424 uint64_t from = src_node_start, to = dst_node_start;
425 struct dma_fence *next;
426
427 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
428 * begins at an offset, then adjust the size accordingly
429 */
430 cur_size = min3(min(src_node_size, dst_node_size), size,
431 GTT_MAX_BYTES);
432 if (cur_size + src_page_offset > GTT_MAX_BYTES ||
433 cur_size + dst_page_offset > GTT_MAX_BYTES)
434 cur_size -= max(src_page_offset, dst_page_offset);
435
436 /* Map only what needs to be accessed. Map src to window 0 and
437 * dst to window 1
438 */
439 if (src->mem->mem_type == TTM_PL_TT &&
440 !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
441 r = amdgpu_map_buffer(src->bo, src->mem,
442 PFN_UP(cur_size + src_page_offset),
443 src_node_start, 0, ring,
444 &from);
445 if (r)
446 goto error;
447 /* Adjust the offset because amdgpu_map_buffer returns
448 * start of mapped page
449 */
450 from += src_page_offset;
451 }
452
453 if (dst->mem->mem_type == TTM_PL_TT &&
454 !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
455 r = amdgpu_map_buffer(dst->bo, dst->mem,
456 PFN_UP(cur_size + dst_page_offset),
457 dst_node_start, 1, ring,
458 &to);
459 if (r)
460 goto error;
461 to += dst_page_offset;
462 }
463
464 r = amdgpu_copy_buffer(ring, from, to, cur_size,
465 resv, &next, false, true);
466 if (r)
467 goto error;
468
469 dma_fence_put(fence);
470 fence = next;
471
472 size -= cur_size;
473 if (!size)
474 break;
475
476 src_node_size -= cur_size;
477 if (!src_node_size) {
478 src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
479 src->mem);
480 src_node_size = (src_mm->size << PAGE_SHIFT);
481 } else {
482 src_node_start += cur_size;
483 src_page_offset = src_node_start & (PAGE_SIZE - 1);
484 }
485 dst_node_size -= cur_size;
486 if (!dst_node_size) {
487 dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
488 dst->mem);
489 dst_node_size = (dst_mm->size << PAGE_SHIFT);
490 } else {
491 dst_node_start += cur_size;
492 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
493 }
494 }
495 error:
496 mutex_unlock(&adev->mman.gtt_window_lock);
497 if (f)
498 *f = dma_fence_get(fence);
499 dma_fence_put(fence);
500 return r;
501 }
502
503 /**
504 * amdgpu_move_blit - Copy an entire buffer to another buffer
505 *
506 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
507 * help move buffers to and from VRAM.
508 */
509 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
510 bool evict, bool no_wait_gpu,
511 struct ttm_mem_reg *new_mem,
512 struct ttm_mem_reg *old_mem)
513 {
514 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
515 struct amdgpu_copy_mem src, dst;
516 struct dma_fence *fence = NULL;
517 int r;
518
519 src.bo = bo;
520 dst.bo = bo;
521 src.mem = old_mem;
522 dst.mem = new_mem;
523 src.offset = 0;
524 dst.offset = 0;
525
526 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
527 new_mem->num_pages << PAGE_SHIFT,
528 bo->resv, &fence);
529 if (r)
530 goto error;
531
532 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
533 dma_fence_put(fence);
534 return r;
535
536 error:
537 if (fence)
538 dma_fence_wait(fence, false);
539 dma_fence_put(fence);
540 return r;
541 }
542
543 /**
544 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
545 *
546 * Called by amdgpu_bo_move().
547 */
548 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
549 struct ttm_operation_ctx *ctx,
550 struct ttm_mem_reg *new_mem)
551 {
552 struct amdgpu_device *adev;
553 struct ttm_mem_reg *old_mem = &bo->mem;
554 struct ttm_mem_reg tmp_mem;
555 struct ttm_place placements;
556 struct ttm_placement placement;
557 int r;
558
559 adev = amdgpu_ttm_adev(bo->bdev);
560
561 /* create space/pages for new_mem in GTT space */
562 tmp_mem = *new_mem;
563 tmp_mem.mm_node = NULL;
564 placement.num_placement = 1;
565 placement.placement = &placements;
566 placement.num_busy_placement = 1;
567 placement.busy_placement = &placements;
568 placements.fpfn = 0;
569 placements.lpfn = 0;
570 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
571 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
572 if (unlikely(r)) {
573 return r;
574 }
575
576 /* set caching flags */
577 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
578 if (unlikely(r)) {
579 goto out_cleanup;
580 }
581
582 /* Bind the memory to the GTT space */
583 r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
584 if (unlikely(r)) {
585 goto out_cleanup;
586 }
587
588 /* blit VRAM to GTT */
589 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
590 if (unlikely(r)) {
591 goto out_cleanup;
592 }
593
594 /* move BO (in tmp_mem) to new_mem */
595 r = ttm_bo_move_ttm(bo, ctx, new_mem);
596 out_cleanup:
597 ttm_bo_mem_put(bo, &tmp_mem);
598 return r;
599 }
600
601 /**
602 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
603 *
604 * Called by amdgpu_bo_move().
605 */
606 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
607 struct ttm_operation_ctx *ctx,
608 struct ttm_mem_reg *new_mem)
609 {
610 struct amdgpu_device *adev;
611 struct ttm_mem_reg *old_mem = &bo->mem;
612 struct ttm_mem_reg tmp_mem;
613 struct ttm_placement placement;
614 struct ttm_place placements;
615 int r;
616
617 adev = amdgpu_ttm_adev(bo->bdev);
618
619 /* make space in GTT for old_mem buffer */
620 tmp_mem = *new_mem;
621 tmp_mem.mm_node = NULL;
622 placement.num_placement = 1;
623 placement.placement = &placements;
624 placement.num_busy_placement = 1;
625 placement.busy_placement = &placements;
626 placements.fpfn = 0;
627 placements.lpfn = 0;
628 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
629 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
630 if (unlikely(r)) {
631 return r;
632 }
633
634 /* move/bind old memory to GTT space */
635 r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
636 if (unlikely(r)) {
637 goto out_cleanup;
638 }
639
640 /* copy to VRAM */
641 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
642 if (unlikely(r)) {
643 goto out_cleanup;
644 }
645 out_cleanup:
646 ttm_bo_mem_put(bo, &tmp_mem);
647 return r;
648 }
649
650 /**
651 * amdgpu_bo_move - Move a buffer object to a new memory location
652 *
653 * Called by ttm_bo_handle_move_mem()
654 */
655 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
656 struct ttm_operation_ctx *ctx,
657 struct ttm_mem_reg *new_mem)
658 {
659 struct amdgpu_device *adev;
660 struct amdgpu_bo *abo;
661 struct ttm_mem_reg *old_mem = &bo->mem;
662 int r;
663
664 /* Can't move a pinned BO */
665 abo = ttm_to_amdgpu_bo(bo);
666 if (WARN_ON_ONCE(abo->pin_count > 0))
667 return -EINVAL;
668
669 adev = amdgpu_ttm_adev(bo->bdev);
670
671 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
672 amdgpu_move_null(bo, new_mem);
673 return 0;
674 }
675 if ((old_mem->mem_type == TTM_PL_TT &&
676 new_mem->mem_type == TTM_PL_SYSTEM) ||
677 (old_mem->mem_type == TTM_PL_SYSTEM &&
678 new_mem->mem_type == TTM_PL_TT)) {
679 /* bind is enough */
680 amdgpu_move_null(bo, new_mem);
681 return 0;
682 }
683
684 if (!adev->mman.buffer_funcs_enabled)
685 goto memcpy;
686
687 if (old_mem->mem_type == TTM_PL_VRAM &&
688 new_mem->mem_type == TTM_PL_SYSTEM) {
689 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
690 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
691 new_mem->mem_type == TTM_PL_VRAM) {
692 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
693 } else {
694 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
695 new_mem, old_mem);
696 }
697
698 if (r) {
699 memcpy:
700 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
701 if (r) {
702 return r;
703 }
704 }
705
706 if (bo->type == ttm_bo_type_device &&
707 new_mem->mem_type == TTM_PL_VRAM &&
708 old_mem->mem_type != TTM_PL_VRAM) {
709 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
710 * accesses the BO after it's moved.
711 */
712 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
713 }
714
715 /* update statistics */
716 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
717 return 0;
718 }
719
720 /**
721 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
722 *
723 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
724 */
725 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
726 {
727 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
728 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
729 struct drm_mm_node *mm_node = mem->mm_node;
730
731 mem->bus.addr = NULL;
732 mem->bus.offset = 0;
733 mem->bus.size = mem->num_pages << PAGE_SHIFT;
734 mem->bus.base = 0;
735 mem->bus.is_iomem = false;
736 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
737 return -EINVAL;
738 switch (mem->mem_type) {
739 case TTM_PL_SYSTEM:
740 /* system memory */
741 return 0;
742 case TTM_PL_TT:
743 break;
744 case TTM_PL_VRAM:
745 mem->bus.offset = mem->start << PAGE_SHIFT;
746 /* check if it's visible */
747 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
748 return -EINVAL;
749 /* Only physically contiguous buffers apply. In a contiguous
750 * buffer, size of the first mm_node would match the number of
751 * pages in ttm_mem_reg.
752 */
753 if (adev->mman.aper_base_kaddr &&
754 (mm_node->size == mem->num_pages))
755 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
756 mem->bus.offset;
757
758 mem->bus.base = adev->gmc.aper_base;
759 mem->bus.is_iomem = true;
760 break;
761 default:
762 return -EINVAL;
763 }
764 return 0;
765 }
766
767 static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
768 {
769 }
770
771 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
772 unsigned long page_offset)
773 {
774 struct drm_mm_node *mm;
775 unsigned long offset = (page_offset << PAGE_SHIFT);
776
777 mm = amdgpu_find_mm_node(&bo->mem, &offset);
778 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
779 (offset >> PAGE_SHIFT);
780 }
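/*
 * Illustrative example (not from the original source): for a VRAM BO
 * with bus.base = 0xe0000000, a node starting at page 0x10 and one page
 * of remaining offset, the fault handler receives the pfn
 * (0xe0000000 >> PAGE_SHIFT) + 0x10 + 1.
 */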
781
782 /*
783 * TTM backend functions.
784 */
785 struct amdgpu_ttm_gup_task_list {
786 struct list_head list;
787 struct task_struct *task;
788 };
789
790 struct amdgpu_ttm_tt {
791 struct ttm_dma_tt ttm;
792 u64 offset;
793 uint64_t userptr;
794 struct task_struct *usertask;
795 uint32_t userflags;
796 struct spinlock guptasklock;
797 struct list_head guptasks;
798 atomic_t mmu_invalidations;
799 uint32_t last_set_pages;
800 };
801
802 /**
803 * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
804 * pointer to memory
805 *
806 * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
807 * This provides a wrapper around the get_user_pages() call to provide
808 * device accessible pages that back user memory.
809 */
810 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
811 {
812 struct amdgpu_ttm_tt *gtt = (void *)ttm;
813 struct mm_struct *mm = gtt->usertask->mm;
814 unsigned int flags = 0;
815 unsigned pinned = 0;
816 int r;
817
818 if (!mm) /* Happens during process shutdown */
819 return -ESRCH;
820
821 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
822 flags |= FOLL_WRITE;
823
824 down_read(&mm->mmap_sem);
825
826 #if 0
827 if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
828 /*
829 * check that we only use anonymous memory to prevent problems
830 * with writeback
831 */
832 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
833 struct vm_area_struct *vma;
834
835 vma = find_vma(mm, gtt->userptr);
836 if (!vma || vma->vm_file || vma->vm_end < end) {
837 up_read(&mm->mmap_sem);
838 return -EPERM;
839 }
840 }
841 #endif
842
843 /* loop enough times using contiguous pages of memory */
844 do {
845 unsigned num_pages = ttm->num_pages - pinned;
846 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
847 struct page **p = pages + pinned;
848 struct amdgpu_ttm_gup_task_list guptask;
849
850 guptask.task = current;
851 spin_lock(&gtt->guptasklock);
852 list_add(&guptask.list, &gtt->guptasks);
853 spin_unlock(&gtt->guptasklock);
854
855 r = get_user_pages(userptr, num_pages, flags, p, NULL);
856
857 #if 0
858 if (mm == current->mm)
859 r = get_user_pages(userptr, num_pages, flags, p, NULL);
860 else
861 r = get_user_pages_remote(gtt->usertask,
862 mm, userptr, num_pages,
863 flags, p, NULL, NULL);
864 #endif
865
866 spin_lock(&gtt->guptasklock);
867 list_del(&guptask.list);
868 spin_unlock(&gtt->guptasklock);
869
870 if (r < 0)
871 goto release_pages;
872
873 pinned += r;
874
875 } while (pinned < ttm->num_pages);
876
877 up_read(&mm->mmap_sem);
878 return 0;
879
880 release_pages:
881 release_pages(pages, pinned);
882 up_read(&mm->mmap_sem);
883 return r;
884 }
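/*
 * Usage sketch (an assumption modelled on the callers named above, not
 * a verbatim copy of them): a caller typically allocates a page array,
 * pins the user pages and then hands them to the ttm_tt object:
 *
 *	struct page **pages = kvmalloc_array(ttm->num_pages,
 *					     sizeof(void *), GFP_KERNEL);
 *	r = amdgpu_ttm_tt_get_user_pages(ttm, pages);
 *	if (!r)
 *		amdgpu_ttm_tt_set_user_pages(ttm, pages);
 */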
885
886 /**
887 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
888 *
889 * Called by amdgpu_cs_list_validate(). This creates the page list
890 * that backs user memory and will ultimately be mapped into the device
891 * address space.
892 */
893 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
894 {
895 struct amdgpu_ttm_tt *gtt = (void *)ttm;
896 unsigned i;
897
898 gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
899 for (i = 0; i < ttm->num_pages; ++i) {
900 if (ttm->pages[i])
901 put_page(ttm->pages[i]);
902
903 ttm->pages[i] = pages ? pages[i] : NULL;
904 }
905 }
906
907 /**
908 * amdgpu_ttm_tt_mark_user_pages - Mark pages as dirty
909 *
910 * Called while unpinning userptr pages
911 */
912 void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
913 {
914 struct amdgpu_ttm_tt *gtt = (void *)ttm;
915 unsigned i;
916
917 for (i = 0; i < ttm->num_pages; ++i) {
918 struct page *page = ttm->pages[i];
919
920 if (!page)
921 continue;
922
923 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
924 set_page_dirty(page);
925
926 mark_page_accessed(page);
927 }
928 }
929
930 /**
931 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
932 *
933 * Called by amdgpu_ttm_backend_bind()
934 **/
935 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
936 {
937 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
938 struct amdgpu_ttm_tt *gtt = (void *)ttm;
939 unsigned nents;
940 int r;
941
942 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
943 enum dma_data_direction direction = write ?
944 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
945
946 /* Allocate an SG array and squash pages into it */
947 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
948 ttm->num_pages << PAGE_SHIFT,
949 GFP_KERNEL);
950 if (r)
951 goto release_sg;
952
953 /* Map SG to device */
954 r = -ENOMEM;
955 nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
956 if (nents != ttm->sg->nents)
957 goto release_sg;
958
959 /* convert SG to linear array of pages and dma addresses */
960 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
961 gtt->ttm.dma_address, ttm->num_pages);
962
963 return 0;
964
965 release_sg:
966 kfree(ttm->sg);
967 ttm->sg = NULL;
968 return r;
969 }
970
971 /**
972 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
973 */
974 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
975 {
976 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
977 struct amdgpu_ttm_tt *gtt = (void *)ttm;
978
979 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
980 enum dma_data_direction direction = write ?
981 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
982
983 /* double check that we don't free the table twice */
984 if (!ttm->sg || !ttm->sg->sgl)
985 return;
986
987 /* unmap the pages mapped to the device */
988 dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
989
990 /* mark the pages as dirty */
991 amdgpu_ttm_tt_mark_user_pages(ttm);
992
993 sg_free_table(ttm->sg);
994 }
995
996 int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
997 struct ttm_buffer_object *tbo,
998 uint64_t flags);
999 int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
1000 struct ttm_buffer_object *tbo,
1001 uint64_t flags)
1002 {
1003 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1004 struct ttm_tt *ttm = tbo->ttm;
1005 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1006 int r;
1007
1008 if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
1009 uint64_t page_idx = 1;
1010
1011 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1012 ttm->pages, gtt->ttm.dma_address, flags);
1013 if (r)
1014 goto gart_bind_fail;
1015
1016 /* Patch mtype of the second part BO */
1017 flags &= ~AMDGPU_PTE_MTYPE_MASK;
1018 flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
1019
1020 r = amdgpu_gart_bind(adev,
1021 gtt->offset + (page_idx << PAGE_SHIFT),
1022 ttm->num_pages - page_idx,
1023 &ttm->pages[page_idx],
1024 &(gtt->ttm.dma_address[page_idx]), flags);
1025 } else {
1026 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1027 ttm->pages, gtt->ttm.dma_address, flags);
1028 }
1029
1030 gart_bind_fail:
1031 if (r)
1032 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1033 ttm->num_pages, gtt->offset);
1034
1035 return r;
1036 }
1037
1038 /**
1039 * amdgpu_ttm_backend_bind - Bind GTT memory
1040 *
1041 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1042 * This handles binding GTT memory to the device address space.
1043 */
1044 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
1045 struct ttm_mem_reg *bo_mem)
1046 {
1047 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1048 struct amdgpu_ttm_tt *gtt = (void*)ttm;
1049 uint64_t flags;
1050 int r = 0;
1051
1052 if (gtt->userptr) {
1053 r = amdgpu_ttm_tt_pin_userptr(ttm);
1054 if (r) {
1055 DRM_ERROR("failed to pin userptr\n");
1056 return r;
1057 }
1058 }
1059 if (!ttm->num_pages) {
1060 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
1061 ttm->num_pages, bo_mem, ttm);
1062 }
1063
1064 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1065 bo_mem->mem_type == AMDGPU_PL_GWS ||
1066 bo_mem->mem_type == AMDGPU_PL_OA)
1067 return -EINVAL;
1068
1069 if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1070 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1071 return 0;
1072 }
1073
1074 /* compute PTE flags relevant to this BO memory */
1075 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1076
1077 /* bind pages into GART page tables */
1078 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1079 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1080 ttm->pages, gtt->ttm.dma_address, flags);
1081
1082 if (r)
1083 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1084 ttm->num_pages, gtt->offset);
1085 return r;
1086 }
1087
1088 /**
1089 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
1090 */
1091 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1092 {
1093 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1094 struct ttm_operation_ctx ctx = { false, false };
1095 struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
1096 struct ttm_mem_reg tmp;
1097 struct ttm_placement placement;
1098 struct ttm_place placements;
1099 uint64_t flags;
1100 int r;
1101
1102 if (bo->mem.mem_type != TTM_PL_TT ||
1103 amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
1104 return 0;
1105
1106 /* allocate GTT space */
1107 tmp = bo->mem;
1108 tmp.mm_node = NULL;
1109 placement.num_placement = 1;
1110 placement.placement = &placements;
1111 placement.num_busy_placement = 1;
1112 placement.busy_placement = &placements;
1113 placements.fpfn = 0;
1114 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1115 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
1116 TTM_PL_FLAG_TT;
1117
1118 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1119 if (unlikely(r))
1120 return r;
1121
1122 /* compute PTE flags for this buffer object */
1123 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1124
1125 /* Bind pages */
1126 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1127 r = amdgpu_ttm_gart_bind(adev, bo, flags);
1128 if (unlikely(r)) {
1129 ttm_bo_mem_put(bo, &tmp);
1130 return r;
1131 }
1132
1133 ttm_bo_mem_put(bo, &bo->mem);
1134 bo->mem = tmp;
1135 bo->offset = (bo->mem.start << PAGE_SHIFT) +
1136 bo->bdev->man[bo->mem.mem_type].gpu_offset;
1137
1138 return 0;
1139 }
1140
1141 /**
1142 * amdgpu_ttm_recover_gart - Rebind GTT pages
1143 *
1144 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1145 * rebind GTT pages during a GPU reset.
1146 */
1147 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1148 {
1149 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1150 uint64_t flags;
1151 int r;
1152
1153 if (!tbo->ttm)
1154 return 0;
1155
1156 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1157 r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1158
1159 return r;
1160 }
1161
1162 /**
1163 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1164 *
1165 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1166 * ttm_tt_destroy().
1167 */
1168 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
1169 {
1170 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1171 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1172 int r;
1173
1174 /* if the pages have userptr pinning then clear that first */
1175 if (gtt->userptr)
1176 amdgpu_ttm_tt_unpin_userptr(ttm);
1177
1178 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1179 return 0;
1180
1181 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1182 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1183 if (r)
1184 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
1185 gtt->ttm.ttm.num_pages, gtt->offset);
1186 return r;
1187 }
1188
1189 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
1190 {
1191 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1192
1193 if (gtt->usertask)
1194 put_task_struct(gtt->usertask);
1195
1196 ttm_dma_tt_fini(&gtt->ttm);
1197 kfree(gtt);
1198 }
1199
1200 static struct ttm_backend_func amdgpu_backend_func = {
1201 .bind = &amdgpu_ttm_backend_bind,
1202 .unbind = &amdgpu_ttm_backend_unbind,
1203 .destroy = &amdgpu_ttm_backend_destroy,
1204 };
1205
1206 /**
1207 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1208 *
1209 * @bo: The buffer object to create a GTT ttm_tt object around
1210 *
1211 * Called by ttm_tt_create().
1212 */
1213 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1214 uint32_t page_flags)
1215 {
1216 struct amdgpu_device *adev;
1217 struct amdgpu_ttm_tt *gtt;
1218
1219 adev = amdgpu_ttm_adev(bo->bdev);
1220
1221 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1222 if (gtt == NULL) {
1223 return NULL;
1224 }
1225 gtt->ttm.ttm.func = &amdgpu_backend_func;
1226
1227 /* allocate space for the uninitialized page entries */
1228 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
1229 kfree(gtt);
1230 return NULL;
1231 }
1232 return &gtt->ttm.ttm;
1233 }
1234
1235 /**
1236 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1237 *
1238 * Map the pages of a ttm_tt object to an address space visible
1239 * to the underlying device.
1240 */
1241 static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1242 struct ttm_operation_ctx *ctx)
1243 {
1244 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1245 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1246 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1247
1248 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1249 if (gtt && gtt->userptr) {
1250 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1251 if (!ttm->sg)
1252 return -ENOMEM;
1253
1254 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1255 ttm->state = tt_unbound;
1256 return 0;
1257 }
1258
1259 if (slave && ttm->sg) {
1260 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1261 gtt->ttm.dma_address,
1262 ttm->num_pages);
1263 ttm->state = tt_unbound;
1264 return 0;
1265 }
1266
1267 #ifdef CONFIG_SWIOTLB
1268 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1269 return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1270 }
1271 #endif
1272
1273 /* fall back to generic helper to populate the page array
1274 * and map them to the device */
1275 return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1276 }
1277
1278 /**
1279 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1280 *
1281 * Unmaps pages of a ttm_tt object from the device address space and
1282 * unpopulates the page array backing it.
1283 */
1284 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1285 {
1286 struct amdgpu_device *adev;
1287 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1288 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1289
1290 if (gtt && gtt->userptr) {
1291 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1292 kfree(ttm->sg);
1293 ttm->sg = NULL;
1294 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1295 return;
1296 }
1297
1298 if (slave)
1299 return;
1300
1301 adev = amdgpu_ttm_adev(ttm->bdev);
1302
1303 #ifdef CONFIG_SWIOTLB
1304 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1305 ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1306 return;
1307 }
1308 #endif
1309
1310 /* fall back to generic helper to unmap and unpopulate array */
1311 ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1312 }
1313
1314 /**
1315 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1316 * task
1317 *
1318 * @ttm: The ttm_tt object to bind this userptr object to
1319 * @addr: The address in the current tasks VM space to use
1320 * @flags: Requirements of userptr object.
1321 *
1322 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1323 * to current task
1324 */
1325 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1326 uint32_t flags)
1327 {
1328 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1329
1330 if (gtt == NULL)
1331 return -EINVAL;
1332
1333 gtt->userptr = addr;
1334 gtt->userflags = flags;
1335
1336 if (gtt->usertask)
1337 put_task_struct(gtt->usertask);
1338 kprintf("amdgpu_ttm_tt_set_userptr: gtt->usertask will not be set\n");
1339 #if 0
1340 gtt->usertask = current->group_leader;
1341 get_task_struct(gtt->usertask);
1342 #endif
1343
1344 spin_init(&gtt->guptasklock, "agttgutl");
1345 INIT_LIST_HEAD(&gtt->guptasks);
1346 atomic_set(&gtt->mmu_invalidations, 0);
1347 gtt->last_set_pages = 0;
1348
1349 return 0;
1350 }
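/*
 * Usage sketch (an assumption mirroring amdgpu_gem_userptr_ioctl rather
 * than quoting it): the ioctl binds the user address range to the BO's
 * ttm_tt right after creating the BO and before any pinning:
 *
 *	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 *	if (r)
 *		goto release_object;
 */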
1351
1352 /**
1353 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1354 */
1355 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1356 {
1357 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1358
1359 if (gtt == NULL)
1360 return NULL;
1361
1362 if (gtt->usertask == NULL)
1363 return NULL;
1364
1365 return gtt->usertask->mm;
1366 }
1367
1368 /**
1369 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
1370 * address range for the current task.
1371 *
1372 */
1373 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1374 unsigned long end)
1375 {
1376 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1377 struct amdgpu_ttm_gup_task_list *entry;
1378 unsigned long size;
1379
1380 if (gtt == NULL || !gtt->userptr)
1381 return false;
1382
1383 /* Return false if no part of the ttm_tt object lies within
1384 * the range
1385 */
1386 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1387 if (gtt->userptr > end || gtt->userptr + size <= start)
1388 return false;
1389
1390 /* Search the lists of tasks that hold this mapping and see
1391 * if current is one of them. If it is return false.
1392 */
1393 spin_lock(&gtt->guptasklock);
1394 list_for_each_entry(entry, &gtt->guptasks, list) {
1395 if (entry->task == current) {
1396 spin_unlock(&gtt->guptasklock);
1397 return false;
1398 }
1399 }
1400 spin_unlock(&gtt->guptasklock);
1401
1402 atomic_inc(&gtt->mmu_invalidations);
1403
1404 return true;
1405 }
1406
1407 /**
1408 * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
1409 */
1410 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1411 int *last_invalidated)
1412 {
1413 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1414 int prev_invalidated = *last_invalidated;
1415
1416 *last_invalidated = atomic_read(&gtt->mmu_invalidations);
1417 return prev_invalidated != *last_invalidated;
1418 }
1419
1420 /**
1421 * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
1422 * been invalidated since the last time they've been set?
1423 */
1424 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
1425 {
1426 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1427
1428 if (gtt == NULL || !gtt->userptr)
1429 return false;
1430
1431 return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
1432 }
1433
1434 /**
1435 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1436 */
1437 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1438 {
1439 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1440
1441 if (gtt == NULL)
1442 return false;
1443
1444 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1445 }
1446
1447 /**
1448 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1449 *
1450 * @ttm: The ttm_tt object to compute the flags for
1451 * @mem: The memory registry backing this ttm_tt object
1452 */
1453 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1454 struct ttm_mem_reg *mem)
1455 {
1456 uint64_t flags = 0;
1457
1458 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1459 flags |= AMDGPU_PTE_VALID;
1460
1461 if (mem && mem->mem_type == TTM_PL_TT) {
1462 flags |= AMDGPU_PTE_SYSTEM;
1463
1464 if (ttm->caching_state == tt_cached)
1465 flags |= AMDGPU_PTE_SNOOPED;
1466 }
1467
1468 flags |= adev->gart.gart_pte_flags;
1469 flags |= AMDGPU_PTE_READABLE;
1470
1471 if (!amdgpu_ttm_tt_is_readonly(ttm))
1472 flags |= AMDGPU_PTE_WRITEABLE;
1473
1474 return flags;
1475 }
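/*
 * Worked example (illustrative): for a cache-coherent (tt_cached) GTT
 * binding of a writable BO the result is roughly
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * plus whatever ASIC specific bits adev->gart.gart_pte_flags carries.
 */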
1476
1477 /**
1478 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1479 * object.
1480 *
1481 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1482 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1483 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1484 * used to clean out a memory space.
1485 */
1486 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1487 const struct ttm_place *place)
1488 {
1489 unsigned long num_pages = bo->mem.num_pages;
1490 struct drm_mm_node *node = bo->mem.mm_node;
1491 struct reservation_object_list *flist;
1492 struct dma_fence *f;
1493 int i;
1494
1495 /* If bo is a KFD BO, check if the bo belongs to the current process.
1496 * If true, then return false as any KFD process needs all its BOs to
1497 * be resident to run successfully
1498 */
1499 flist = reservation_object_get_list(bo->resv);
1500 if (flist) {
1501 for (i = 0; i < flist->shared_count; ++i) {
1502 f = rcu_dereference_protected(flist->shared[i],
1503 reservation_object_held(bo->resv));
1504 if (amdkfd_fence_check_mm(f, current->mm))
1505 return false;
1506 }
1507 }
1508
1509 switch (bo->mem.mem_type) {
1510 case TTM_PL_TT:
1511 return true;
1512
1513 case TTM_PL_VRAM:
1514 /* Check each drm MM node individually */
1515 while (num_pages) {
1516 if (place->fpfn < (node->start + node->size) &&
1517 !(place->lpfn && place->lpfn <= node->start))
1518 return true;
1519
1520 num_pages -= node->size;
1521 ++node;
1522 }
1523 return false;
1524
1525 default:
1526 break;
1527 }
1528
1529 return ttm_bo_eviction_valuable(bo, place);
1530 }
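/*
 * Illustrative example for the VRAM case (not from the original code):
 * for an eviction request covering pages [place->fpfn, place->lpfn) =
 * [0, 0x100) and a BO whose only node starts at page 0x200, the check
 * place->lpfn <= node->start holds, no node intersects the range and
 * the BO is reported as not worth evicting.
 */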
1531
1532 /**
1533 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1534 *
1535 * @bo: The buffer object to read/write
1536 * @offset: Offset into buffer object
1537 * @buf: Secondary buffer to write/read from
1538 * @len: Length in bytes of access
1539 * @write: true if writing
1540 *
1541 * This is used to access VRAM that backs a buffer object via MMIO
1542 * access for debugging purposes.
1543 */
1544 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1545 unsigned long offset,
1546 void *buf, int len, int write)
1547 {
1548 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1549 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1550 struct drm_mm_node *nodes;
1551 uint32_t value = 0;
1552 int ret = 0;
1553 uint64_t pos;
1554 unsigned long flags;
1555
1556 if (bo->mem.mem_type != TTM_PL_VRAM)
1557 return -EIO;
1558
1559 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1560 pos = (nodes->start << PAGE_SHIFT) + offset;
1561
1562 while (len && pos < adev->gmc.mc_vram_size) {
1563 uint64_t aligned_pos = pos & ~(uint64_t)3;
1564 uint32_t bytes = 4 - (pos & 3);
1565 uint32_t shift = (pos & 3) * 8;
1566 uint32_t mask = 0xffffffff << shift;
1567
1568 if (len < bytes) {
1569 mask &= 0xffffffff >> (bytes - len) * 8;
1570 bytes = len;
1571 }
1572
1573 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1574 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1575 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1576 if (!write || mask != 0xffffffff)
1577 value = RREG32_NO_KIQ(mmMM_DATA);
1578 if (write) {
1579 value &= ~mask;
1580 value |= (*(uint32_t *)buf << shift) & mask;
1581 WREG32_NO_KIQ(mmMM_DATA, value);
1582 }
1583 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1584 if (!write) {
1585 value = (value & mask) >> shift;
1586 memcpy(buf, &value, bytes);
1587 }
1588
1589 ret += bytes;
1590 buf = (uint8_t *)buf + bytes;
1591 pos += bytes;
1592 len -= bytes;
1593 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1594 ++nodes;
1595 pos = (nodes->start << PAGE_SHIFT);
1596 }
1597 }
1598
1599 return ret;
1600 }
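/*
 * Worked example of the sub-dword masking above (illustrative, not from
 * the original source): a 2 byte write at pos = 0x1002 yields
 * aligned_pos = 0x1000, bytes = 2, shift = 16 and mask = 0xffff0000;
 * the dword at 0x1000 is read back through MM_DATA, its upper 16 bits
 * are replaced with the caller's data and the merged value is written
 * out again.
 */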
1601
1602 static struct ttm_bo_driver amdgpu_bo_driver = {
1603 .ttm_tt_create = &amdgpu_ttm_tt_create,
1604 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1605 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1606 .invalidate_caches = &amdgpu_invalidate_caches,
1607 .init_mem_type = &amdgpu_init_mem_type,
1608 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1609 .evict_flags = &amdgpu_evict_flags,
1610 .move = &amdgpu_bo_move,
1611 .verify_access = &amdgpu_verify_access,
1612 .move_notify = &amdgpu_bo_move_notify,
1613 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1614 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1615 .io_mem_free = &amdgpu_ttm_io_mem_free,
1616 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1617 .access_memory = &amdgpu_ttm_access_memory
1618 };
1619
1620 /*
1621 * Firmware Reservation functions
1622 */
1623 /**
1624 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1625 *
1626 * @adev: amdgpu_device pointer
1627 *
1628 * free fw reserved vram if it has been reserved.
1629 */
1630 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1631 {
1632 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1633 NULL, &adev->fw_vram_usage.va);
1634 }
1635
1636 /**
1637 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1638 *
1639 * @adev: amdgpu_device pointer
1640 *
1641 * create bo vram reservation from fw.
1642 */
1643 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1644 {
1645 struct ttm_operation_ctx ctx = { false, false };
1646 struct amdgpu_bo_param bp;
1647 int r = 0;
1648 int i;
1649 u64 vram_size = adev->gmc.visible_vram_size;
1650 u64 offset = adev->fw_vram_usage.start_offset;
1651 u64 size = adev->fw_vram_usage.size;
1652 struct amdgpu_bo *bo;
1653
1654 memset(&bp, 0, sizeof(bp));
1655 bp.size = adev->fw_vram_usage.size;
1656 bp.byte_align = PAGE_SIZE;
1657 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
1658 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1659 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1660 bp.type = ttm_bo_type_kernel;
1661 bp.resv = NULL;
1662 adev->fw_vram_usage.va = NULL;
1663 adev->fw_vram_usage.reserved_bo = NULL;
1664
1665 if (adev->fw_vram_usage.size > 0 &&
1666 adev->fw_vram_usage.size <= vram_size) {
1667
1668 r = amdgpu_bo_create(adev, &bp,
1669 &adev->fw_vram_usage.reserved_bo);
1670 if (r)
1671 goto error_create;
1672
1673 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
1674 if (r)
1675 goto error_reserve;
1676
1677 /* remove the original mem node and create a new one at the
1678 * request position
1679 */
1680 bo = adev->fw_vram_usage.reserved_bo;
1681 offset = ALIGN(offset, PAGE_SIZE);
1682 for (i = 0; i < bo->placement.num_placement; ++i) {
1683 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1684 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1685 }
1686
1687 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1688 r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
1689 &bo->tbo.mem, &ctx);
1690 if (r)
1691 goto error_pin;
1692
1693 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
1694 AMDGPU_GEM_DOMAIN_VRAM,
1695 adev->fw_vram_usage.start_offset,
1696 (adev->fw_vram_usage.start_offset +
1697 adev->fw_vram_usage.size));
1698 if (r)
1699 goto error_pin;
1700 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
1701 &adev->fw_vram_usage.va);
1702 if (r)
1703 goto error_kmap;
1704
1705 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1706 }
1707 return r;
1708
1709 error_kmap:
1710 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
1711 error_pin:
1712 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1713 error_reserve:
1714 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
1715 error_create:
1716 adev->fw_vram_usage.va = NULL;
1717 adev->fw_vram_usage.reserved_bo = NULL;
1718 return r;
1719 }
1720 /**
1721 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1722 * gtt/vram related fields.
1723 *
1724 * This initializes all of the memory space pools that the TTM layer
1725 * will need such as the GTT space (system memory mapped to the device),
1726 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1727 * can be mapped per VMID.
1728 */
1729 int amdgpu_ttm_init(struct amdgpu_device *adev)
1730 {
1731 uint64_t gtt_size;
1732 int r;
1733 u64 vis_vram_limit;
1734
1735 /* initialize global references for vram/gtt */
1736 r = amdgpu_ttm_global_init(adev);
1737 if (r) {
1738 return r;
1739 }
1740 /* No other users of the address space, so set it to 0 */
1741 r = ttm_bo_device_init(&adev->mman.bdev,
1742 adev->mman.bo_global_ref.ref.object,
1743 &amdgpu_bo_driver,
1744 #if 0
1745 adev->ddev->anon_inode->i_mapping,
1746 #endif
1747 NULL,
1748 DRM_FILE_PAGE_OFFSET,
1749 adev->need_dma32);
1750 if (r) {
1751 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1752 return r;
1753 }
1754 adev->ddev->drm_ttm_bdev = &adev->mman.bdev;
1755 adev->mman.initialized = true;
1756
1757 /* We opt to avoid OOM on system pages allocations */
1758 adev->mman.bdev.no_retry = true;
1759
1760 /* Initialize VRAM pool with all of VRAM divided into pages */
1761 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1762 adev->gmc.real_vram_size >> PAGE_SHIFT);
1763 if (r) {
1764 DRM_ERROR("Failed initializing VRAM heap.\n");
1765 return r;
1766 }
1767
1768 /* Reduce size of CPU-visible VRAM if requested */
1769 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1770 if (amdgpu_vis_vram_limit > 0 &&
1771 vis_vram_limit <= adev->gmc.visible_vram_size)
1772 adev->gmc.visible_vram_size = vis_vram_limit;
1773
1774 /* Change the size here instead of the init above so only lpfn is affected */
1775 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1776 #ifdef CONFIG_64BIT
1777 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1778 adev->gmc.visible_vram_size);
1779 #endif
1780
1781 /*
1782 * The reserved VRAM for firmware must be pinned to the specified
1783 * place on the VRAM, so reserve it early.
1784 */
1785 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1786 if (r) {
1787 return r;
1788 }
1789
1790 /* allocate memory as required for VGA
1791 * This is used for VGA emulation and pre-OS scanout buffers to
1792 * avoid display artifacts while transitioning between pre-OS
1793 * and driver. */
1794 if (adev->gmc.stolen_size) {
1795 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1796 AMDGPU_GEM_DOMAIN_VRAM,
1797 &adev->stolen_vga_memory,
1798 NULL, NULL);
1799 if (r)
1800 return r;
1801 }
1802 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1803 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1804
1805 /* Compute GTT size, either based on 3/4th the size of RAM size
1806 * or whatever the user passed on module init */
1807 if (amdgpu_gtt_size == -1) {
1808 struct sysinfo si;
1809
1810 si_meminfo(&si);
1811 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1812 adev->gmc.mc_vram_size),
1813 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1814 }
1815 else
1816 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1817
1818 /* Initialize GTT memory pool */
1819 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1820 if (r) {
1821 DRM_ERROR("Failed initializing GTT heap.\n");
1822 return r;
1823 }
1824 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1825 (unsigned)(gtt_size / (1024 * 1024)));
1826
1827 /* Initialize various on-chip memory pools */
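/* Scale the GDS, GWS and OA sizes by their per-unit shifts so the
* totals can be handed to TTM below as page-granular pool sizes. */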
1828 adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
1829 adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
1830 adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
1831 adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
1832 adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
1833 adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
1834 adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
1835 adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
1836 adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
1837 /* GDS Memory */
1838 if (adev->gds.mem.total_size) {
1839 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1840 adev->gds.mem.total_size >> PAGE_SHIFT);
1841 if (r) {
1842 DRM_ERROR("Failed initializing GDS heap.\n");
1843 return r;
1844 }
1845 }
1846
1847 /* GWS */
1848 if (adev->gds.gws.total_size) {
1849 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1850 adev->gds.gws.total_size >> PAGE_SHIFT);
1851 if (r) {
1852 DRM_ERROR("Failed initializing gws heap.\n");
1853 return r;
1854 }
1855 }
1856
1857 /* OA */
1858 if (adev->gds.oa.total_size) {
1859 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1860 adev->gds.oa.total_size >> PAGE_SHIFT);
1861 if (r) {
1862 DRM_ERROR("Failed initializing oa heap.\n");
1863 return r;
1864 }
1865 }
1866
1867 /* Register debugfs entries for amdgpu_ttm */
1868 r = amdgpu_ttm_debugfs_init(adev);
1869 if (r) {
1870 DRM_ERROR("Failed to init debugfs\n");
1871 return r;
1872 }
1873 return 0;
1874 }
1875
1876 /**
1877 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1878 */
1879 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1880 {
1881 /* return the VGA stolen memory (if any) back to VRAM */
1882 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1883 }
1884
1885 /**
1886 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1887 */
1888 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1889 {
1890 if (!adev->mman.initialized)
1891 return;
1892
1893 amdgpu_ttm_debugfs_fini(adev);
1894 amdgpu_ttm_fw_reserve_vram_fini(adev);
1895 if (adev->mman.aper_base_kaddr)
1896 iounmap(adev->mman.aper_base_kaddr);
1897 adev->mman.aper_base_kaddr = NULL;
1898
1899 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1900 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1901 if (adev->gds.mem.total_size)
1902 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1903 if (adev->gds.gws.total_size)
1904 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1905 if (adev->gds.oa.total_size)
1906 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1907 ttm_bo_device_release(&adev->mman.bdev);
1908 amdgpu_ttm_global_fini(adev);
1909 adev->mman.initialized = false;
1910 DRM_INFO("amdgpu: ttm finalized\n");
1911 }
1912
1913 /**
1914 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1915 *
1916 * @adev: amdgpu_device pointer
1917 * @enable: true when we can use buffer functions.
1918 *
1919 * Enable/disable use of buffer functions during suspend/resume. This should
1920 * only be called at bootup or when userspace isn't running.
1921 */
1922 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1923 {
1924 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1925 uint64_t size;
1926 int r;
1927
1928 if (!adev->mman.initialized || adev->in_gpu_reset ||
1929 adev->mman.buffer_funcs_enabled == enable)
1930 return;
1931
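/* When enabling, create a kernel-priority scheduler entity on the
* buffer-funcs ring so TTM can submit buffer moves; when disabling,
* tear the entity down and drop any pending move fence. */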
1932 if (enable) {
1933 struct amdgpu_ring *ring;
1934 struct drm_sched_rq *rq;
1935
1936 ring = adev->mman.buffer_funcs_ring;
1937 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1938 r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
1939 if (r) {
1940 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1941 r);
1942 return;
1943 }
1944 } else {
1945 drm_sched_entity_destroy(&adev->mman.entity);
1946 dma_fence_put(man->move);
1947 man->move = NULL;
1948 }
1949
1950 /* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1951 if (enable)
1952 size = adev->gmc.real_vram_size;
1953 else
1954 size = adev->gmc.visible_vram_size;
1955 man->size = size >> PAGE_SHIFT;
1956 adev->mman.buffer_funcs_enabled = enable;
1957 }
1958
1959 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1960 {
1961 struct drm_file *file_priv;
1962 struct amdgpu_device *adev;
1963
1964 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
1965 return -EINVAL;
1966
1967 file_priv = filp->private_data;
1968 adev = file_priv->minor->dev->dev_private;
1969 if (adev == NULL)
1970 return -EINVAL;
1971
1972 return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1973 }
1974
1975 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1976 struct ttm_mem_reg *mem, unsigned num_pages,
1977 uint64_t offset, unsigned window,
1978 struct amdgpu_ring *ring,
1979 uint64_t *addr)
1980 {
1981 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1982 struct amdgpu_device *adev = ring->adev;
1983 struct ttm_tt *ttm = bo->ttm;
1984 struct amdgpu_job *job;
1985 unsigned num_dw, num_bytes;
1986 dma_addr_t *dma_address;
1987 struct dma_fence *fence;
1988 uint64_t src_addr, dst_addr;
1989 uint64_t flags;
1990 int r;
1991
1992 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1993 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1994
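/* The GART window for this transfer starts at gart_start plus the
* window index times the maximum per-window transfer size in bytes. */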
1995 *addr = adev->gmc.gart_start;
1996 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1997 AMDGPU_GPU_PAGE_SIZE;
1998
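/* Pad the copy command to an 8-dword boundary; the IB also carries one
* 8-byte GART PTE per page appended after the command dwords. */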
1999 num_dw = adev->mman.buffer_funcs->copy_num_dw;
2000 while (num_dw & 0x7)
2001 num_dw++;
2002
2003 num_bytes = num_pages * 8;
2004
2005 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
2006 if (r)
2007 return r;
2008
2009 src_addr = num_dw * 4;
2010 src_addr += job->ibs[0].gpu_addr;
2011
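/* Copy the PTEs placed in the IB into the GART table entries that
* back the selected window. */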
2012 dst_addr = adev->gart.table_addr;
2013 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
2014 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
2015 dst_addr, num_bytes);
2016
2017 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2018 WARN_ON(job->ibs[0].length_dw > num_dw);
2019
2020 dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
2021 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
2022 r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
2023 &job->ibs[0].ptr[num_dw]);
2024 if (r)
2025 goto error_free;
2026
2027 r = amdgpu_job_submit(job, &adev->mman.entity,
2028 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
2029 if (r)
2030 goto error_free;
2031
2032 dma_fence_put(fence);
2033
2034 return r;
2035
2036 error_free:
2037 amdgpu_job_free(job);
2038 return r;
2039 }
2040
2041 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2042 uint64_t dst_offset, uint32_t byte_count,
2043 struct reservation_object *resv,
2044 struct dma_fence **fence, bool direct_submit,
2045 bool vm_needs_flush)
2046 {
2047 struct amdgpu_device *adev = ring->adev;
2048 struct amdgpu_job *job;
2049
2050 uint32_t max_bytes;
2051 unsigned num_loops, num_dw;
2052 unsigned i;
2053 int r;
2054
2055 if (direct_submit && !ring->ready) {
2056 DRM_ERROR("Trying to move memory with ring turned off.\n");
2057 return -EINVAL;
2058 }
2059
2060 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2061 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2062 num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
2063
2064 /* for IB padding */
2065 while (num_dw & 0x7)
2066 num_dw++;
2067
2068 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2069 if (r)
2070 return r;
2071
2072 job->vm_needs_flush = vm_needs_flush;
2073 if (resv) {
2074 r = amdgpu_sync_resv(adev, &job->sync, resv,
2075 AMDGPU_FENCE_OWNER_UNDEFINED,
2076 false);
2077 if (r) {
2078 DRM_ERROR("sync failed (%d).\n", r);
2079 goto error_free;
2080 }
2081 }
2082
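/* Split the copy into chunks of at most copy_max_bytes and emit one
* copy packet per chunk. */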
2083 for (i = 0; i < num_loops; i++) {
2084 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2085
2086 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2087 dst_offset, cur_size_in_bytes);
2088
2089 src_offset += cur_size_in_bytes;
2090 dst_offset += cur_size_in_bytes;
2091 byte_count -= cur_size_in_bytes;
2092 }
2093
2094 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2095 WARN_ON(job->ibs[0].length_dw > num_dw);
2096 if (direct_submit)
2097 r = amdgpu_job_submit_direct(job, ring, fence);
2098 else
2099 r = amdgpu_job_submit(job, &adev->mman.entity,
2100 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2101 if (r)
2102 goto error_free;
2103
2104 return r;
2105
2106 error_free:
2107 amdgpu_job_free(job);
2108 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2109 return r;
2110 }
2111
2112 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2113 uint32_t src_data,
2114 struct reservation_object *resv,
2115 struct dma_fence **fence)
2116 {
2117 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2118 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2119 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2120
2121 struct drm_mm_node *mm_node;
2122 unsigned long num_pages;
2123 unsigned int num_loops, num_dw;
2124
2125 struct amdgpu_job *job;
2126 int r;
2127
2128 if (!adev->mman.buffer_funcs_enabled) {
2129 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2130 return -EINVAL;
2131 }
2132
2133 if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2134 r = amdgpu_ttm_alloc_gart(&bo->tbo);
2135 if (r)
2136 return r;
2137 }
2138
2139 num_pages = bo->tbo.num_pages;
2140 mm_node = bo->tbo.mem.mm_node;
2141 num_loops = 0;
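/* Walk the drm_mm nodes backing the BO to count how many fill packets
* are needed. */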
2142 while (num_pages) {
2143 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
2144
2145 num_loops += DIV_ROUND_UP(byte_count, max_bytes);
2146 num_pages -= mm_node->size;
2147 ++mm_node;
2148 }
2149 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2150
2151 /* for IB padding */
2152 num_dw += 64;
2153
2154 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2155 if (r)
2156 return r;
2157
2158 if (resv) {
2159 r = amdgpu_sync_resv(adev, &job->sync, resv,
2160 AMDGPU_FENCE_OWNER_UNDEFINED, false);
2161 if (r) {
2162 DRM_ERROR("sync failed (%d).\n", r);
2163 goto error_free;
2164 }
2165 }
2166
2167 num_pages = bo->tbo.num_pages;
2168 mm_node = bo->tbo.mem.mm_node;
2169
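/* Emit the fill packets, splitting each drm_mm node into chunks of at
* most fill_max_bytes. */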
2170 while (num_pages) {
2171 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
2172 uint64_t dst_addr;
2173
2174 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2175 while (byte_count) {
2176 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2177
2178 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2179 dst_addr, cur_size_in_bytes);
2180
2181 dst_addr += cur_size_in_bytes;
2182 byte_count -= cur_size_in_bytes;
2183 }
2184
2185 num_pages -= mm_node->size;
2186 ++mm_node;
2187 }
2188
2189 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2190 WARN_ON(job->ibs[0].length_dw > num_dw);
2191 r = amdgpu_job_submit(job, &adev->mman.entity,
2192 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2193 if (r)
2194 goto error_free;
2195
2196 return 0;
2197
2198 error_free:
2199 amdgpu_job_free(job);
2200 return r;
2201 }
2202
2203 #if defined(CONFIG_DEBUG_FS)
2204
2205 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2206 {
2207 struct drm_info_node *node = (struct drm_info_node *)m->private;
2208 unsigned ttm_pl = *(int *)node->info_ent->data;
2209 struct drm_device *dev = node->minor->dev;
2210 struct amdgpu_device *adev = dev->dev_private;
2211 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
2212 struct drm_printer p = drm_seq_file_printer(m);
2213
2214 man->func->debug(man, &p);
2215 return 0;
2216 }
2217
2218 static int ttm_pl_vram = TTM_PL_VRAM;
2219 static int ttm_pl_tt = TTM_PL_TT;
2220
2221 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2222 {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
2223 {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
2224 {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
2225 #ifdef CONFIG_SWIOTLB
2226 {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
2227 #endif
2228 };
2229
2230 /**
2231 * amdgpu_ttm_vram_read - Linear read access to VRAM
2232 *
2233 * Accesses VRAM via MMIO for debugging purposes.
2234 */
2235 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2236 size_t size, loff_t *pos)
2237 {
2238 struct amdgpu_device *adev = file_inode(f)->i_private;
2239 ssize_t result = 0;
2240 int r;
2241
2242 if (size & 0x3 || *pos & 0x3)
2243 return -EINVAL;
2244
2245 if (*pos >= adev->gmc.mc_vram_size)
2246 return -ENXIO;
2247
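/* Read VRAM one dword at a time through the indirect
* MM_INDEX/MM_DATA register pair. */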
2248 while (size) {
2249 unsigned long flags;
2250 uint32_t value;
2251
2252 if (*pos >= adev->gmc.mc_vram_size)
2253 return result;
2254
2255 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2256 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2257 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2258 value = RREG32_NO_KIQ(mmMM_DATA);
2259 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2260
2261 r = put_user(value, (uint32_t *)buf);
2262 if (r)
2263 return r;
2264
2265 result += 4;
2266 buf += 4;
2267 *pos += 4;
2268 size -= 4;
2269 }
2270
2271 return result;
2272 }
2273
2274 /**
2275 * amdgpu_ttm_vram_write - Linear write access to VRAM
2276 *
2277 * Accesses VRAM via MMIO for debugging purposes.
2278 */
2279 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2280 size_t size, loff_t *pos)
2281 {
2282 struct amdgpu_device *adev = file_inode(f)->i_private;
2283 ssize_t result = 0;
2284 int r;
2285
2286 if (size & 0x3 || *pos & 0x3)
2287 return -EINVAL;
2288
2289 if (*pos >= adev->gmc.mc_vram_size)
2290 return -ENXIO;
2291
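/* Write VRAM one dword at a time through the same indirect
* MM_INDEX/MM_DATA register pair. */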
2292 while (size) {
2293 unsigned long flags;
2294 uint32_t value;
2295
2296 if (*pos >= adev->gmc.mc_vram_size)
2297 return result;
2298
2299 r = get_user(value, (uint32_t *)buf);
2300 if (r)
2301 return r;
2302
2303 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2304 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2305 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2306 WREG32_NO_KIQ(mmMM_DATA, value);
2307 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2308
2309 result += 4;
2310 buf += 4;
2311 *pos += 4;
2312 size -= 4;
2313 }
2314
2315 return result;
2316 }
2317
2318 static const struct file_operations amdgpu_ttm_vram_fops = {
2319 .owner = THIS_MODULE,
2320 .read = amdgpu_ttm_vram_read,
2321 .write = amdgpu_ttm_vram_write,
2322 .llseek = default_llseek,
2323 };
2324
2325 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2326
2327 /**
2328 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2329 */
2330 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2331 size_t size, loff_t *pos)
2332 {
2333 struct amdgpu_device *adev = file_inode(f)->i_private;
2334 ssize_t result = 0;
2335 int r;
2336
2337 while (size) {
2338 loff_t p = *pos / PAGE_SIZE;
2339 unsigned off = *pos & ~PAGE_MASK;
2340 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2341 struct page *page;
2342 void *ptr;
2343
2344 if (p >= adev->gart.num_cpu_pages)
2345 return result;
2346
2347 page = adev->gart.pages[p];
2348 if (page) {
2349 ptr = kmap(page);
2350 ptr += off;
2351
2352 r = copy_to_user(buf, ptr, cur_size);
2353 kunmap(adev->gart.pages[p]);
2354 } else
2355 r = clear_user(buf, cur_size);
2356
2357 if (r)
2358 return -EFAULT;
2359
2360 result += cur_size;
2361 buf += cur_size;
2362 *pos += cur_size;
2363 size -= cur_size;
2364 }
2365
2366 return result;
2367 }
2368
2369 static const struct file_operations amdgpu_ttm_gtt_fops = {
2370 .owner = THIS_MODULE,
2371 .read = amdgpu_ttm_gtt_read,
2372 .llseek = default_llseek
2373 };
2374
2375 #endif
2376
2377 /**
2378 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2379 *
2380 * This function is used to read memory that has been mapped to the
2381 * GPU. The addresses involved are not physical addresses but bus
2382 * addresses (e.g., what you'd put in an IB or ring buffer).
2383 */
2384 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2385 size_t size, loff_t *pos)
2386 {
2387 struct amdgpu_device *adev = file_inode(f)->i_private;
2388 struct iommu_domain *dom;
2389 ssize_t result = 0;
2390 int r;
2391
2392 /* retrieve the IOMMU domain if any for this device */
2393 dom = iommu_get_domain_for_dev(adev->dev);
2394
2395 while (size) {
2396 phys_addr_t addr = *pos & PAGE_MASK;
2397 loff_t off = *pos & ~PAGE_MASK;
2398 size_t bytes = PAGE_SIZE - off;
2399 unsigned long pfn;
2400 struct page *p;
2401 void *ptr;
2402
2403 bytes = bytes < size ? bytes : size;
2404
2405 /* Translate the bus address to a physical address. If
2406 * the domain is NULL it means there is no IOMMU active
2407 * and the address translation is the identity
2408 */
2409 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2410
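/* Only pages that belong to this device's TTM address space may be
* accessed through this interface. */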
2411 pfn = addr >> PAGE_SHIFT;
2412 if (!pfn_valid(pfn))
2413 return -EPERM;
2414
2415 p = pfn_to_page(pfn);
2416 if (p->mapping != adev->mman.bdev.dev_mapping)
2417 return -EPERM;
2418
2419 ptr = kmap(p);
2420 r = copy_to_user(buf, ptr + off, bytes);
2421 kunmap(p);
2422 if (r)
2423 return -EFAULT;
2424
2425 size -= bytes;
2426 *pos += bytes;
2427 result += bytes;
2428 }
2429
2430 return result;
2431 }
2432
2433 /**
2434 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2435 *
2436 * This function is used to write memory that has been mapped to the
2437 * GPU. The addresses involved are not physical addresses but bus
2438 * addresses (e.g., what you'd put in an IB or ring buffer).
2439 */
2440 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2441 size_t size, loff_t *pos)
2442 {
2443 struct amdgpu_device *adev = file_inode(f)->i_private;
2444 struct iommu_domain *dom;
2445 ssize_t result = 0;
2446 int r;
2447
2448 dom = iommu_get_domain_for_dev(adev->dev);
2449
2450 while (size) {
2451 phys_addr_t addr = *pos & PAGE_MASK;
2452 loff_t off = *pos & ~PAGE_MASK;
2453 size_t bytes = PAGE_SIZE - off;
2454 unsigned long pfn;
2455 struct page *p;
2456 void *ptr;
2457
2458 bytes = bytes < size ? bytes : size;
2459
2460 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2461
2462 pfn = addr >> PAGE_SHIFT;
2463 if (!pfn_valid(pfn))
2464 return -EPERM;
2465
2466 p = pfn_to_page(pfn);
2467 if (p->mapping != adev->mman.bdev.dev_mapping)
2468 return -EPERM;
2469
2470 ptr = kmap(p);
2471 r = copy_from_user(ptr + off, buf, bytes);
2472 kunmap(p);
2473 if (r)
2474 return -EFAULT;
2475
2476 size -= bytes;
2477 *pos += bytes;
2478 result += bytes;
2479 }
2480
2481 return result;
2482 }
2483
2484 static const struct file_operations amdgpu_ttm_iomem_fops = {
2485 .owner = THIS_MODULE,
2486 .read = amdgpu_iomem_read,
2487 .write = amdgpu_iomem_write,
2488 .llseek = default_llseek
2489 };
2490
2491 static const struct {
2492 char *name;
2493 const struct file_operations *fops;
2494 int domain;
2495 } ttm_debugfs_entries[] = {
2496 { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2497 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2498 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2499 #endif
2500 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2501 };
2502
2503 #endif
2504
2505 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2506 {
2507 #if defined(CONFIG_DEBUG_FS)
2508 unsigned count;
2509
2510 struct drm_minor *minor = adev->ddev->primary;
2511 struct dentry *ent, *root = minor->debugfs_root;
2512
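/* Create one debugfs file per exposed TTM domain and size the VRAM and
* GTT entries so they can be read like regular files. */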
2513 for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2514 ent = debugfs_create_file(
2515 ttm_debugfs_entries[count].name,
2516 S_IFREG | S_IRUGO, root,
2517 adev,
2518 ttm_debugfs_entries[count].fops);
2519 if (IS_ERR(ent))
2520 return PTR_ERR(ent);
2521 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2522 i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2523 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2524 i_size_write(ent->d_inode, adev->gmc.gart_size);
2525 adev->mman.debugfs_entries[count] = ent;
2526 }
2527
2528 count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2529
2530 #ifdef CONFIG_SWIOTLB
2531 if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2532 --count;
2533 #endif
2534
2535 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2536 #else
2537 return 0;
2538 #endif
2539 }
2540
2541 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
2542 {
2543 #if defined(CONFIG_DEBUG_FS)
2544 unsigned i;
2545
2546 for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
2547 debugfs_remove(adev->mman.debugfs_entries[i]);
2548 #endif
2549 }
2550