1 /* 2 * Copyright 2009 Jerome Glisse. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sub license, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * 21 * The above copyright notice and this permission notice (including the 22 * next paragraph) shall be included in all copies or substantial portions 23 * of the Software. 24 * 25 */ 26 /* 27 * Authors: 28 * Jerome Glisse <glisse@freedesktop.org> 29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 30 * Dave Airlie 31 */ 32 33 #include <linux/dma-mapping.h> 34 #include <linux/iommu.h> 35 #include <linux/hmm.h> 36 #include <linux/pagemap.h> 37 #include <linux/sched/task.h> 38 #include <linux/sched/mm.h> 39 #include <linux/seq_file.h> 40 #include <linux/slab.h> 41 #include <linux/swap.h> 42 #include <linux/swiotlb.h> 43 #include <linux/dma-buf.h> 44 #include <linux/sizes.h> 45 46 #include <drm/ttm/ttm_bo_api.h> 47 #include <drm/ttm/ttm_bo_driver.h> 48 #include <drm/ttm/ttm_placement.h> 49 #include <drm/ttm/ttm_module.h> 50 #include <drm/ttm/ttm_page_alloc.h> 51 52 #include <drm/drm_debugfs.h> 53 #include <drm/amdgpu_drm.h> 54 55 #include "amdgpu.h" 56 #include "amdgpu_object.h" 57 #include "amdgpu_trace.h" 58 #include "amdgpu_amdkfd.h" 59 #include "amdgpu_sdma.h" 60 #include "amdgpu_ras.h" 61 #include "bif/bif_4_1_d.h" 62 63 #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 64 65 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 66 67 static int amdgpu_map_buffer(struct ttm_buffer_object *bo, 68 struct ttm_mem_reg *mem, unsigned num_pages, 69 uint64_t offset, unsigned window, 70 struct amdgpu_ring *ring, 71 uint64_t *addr); 72 73 /** 74 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of 75 * memory request. 76 * 77 * @bdev: The TTM BO device object (contains a reference to amdgpu_device) 78 * @type: The type of memory requested 79 * @man: The memory type manager for each domain 80 * 81 * This is called by ttm_bo_init_mm() when a buffer object is being 82 * initialized. 
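 *
 * As a rough illustration (the call is taken from amdgpu_ttm_init() later
 * in this file), the VRAM pool is registered with:
 *
 *   r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 *                      adev->gmc.real_vram_size >> PAGE_SHIFT);
 *
 * which lands here with type == TTM_PL_VRAM so that man->func,
 * man->gpu_offset and the caching flags below get filled in.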
83 */ 84 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 85 struct ttm_mem_type_manager *man) 86 { 87 struct amdgpu_device *adev; 88 89 adev = amdgpu_ttm_adev(bdev); 90 91 switch (type) { 92 case TTM_PL_SYSTEM: 93 /* System memory */ 94 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 95 man->available_caching = TTM_PL_MASK_CACHING; 96 man->default_caching = TTM_PL_FLAG_CACHED; 97 break; 98 case TTM_PL_TT: 99 /* GTT memory */ 100 man->func = &amdgpu_gtt_mgr_func; 101 man->gpu_offset = adev->gmc.gart_start; 102 man->available_caching = TTM_PL_MASK_CACHING; 103 man->default_caching = TTM_PL_FLAG_CACHED; 104 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 105 break; 106 case TTM_PL_VRAM: 107 /* "On-card" video ram */ 108 man->func = &amdgpu_vram_mgr_func; 109 man->gpu_offset = adev->gmc.vram_start; 110 man->flags = TTM_MEMTYPE_FLAG_FIXED | 111 TTM_MEMTYPE_FLAG_MAPPABLE; 112 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 113 man->default_caching = TTM_PL_FLAG_WC; 114 break; 115 case AMDGPU_PL_GDS: 116 case AMDGPU_PL_GWS: 117 case AMDGPU_PL_OA: 118 /* On-chip GDS memory*/ 119 man->func = &ttm_bo_manager_func; 120 man->gpu_offset = 0; 121 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA; 122 man->available_caching = TTM_PL_FLAG_UNCACHED; 123 man->default_caching = TTM_PL_FLAG_UNCACHED; 124 break; 125 default: 126 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); 127 return -EINVAL; 128 } 129 return 0; 130 } 131 132 /** 133 * amdgpu_evict_flags - Compute placement flags 134 * 135 * @bo: The buffer object to evict 136 * @placement: Possible destination(s) for evicted BO 137 * 138 * Fill in placement data when ttm_bo_evict() is called 139 */ 140 static void amdgpu_evict_flags(struct ttm_buffer_object *bo, 141 struct ttm_placement *placement) 142 { 143 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 144 struct amdgpu_bo *abo; 145 static const struct ttm_place placements = { 146 .fpfn = 0, 147 .lpfn = 0, 148 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM 149 }; 150 151 /* Don't handle scatter gather BOs */ 152 if (bo->type == ttm_bo_type_sg) { 153 placement->num_placement = 0; 154 placement->num_busy_placement = 0; 155 return; 156 } 157 158 /* Object isn't an AMDGPU object so ignore */ 159 if (!amdgpu_bo_is_amdgpu_bo(bo)) { 160 placement->placement = &placements; 161 placement->busy_placement = &placements; 162 placement->num_placement = 1; 163 placement->num_busy_placement = 1; 164 return; 165 } 166 167 abo = ttm_to_amdgpu_bo(bo); 168 switch (bo->mem.mem_type) { 169 case AMDGPU_PL_GDS: 170 case AMDGPU_PL_GWS: 171 case AMDGPU_PL_OA: 172 placement->num_placement = 0; 173 placement->num_busy_placement = 0; 174 return; 175 176 case TTM_PL_VRAM: 177 if (!adev->mman.buffer_funcs_enabled) { 178 /* Move to system memory */ 179 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); 180 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 181 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && 182 amdgpu_bo_in_cpu_visible_vram(abo)) { 183 184 /* Try evicting to the CPU inaccessible part of VRAM 185 * first, but only set GTT as busy placement, so this 186 * BO will be evicted to GTT rather than causing other 187 * BOs to be evicted from VRAM 188 */ 189 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | 190 AMDGPU_GEM_DOMAIN_GTT); 191 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; 192 abo->placements[0].lpfn = 0; 193 abo->placement.busy_placement = &abo->placements[1]; 194 
abo->placement.num_busy_placement = 1; 195 } else { 196 /* Move to GTT memory */ 197 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); 198 } 199 break; 200 case TTM_PL_TT: 201 default: 202 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); 203 break; 204 } 205 *placement = abo->placement; 206 } 207 208 /** 209 * amdgpu_verify_access - Verify access for a mmap call 210 * 211 * @bo: The buffer object to map 212 * @filp: The file pointer from the process performing the mmap 213 * 214 * This is called by ttm_bo_mmap() to verify whether a process 215 * has the right to mmap a BO to their process space. 216 */ 217 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) 218 { 219 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); 220 struct drm_file *file_priv = (void *)filp; 221 222 /* 223 * Don't verify access for KFD BOs. They don't have a GEM 224 * object associated with them. 225 */ 226 if (abo->kfd_bo) 227 return 0; 228 229 if (amdgpu_ttm_tt_get_usermm(bo->ttm)) 230 return -EPERM; 231 return drm_vma_node_verify_access(&abo->tbo.base.vma_node, file_priv); 232 } 233 234 /** 235 * amdgpu_move_null - Register memory for a buffer object 236 * 237 * @bo: The bo to assign the memory to 238 * @new_mem: The memory to be assigned. 239 * 240 * Assign the memory from new_mem to the memory of the buffer object bo. 241 */ 242 static void amdgpu_move_null(struct ttm_buffer_object *bo, 243 struct ttm_mem_reg *new_mem) 244 { 245 struct ttm_mem_reg *old_mem = &bo->mem; 246 247 BUG_ON(old_mem->mm_node != NULL); 248 *old_mem = *new_mem; 249 new_mem->mm_node = NULL; 250 } 251 252 /** 253 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer. 254 * 255 * @bo: The bo to assign the memory to. 256 * @mm_node: Memory manager node for drm allocator. 257 * @mem: The region where the bo resides. 258 * 259 */ 260 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, 261 struct drm_mm_node *mm_node, 262 struct ttm_mem_reg *mem) 263 { 264 uint64_t addr = 0; 265 266 if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) { 267 addr = mm_node->start << PAGE_SHIFT; 268 addr += bo->bdev->man[mem->mem_type].gpu_offset; 269 } 270 return addr; 271 } 272 273 /** 274 * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to 275 * @offset. It also modifies the offset to be within the drm_mm_node returned 276 * 277 * @mem: The region where the bo resides. 278 * @offset: The offset that drm_mm_node is used for finding. 279 * 280 */ 281 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, 282 unsigned long *offset) 283 { 284 struct drm_mm_node *mm_node = mem->mm_node; 285 286 while (*offset >= (mm_node->size << PAGE_SHIFT)) { 287 *offset -= (mm_node->size << PAGE_SHIFT); 288 ++mm_node; 289 } 290 return mm_node; 291 } 292 293 /** 294 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy 295 * 296 * The function copies @size bytes from {src->mem + src->offset} to 297 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a 298 * move and different for a BO to BO copy. 299 * 300 * @f: Returns the last fence if multiple jobs are submitted. 
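 *
 * A minimal usage sketch, mirroring what amdgpu_move_blit() below does
 * (the local variable names are illustrative only):
 *
 *   struct amdgpu_copy_mem src = { .bo = bo, .mem = old_mem, .offset = 0 };
 *   struct amdgpu_copy_mem dst = { .bo = bo, .mem = new_mem, .offset = 0 };
 *   struct dma_fence *fence = NULL;
 *
 *   r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 *                                  new_mem->num_pages << PAGE_SHIFT,
 *                                  bo->base.resv, &fence);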
301 */ 302 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, 303 struct amdgpu_copy_mem *src, 304 struct amdgpu_copy_mem *dst, 305 uint64_t size, 306 struct dma_resv *resv, 307 struct dma_fence **f) 308 { 309 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 310 struct drm_mm_node *src_mm, *dst_mm; 311 uint64_t src_node_start, dst_node_start, src_node_size, 312 dst_node_size, src_page_offset, dst_page_offset; 313 struct dma_fence *fence = NULL; 314 int r = 0; 315 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 316 AMDGPU_GPU_PAGE_SIZE); 317 318 if (!adev->mman.buffer_funcs_enabled) { 319 DRM_ERROR("Trying to move memory with ring turned off.\n"); 320 return -EINVAL; 321 } 322 323 src_mm = amdgpu_find_mm_node(src->mem, &src->offset); 324 src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) + 325 src->offset; 326 src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset; 327 src_page_offset = src_node_start & (PAGE_SIZE - 1); 328 329 dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset); 330 dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) + 331 dst->offset; 332 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset; 333 dst_page_offset = dst_node_start & (PAGE_SIZE - 1); 334 335 mutex_lock(&adev->mman.gtt_window_lock); 336 337 while (size) { 338 unsigned long cur_size; 339 uint64_t from = src_node_start, to = dst_node_start; 340 struct dma_fence *next; 341 342 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst 343 * begins at an offset, then adjust the size accordingly 344 */ 345 cur_size = min3(min(src_node_size, dst_node_size), size, 346 GTT_MAX_BYTES); 347 if (cur_size + src_page_offset > GTT_MAX_BYTES || 348 cur_size + dst_page_offset > GTT_MAX_BYTES) 349 cur_size -= max(src_page_offset, dst_page_offset); 350 351 /* Map only what needs to be accessed. 
Map src to window 0 and 352 * dst to window 1 353 */ 354 if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) { 355 r = amdgpu_map_buffer(src->bo, src->mem, 356 PFN_UP(cur_size + src_page_offset), 357 src_node_start, 0, ring, 358 &from); 359 if (r) 360 goto error; 361 /* Adjust the offset because amdgpu_map_buffer returns 362 * start of mapped page 363 */ 364 from += src_page_offset; 365 } 366 367 if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) { 368 r = amdgpu_map_buffer(dst->bo, dst->mem, 369 PFN_UP(cur_size + dst_page_offset), 370 dst_node_start, 1, ring, 371 &to); 372 if (r) 373 goto error; 374 to += dst_page_offset; 375 } 376 377 r = amdgpu_copy_buffer(ring, from, to, cur_size, 378 resv, &next, false, true); 379 if (r) 380 goto error; 381 382 dma_fence_put(fence); 383 fence = next; 384 385 size -= cur_size; 386 if (!size) 387 break; 388 389 src_node_size -= cur_size; 390 if (!src_node_size) { 391 src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm, 392 src->mem); 393 src_node_size = (src_mm->size << PAGE_SHIFT); 394 src_page_offset = 0; 395 } else { 396 src_node_start += cur_size; 397 src_page_offset = src_node_start & (PAGE_SIZE - 1); 398 } 399 dst_node_size -= cur_size; 400 if (!dst_node_size) { 401 dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm, 402 dst->mem); 403 dst_node_size = (dst_mm->size << PAGE_SHIFT); 404 dst_page_offset = 0; 405 } else { 406 dst_node_start += cur_size; 407 dst_page_offset = dst_node_start & (PAGE_SIZE - 1); 408 } 409 } 410 error: 411 mutex_unlock(&adev->mman.gtt_window_lock); 412 if (f) 413 *f = dma_fence_get(fence); 414 dma_fence_put(fence); 415 return r; 416 } 417 418 /** 419 * amdgpu_move_blit - Copy an entire buffer to another buffer 420 * 421 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to 422 * help move buffers to and from VRAM. 423 */ 424 static int amdgpu_move_blit(struct ttm_buffer_object *bo, 425 bool evict, bool no_wait_gpu, 426 struct ttm_mem_reg *new_mem, 427 struct ttm_mem_reg *old_mem) 428 { 429 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 430 struct amdgpu_copy_mem src, dst; 431 struct dma_fence *fence = NULL; 432 int r; 433 434 src.bo = bo; 435 dst.bo = bo; 436 src.mem = old_mem; 437 dst.mem = new_mem; 438 src.offset = 0; 439 dst.offset = 0; 440 441 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, 442 new_mem->num_pages << PAGE_SHIFT, 443 bo->base.resv, &fence); 444 if (r) 445 goto error; 446 447 /* clear the space being freed */ 448 if (old_mem->mem_type == TTM_PL_VRAM && 449 (ttm_to_amdgpu_bo(bo)->flags & 450 AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { 451 struct dma_fence *wipe_fence = NULL; 452 453 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON, 454 NULL, &wipe_fence); 455 if (r) { 456 goto error; 457 } else if (wipe_fence) { 458 dma_fence_put(fence); 459 fence = wipe_fence; 460 } 461 } 462 463 /* Always block for VM page tables before committing the new location */ 464 if (bo->type == ttm_bo_type_kernel) 465 r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); 466 else 467 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); 468 dma_fence_put(fence); 469 return r; 470 471 error: 472 if (fence) 473 dma_fence_wait(fence, false); 474 dma_fence_put(fence); 475 return r; 476 } 477 478 /** 479 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer 480 * 481 * Called by amdgpu_bo_move(). 
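 *
 * The move is done in two hops: a temporary GTT placement is found with
 * ttm_bo_mem_space(), the VRAM contents are blitted into it via
 * amdgpu_move_blit(), and the final hop into system memory is left to
 * ttm_bo_move_ttm().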
482 */ 483 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, 484 struct ttm_operation_ctx *ctx, 485 struct ttm_mem_reg *new_mem) 486 { 487 struct ttm_mem_reg *old_mem = &bo->mem; 488 struct ttm_mem_reg tmp_mem; 489 struct ttm_place placements; 490 struct ttm_placement placement; 491 int r; 492 493 /* create space/pages for new_mem in GTT space */ 494 tmp_mem = *new_mem; 495 tmp_mem.mm_node = NULL; 496 placement.num_placement = 1; 497 placement.placement = &placements; 498 placement.num_busy_placement = 1; 499 placement.busy_placement = &placements; 500 placements.fpfn = 0; 501 placements.lpfn = 0; 502 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 503 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); 504 if (unlikely(r)) { 505 pr_err("Failed to find GTT space for blit from VRAM\n"); 506 return r; 507 } 508 509 /* set caching flags */ 510 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); 511 if (unlikely(r)) { 512 goto out_cleanup; 513 } 514 515 /* Bind the memory to the GTT space */ 516 r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); 517 if (unlikely(r)) { 518 goto out_cleanup; 519 } 520 521 /* blit VRAM to GTT */ 522 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem); 523 if (unlikely(r)) { 524 goto out_cleanup; 525 } 526 527 /* move BO (in tmp_mem) to new_mem */ 528 r = ttm_bo_move_ttm(bo, ctx, new_mem); 529 out_cleanup: 530 ttm_bo_mem_put(bo, &tmp_mem); 531 return r; 532 } 533 534 /** 535 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM 536 * 537 * Called by amdgpu_bo_move(). 538 */ 539 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, 540 struct ttm_operation_ctx *ctx, 541 struct ttm_mem_reg *new_mem) 542 { 543 struct ttm_mem_reg *old_mem = &bo->mem; 544 struct ttm_mem_reg tmp_mem; 545 struct ttm_placement placement; 546 struct ttm_place placements; 547 int r; 548 549 /* make space in GTT for old_mem buffer */ 550 tmp_mem = *new_mem; 551 tmp_mem.mm_node = NULL; 552 placement.num_placement = 1; 553 placement.placement = &placements; 554 placement.num_busy_placement = 1; 555 placement.busy_placement = &placements; 556 placements.fpfn = 0; 557 placements.lpfn = 0; 558 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 559 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); 560 if (unlikely(r)) { 561 pr_err("Failed to find GTT space for blit to VRAM\n"); 562 return r; 563 } 564 565 /* move/bind old memory to GTT space */ 566 r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); 567 if (unlikely(r)) { 568 goto out_cleanup; 569 } 570 571 /* copy to VRAM */ 572 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem); 573 if (unlikely(r)) { 574 goto out_cleanup; 575 } 576 out_cleanup: 577 ttm_bo_mem_put(bo, &tmp_mem); 578 return r; 579 } 580 581 /** 582 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy 583 * 584 * Called by amdgpu_bo_move() 585 */ 586 static bool amdgpu_mem_visible(struct amdgpu_device *adev, 587 struct ttm_mem_reg *mem) 588 { 589 struct drm_mm_node *nodes = mem->mm_node; 590 591 if (mem->mem_type == TTM_PL_SYSTEM || 592 mem->mem_type == TTM_PL_TT) 593 return true; 594 if (mem->mem_type != TTM_PL_VRAM) 595 return false; 596 597 /* ttm_mem_reg_ioremap only supports contiguous memory */ 598 if (nodes->size != mem->num_pages) 599 return false; 600 601 return ((nodes->start + nodes->size) << PAGE_SHIFT) 602 <= adev->gmc.visible_vram_size; 603 } 604 605 /** 606 * amdgpu_bo_move - Move a buffer object to a new memory location 607 * 608 * Called by 
ttm_bo_handle_move_mem() 609 */ 610 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, 611 struct ttm_operation_ctx *ctx, 612 struct ttm_mem_reg *new_mem) 613 { 614 struct amdgpu_device *adev; 615 struct amdgpu_bo *abo; 616 struct ttm_mem_reg *old_mem = &bo->mem; 617 int r; 618 619 /* Can't move a pinned BO */ 620 abo = ttm_to_amdgpu_bo(bo); 621 if (WARN_ON_ONCE(abo->pin_count > 0)) 622 return -EINVAL; 623 624 adev = amdgpu_ttm_adev(bo->bdev); 625 626 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 627 amdgpu_move_null(bo, new_mem); 628 return 0; 629 } 630 if ((old_mem->mem_type == TTM_PL_TT && 631 new_mem->mem_type == TTM_PL_SYSTEM) || 632 (old_mem->mem_type == TTM_PL_SYSTEM && 633 new_mem->mem_type == TTM_PL_TT)) { 634 /* bind is enough */ 635 amdgpu_move_null(bo, new_mem); 636 return 0; 637 } 638 if (old_mem->mem_type == AMDGPU_PL_GDS || 639 old_mem->mem_type == AMDGPU_PL_GWS || 640 old_mem->mem_type == AMDGPU_PL_OA || 641 new_mem->mem_type == AMDGPU_PL_GDS || 642 new_mem->mem_type == AMDGPU_PL_GWS || 643 new_mem->mem_type == AMDGPU_PL_OA) { 644 /* Nothing to save here */ 645 amdgpu_move_null(bo, new_mem); 646 return 0; 647 } 648 649 if (!adev->mman.buffer_funcs_enabled) { 650 r = -ENODEV; 651 goto memcpy; 652 } 653 654 if (old_mem->mem_type == TTM_PL_VRAM && 655 new_mem->mem_type == TTM_PL_SYSTEM) { 656 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem); 657 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 658 new_mem->mem_type == TTM_PL_VRAM) { 659 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem); 660 } else { 661 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, 662 new_mem, old_mem); 663 } 664 665 if (r) { 666 memcpy: 667 /* Check that all memory is CPU accessible */ 668 if (!amdgpu_mem_visible(adev, old_mem) || 669 !amdgpu_mem_visible(adev, new_mem)) { 670 pr_err("Move buffer fallback to memcpy unavailable\n"); 671 return r; 672 } 673 674 r = ttm_bo_move_memcpy(bo, ctx, new_mem); 675 if (r) 676 return r; 677 } 678 679 if (bo->type == ttm_bo_type_device && 680 new_mem->mem_type == TTM_PL_VRAM && 681 old_mem->mem_type != TTM_PL_VRAM) { 682 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU 683 * accesses the BO after it's moved. 684 */ 685 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 686 } 687 688 /* update statistics */ 689 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); 690 return 0; 691 } 692 693 /** 694 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault 695 * 696 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() 697 */ 698 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 699 { 700 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 701 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); 702 struct drm_mm_node *mm_node = mem->mm_node; 703 704 mem->bus.addr = NULL; 705 mem->bus.offset = 0; 706 mem->bus.size = mem->num_pages << PAGE_SHIFT; 707 mem->bus.base = 0; 708 mem->bus.is_iomem = false; 709 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) 710 return -EINVAL; 711 switch (mem->mem_type) { 712 case TTM_PL_SYSTEM: 713 /* system memory */ 714 return 0; 715 case TTM_PL_TT: 716 break; 717 case TTM_PL_VRAM: 718 mem->bus.offset = mem->start << PAGE_SHIFT; 719 /* check if it's visible */ 720 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size) 721 return -EINVAL; 722 /* Only physically contiguous buffers apply. 
In a contiguous 723 * buffer, size of the first mm_node would match the number of 724 * pages in ttm_mem_reg. 725 */ 726 if (adev->mman.aper_base_kaddr && 727 (mm_node->size == mem->num_pages)) 728 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + 729 mem->bus.offset; 730 731 mem->bus.base = adev->gmc.aper_base; 732 mem->bus.is_iomem = true; 733 break; 734 default: 735 return -EINVAL; 736 } 737 return 0; 738 } 739 740 static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 741 { 742 } 743 744 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, 745 unsigned long page_offset) 746 { 747 struct drm_mm_node *mm; 748 unsigned long offset = (page_offset << PAGE_SHIFT); 749 750 mm = amdgpu_find_mm_node(&bo->mem, &offset); 751 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + 752 (offset >> PAGE_SHIFT); 753 } 754 755 /* 756 * TTM backend functions. 757 */ 758 struct amdgpu_ttm_tt { 759 struct ttm_dma_tt ttm; 760 struct drm_gem_object *gobj; 761 u64 offset; 762 uint64_t userptr; 763 struct task_struct *usertask; 764 uint32_t userflags; 765 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) 766 struct hmm_range *range; 767 #endif 768 }; 769 770 #ifdef CONFIG_DRM_AMDGPU_USERPTR 771 /* flags used by HMM internal, not related to CPU/GPU PTE flags */ 772 static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { 773 (1 << 0), /* HMM_PFN_VALID */ 774 (1 << 1), /* HMM_PFN_WRITE */ 775 }; 776 777 static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { 778 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */ 779 0, /* HMM_PFN_NONE */ 780 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */ 781 }; 782 783 /** 784 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user 785 * memory and start HMM tracking CPU page table update 786 * 787 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only 788 * once afterwards to stop HMM tracking 789 */ 790 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct vm_page **pages) 791 { 792 struct ttm_tt *ttm = bo->tbo.ttm; 793 struct amdgpu_ttm_tt *gtt = (void *)ttm; 794 unsigned long start = gtt->userptr; 795 struct vm_area_struct *vma; 796 struct hmm_range *range; 797 unsigned long timeout; 798 struct mm_struct *mm; 799 unsigned long i; 800 int r = 0; 801 802 mm = bo->notifier.mm; 803 if (unlikely(!mm)) { 804 DRM_DEBUG_DRIVER("BO is not registered?\n"); 805 return -EFAULT; 806 } 807 808 /* Another get_user_pages is running at the same time?? 
*/ 809 if (WARN_ON(gtt->range)) 810 return -EFAULT; 811 812 if (!mmget_not_zero(mm)) /* Happens during process shutdown */ 813 return -ESRCH; 814 815 range = kzalloc(sizeof(*range), GFP_KERNEL); 816 if (unlikely(!range)) { 817 r = -ENOMEM; 818 goto out; 819 } 820 range->notifier = &bo->notifier; 821 range->flags = hmm_range_flags; 822 range->values = hmm_range_values; 823 range->pfn_shift = PAGE_SHIFT; 824 range->start = bo->notifier.interval_tree.start; 825 range->end = bo->notifier.interval_tree.last + 1; 826 range->default_flags = hmm_range_flags[HMM_PFN_VALID]; 827 if (!amdgpu_ttm_tt_is_readonly(ttm)) 828 range->default_flags |= range->flags[HMM_PFN_WRITE]; 829 830 range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns), 831 GFP_KERNEL); 832 if (unlikely(!range->pfns)) { 833 r = -ENOMEM; 834 goto out_free_ranges; 835 } 836 837 down_read(&mm->mmap_sem); 838 vma = find_vma(mm, start); 839 if (unlikely(!vma || start < vma->vm_start)) { 840 r = -EFAULT; 841 goto out_unlock; 842 } 843 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) && 844 vma->vm_file)) { 845 r = -EPERM; 846 goto out_unlock; 847 } 848 up_read(&mm->mmap_sem); 849 timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); 850 851 retry: 852 range->notifier_seq = mmu_interval_read_begin(&bo->notifier); 853 854 down_read(&mm->mmap_sem); 855 r = hmm_range_fault(range); 856 up_read(&mm->mmap_sem); 857 if (unlikely(r <= 0)) { 858 /* 859 * FIXME: This timeout should encompass the retry from 860 * mmu_interval_read_retry() as well. 861 */ 862 if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout)) 863 goto retry; 864 goto out_free_pfns; 865 } 866 867 for (i = 0; i < ttm->num_pages; i++) { 868 /* FIXME: The pages cannot be touched outside the notifier_lock */ 869 pages[i] = hmm_device_entry_to_page(range, range->pfns[i]); 870 if (unlikely(!pages[i])) { 871 pr_err("Page fault failed for pfn[%lu] = 0x%llx\n", 872 i, range->pfns[i]); 873 r = -ENOMEM; 874 875 goto out_free_pfns; 876 } 877 } 878 879 gtt->range = range; 880 mmput(mm); 881 882 return 0; 883 884 out_unlock: 885 up_read(&mm->mmap_sem); 886 out_free_pfns: 887 kvfree(range->pfns); 888 out_free_ranges: 889 kfree(range); 890 out: 891 mmput(mm); 892 return r; 893 } 894 895 /** 896 * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change 897 * Check if the pages backing this ttm range have been invalidated 898 * 899 * Returns: true if pages are still valid 900 */ 901 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) 902 { 903 struct amdgpu_ttm_tt *gtt = (void *)ttm; 904 bool r = false; 905 906 if (!gtt || !gtt->userptr) 907 return false; 908 909 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n", 910 gtt->userptr, ttm->num_pages); 911 912 WARN_ONCE(!gtt->range || !gtt->range->pfns, 913 "No user pages to check\n"); 914 915 if (gtt->range) { 916 /* 917 * FIXME: Must always hold notifier_lock for this, and must 918 * not ignore the return code. 919 */ 920 r = mmu_interval_read_retry(gtt->range->notifier, 921 gtt->range->notifier_seq); 922 kvfree(gtt->range->pfns); 923 kfree(gtt->range); 924 gtt->range = NULL; 925 } 926 927 return !r; 928 } 929 #endif 930 931 /** 932 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary. 933 * 934 * Called by amdgpu_cs_list_validate(). This creates the page list 935 * that backs user memory and will ultimately be mapped into the device 936 * address space. 
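 *
 * A sketch of the surrounding userptr flow (the ordering is illustrative;
 * all three helpers are defined in this file):
 *
 *   amdgpu_ttm_tt_get_user_pages(bo, pages);     (fault pages, start HMM tracking)
 *   amdgpu_ttm_tt_set_user_pages(ttm, pages);    (publish them into ttm->pages)
 *   ...
 *   amdgpu_ttm_tt_get_user_pages_done(ttm);      (stop tracking, check validity)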
937 */ 938 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct vm_page **pages) 939 { 940 unsigned long i; 941 942 for (i = 0; i < ttm->num_pages; ++i) 943 ttm->pages[i] = pages ? pages[i] : NULL; 944 } 945 946 /** 947 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages 948 * 949 * Called by amdgpu_ttm_backend_bind() 950 **/ 951 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) 952 { 953 STUB(); 954 return -ENOSYS; 955 #ifdef notyet 956 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); 957 struct amdgpu_ttm_tt *gtt = (void *)ttm; 958 unsigned nents; 959 int r; 960 961 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); 962 enum dma_data_direction direction = write ? 963 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 964 965 /* Allocate an SG array and squash pages into it */ 966 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, 967 ttm->num_pages << PAGE_SHIFT, 968 GFP_KERNEL); 969 if (r) 970 goto release_sg; 971 972 /* Map SG to device */ 973 r = -ENOMEM; 974 nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); 975 if (nents == 0) 976 goto release_sg; 977 978 /* convert SG to linear array of pages and dma addresses */ 979 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, 980 gtt->ttm.dma_address, ttm->num_pages); 981 982 return 0; 983 984 release_sg: 985 kfree(ttm->sg); 986 return r; 987 #endif 988 } 989 990 /** 991 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages 992 */ 993 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) 994 { 995 STUB(); 996 #ifdef notyet 997 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); 998 struct amdgpu_ttm_tt *gtt = (void *)ttm; 999 1000 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); 1001 enum dma_data_direction direction = write ? 1002 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1003 1004 /* double check that we don't free the table twice */ 1005 if (!ttm->sg->sgl) 1006 return; 1007 1008 /* unmap the pages mapped to the device */ 1009 dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); 1010 1011 sg_free_table(ttm->sg); 1012 1013 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) 1014 if (gtt->range) { 1015 unsigned long i; 1016 1017 for (i = 0; i < ttm->num_pages; i++) { 1018 if (ttm->pages[i] != 1019 hmm_device_entry_to_page(gtt->range, 1020 gtt->range->pfns[i])) 1021 break; 1022 } 1023 1024 WARN((i == ttm->num_pages), "Missing get_user_page_done\n"); 1025 } 1026 #endif 1027 #endif 1028 } 1029 1030 int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, 1031 struct ttm_buffer_object *tbo, 1032 uint64_t flags) 1033 { 1034 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); 1035 struct ttm_tt *ttm = tbo->ttm; 1036 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1037 int r; 1038 1039 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { 1040 uint64_t page_idx = 1; 1041 1042 r = amdgpu_gart_bind(adev, gtt->offset, page_idx, 1043 ttm->pages, gtt->ttm.dma_address, flags); 1044 if (r) 1045 goto gart_bind_fail; 1046 1047 /* The memory type of the first page defaults to UC. Now 1048 * modify the memory type to NC from the second page of 1049 * the BO onward. 
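 *
 * The net effect is two amdgpu_gart_bind() calls: a single page bound
 * with the default (UC) PTE flags, then the remaining
 * ttm->num_pages - page_idx pages bound with the MTYPE field rewritten
 * to NC, as done just below.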
1050 */ 1051 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; 1052 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); 1053 1054 r = amdgpu_gart_bind(adev, 1055 gtt->offset + (page_idx << PAGE_SHIFT), 1056 ttm->num_pages - page_idx, 1057 &ttm->pages[page_idx], 1058 &(gtt->ttm.dma_address[page_idx]), flags); 1059 } else { 1060 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, 1061 ttm->pages, gtt->ttm.dma_address, flags); 1062 } 1063 1064 gart_bind_fail: 1065 if (r) 1066 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", 1067 ttm->num_pages, gtt->offset); 1068 1069 return r; 1070 } 1071 1072 /** 1073 * amdgpu_ttm_backend_bind - Bind GTT memory 1074 * 1075 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). 1076 * This handles binding GTT memory to the device address space. 1077 */ 1078 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, 1079 struct ttm_mem_reg *bo_mem) 1080 { 1081 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); 1082 struct amdgpu_ttm_tt *gtt = (void*)ttm; 1083 uint64_t flags; 1084 int r = 0; 1085 1086 if (gtt->userptr) { 1087 r = amdgpu_ttm_tt_pin_userptr(ttm); 1088 if (r) { 1089 DRM_ERROR("failed to pin userptr\n"); 1090 return r; 1091 } 1092 } 1093 if (!ttm->num_pages) { 1094 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", 1095 ttm->num_pages, bo_mem, ttm); 1096 } 1097 1098 if (bo_mem->mem_type == AMDGPU_PL_GDS || 1099 bo_mem->mem_type == AMDGPU_PL_GWS || 1100 bo_mem->mem_type == AMDGPU_PL_OA) 1101 return -EINVAL; 1102 1103 if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { 1104 gtt->offset = AMDGPU_BO_INVALID_OFFSET; 1105 return 0; 1106 } 1107 1108 /* compute PTE flags relevant to this BO memory */ 1109 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); 1110 1111 /* bind pages into GART page tables */ 1112 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; 1113 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, 1114 ttm->pages, gtt->ttm.dma_address, flags); 1115 1116 if (r) 1117 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", 1118 ttm->num_pages, gtt->offset); 1119 return r; 1120 } 1121 1122 /** 1123 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object 1124 */ 1125 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) 1126 { 1127 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 1128 struct ttm_operation_ctx ctx = { false, false }; 1129 struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; 1130 struct ttm_mem_reg tmp; 1131 struct ttm_placement placement; 1132 struct ttm_place placements; 1133 uint64_t addr, flags; 1134 int r; 1135 1136 if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) 1137 return 0; 1138 1139 addr = amdgpu_gmc_agp_addr(bo); 1140 if (addr != AMDGPU_BO_INVALID_OFFSET) { 1141 bo->mem.start = addr >> PAGE_SHIFT; 1142 } else { 1143 1144 /* allocate GART space */ 1145 tmp = bo->mem; 1146 tmp.mm_node = NULL; 1147 placement.num_placement = 1; 1148 placement.placement = &placements; 1149 placement.num_busy_placement = 1; 1150 placement.busy_placement = &placements; 1151 placements.fpfn = 0; 1152 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; 1153 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | 1154 TTM_PL_FLAG_TT; 1155 1156 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); 1157 if (unlikely(r)) 1158 return r; 1159 1160 /* compute PTE flags for this buffer object */ 1161 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); 1162 1163 /* Bind pages */ 1164 gtt->offset = (u64)tmp.start << PAGE_SHIFT; 1165 r = amdgpu_ttm_gart_bind(adev, bo, flags); 1166 if (unlikely(r)) { 1167 ttm_bo_mem_put(bo, &tmp); 1168 
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

#ifdef notyet
	if (gtt->usertask)
		put_task_struct(gtt->usertask);
#endif

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_ttm_tt *gtt;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->gobj = &bo->base;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
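 *
 * Three cases are handled below, summarized here for orientation:
 * userptr BOs only get an empty sg_table allocated (the real pages are
 * pinned later by amdgpu_ttm_tt_pin_userptr()), imported dma-bufs map
 * their attachment and reuse the resulting sg_table, and everything else
 * goes through the generic TTM allocators (ttm_dma_populate() or
 * ttm_populate_and_map_pages()).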
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
#ifdef notyet
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);
#else
			STUB();
			return -ENOSYS;
#endif

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
#ifdef notyet
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
#else
		STUB();
#endif
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current task's VM space to use
 * @flags: Requirements of userptr object.
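 *         (e.g. AMDGPU_GEM_USERPTR_READONLY or AMDGPU_GEM_USERPTR_ANONONLY,
 *         both of which are checked elsewhere in this file)
 *
 * An illustrative call, with the argument names assumed rather than taken
 * from the ioctl handler:
 *
 *   amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);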
1388 * 1389 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages 1390 * to current task 1391 */ 1392 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 1393 uint32_t flags) 1394 { 1395 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1396 1397 if (gtt == NULL) 1398 return -EINVAL; 1399 1400 gtt->userptr = addr; 1401 gtt->userflags = flags; 1402 1403 #ifdef notyet 1404 if (gtt->usertask) 1405 put_task_struct(gtt->usertask); 1406 gtt->usertask = current->group_leader; 1407 get_task_struct(gtt->usertask); 1408 #endif 1409 1410 return 0; 1411 } 1412 1413 /** 1414 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object 1415 */ 1416 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) 1417 { 1418 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1419 1420 if (gtt == NULL) 1421 return NULL; 1422 1423 if (gtt->usertask == NULL) 1424 return NULL; 1425 1426 #ifdef notyet 1427 return gtt->usertask->mm; 1428 #else 1429 STUB(); 1430 return NULL; 1431 #endif 1432 } 1433 1434 /** 1435 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an 1436 * address range for the current task. 1437 * 1438 */ 1439 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, 1440 unsigned long end) 1441 { 1442 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1443 unsigned long size; 1444 1445 if (gtt == NULL || !gtt->userptr) 1446 return false; 1447 1448 /* Return false if no part of the ttm_tt object lies within 1449 * the range 1450 */ 1451 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; 1452 if (gtt->userptr > end || gtt->userptr + size <= start) 1453 return false; 1454 1455 return true; 1456 } 1457 1458 /** 1459 * amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr? 1460 */ 1461 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) 1462 { 1463 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1464 1465 if (gtt == NULL || !gtt->userptr) 1466 return false; 1467 1468 return true; 1469 } 1470 1471 /** 1472 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? 1473 */ 1474 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) 1475 { 1476 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1477 1478 if (gtt == NULL) 1479 return false; 1480 1481 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); 1482 } 1483 1484 /** 1485 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object 1486 * 1487 * @ttm: The ttm_tt object to compute the flags for 1488 * @mem: The memory registry backing this ttm_tt object 1489 * 1490 * Figure out the flags to use for a VM PDE (Page Directory Entry). 1491 */ 1492 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 1493 { 1494 uint64_t flags = 0; 1495 1496 if (mem && mem->mem_type != TTM_PL_SYSTEM) 1497 flags |= AMDGPU_PTE_VALID; 1498 1499 if (mem && mem->mem_type == TTM_PL_TT) { 1500 flags |= AMDGPU_PTE_SYSTEM; 1501 1502 if (ttm->caching_state == tt_cached) 1503 flags |= AMDGPU_PTE_SNOOPED; 1504 } 1505 1506 return flags; 1507 } 1508 1509 /** 1510 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object 1511 * 1512 * @ttm: The ttm_tt object to compute the flags for 1513 * @mem: The memory registry backing this ttm_tt object 1514 1515 * Figure out the flags to use for a VM PTE (Page Table Entry). 
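 *
 * Sketch of how the result is composed (this mirrors the body below):
 *
 *   flags  = amdgpu_ttm_tt_pde_flags(ttm, mem);
 *   flags |= adev->gart.gart_pte_flags | AMDGPU_PTE_READABLE;
 *   if (!amdgpu_ttm_tt_is_readonly(ttm))
 *           flags |= AMDGPU_PTE_WRITEABLE;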
1516 */ 1517 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, 1518 struct ttm_mem_reg *mem) 1519 { 1520 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); 1521 1522 flags |= adev->gart.gart_pte_flags; 1523 flags |= AMDGPU_PTE_READABLE; 1524 1525 if (!amdgpu_ttm_tt_is_readonly(ttm)) 1526 flags |= AMDGPU_PTE_WRITEABLE; 1527 1528 return flags; 1529 } 1530 1531 /** 1532 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer 1533 * object. 1534 * 1535 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on 1536 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until 1537 * it can find space for a new object and by ttm_bo_force_list_clean() which is 1538 * used to clean out a memory space. 1539 */ 1540 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, 1541 const struct ttm_place *place) 1542 { 1543 unsigned long num_pages = bo->mem.num_pages; 1544 struct drm_mm_node *node = bo->mem.mm_node; 1545 struct dma_resv_list *flist; 1546 struct dma_fence *f; 1547 int i; 1548 1549 if (bo->type == ttm_bo_type_kernel && 1550 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) 1551 return false; 1552 1553 /* If bo is a KFD BO, check if the bo belongs to the current process. 1554 * If true, then return false as any KFD process needs all its BOs to 1555 * be resident to run successfully 1556 */ 1557 flist = dma_resv_get_list(bo->base.resv); 1558 if (flist) { 1559 for (i = 0; i < flist->shared_count; ++i) { 1560 f = rcu_dereference_protected(flist->shared[i], 1561 dma_resv_held(bo->base.resv)); 1562 #ifdef notyet 1563 if (amdkfd_fence_check_mm(f, current->mm)) 1564 return false; 1565 #endif 1566 } 1567 } 1568 1569 switch (bo->mem.mem_type) { 1570 case TTM_PL_TT: 1571 return true; 1572 1573 case TTM_PL_VRAM: 1574 /* Check each drm MM node individually */ 1575 while (num_pages) { 1576 if (place->fpfn < (node->start + node->size) && 1577 !(place->lpfn && place->lpfn <= node->start)) 1578 return true; 1579 1580 num_pages -= node->size; 1581 ++node; 1582 } 1583 return false; 1584 1585 default: 1586 break; 1587 } 1588 1589 return ttm_bo_eviction_valuable(bo, place); 1590 } 1591 1592 /** 1593 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. 1594 * 1595 * @bo: The buffer object to read/write 1596 * @offset: Offset into buffer object 1597 * @buf: Secondary buffer to write/read from 1598 * @len: Length in bytes of access 1599 * @write: true if writing 1600 * 1601 * This is used to access VRAM that backs a buffer object via MMIO 1602 * access for debugging purposes. 
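 *
 * Partial or unaligned words are accessed through the MM_INDEX/MM_DATA
 * register window; aligned whole-word runs are handed to
 * amdgpu_device_vram_access() instead (see the loop below).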
1603 */ 1604 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, 1605 unsigned long offset, 1606 void *buf, int len, int write) 1607 { 1608 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); 1609 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); 1610 struct drm_mm_node *nodes; 1611 uint32_t value = 0; 1612 int ret = 0; 1613 uint64_t pos; 1614 unsigned long flags; 1615 1616 if (bo->mem.mem_type != TTM_PL_VRAM) 1617 return -EIO; 1618 1619 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); 1620 pos = (nodes->start << PAGE_SHIFT) + offset; 1621 1622 while (len && pos < adev->gmc.mc_vram_size) { 1623 uint64_t aligned_pos = pos & ~(uint64_t)3; 1624 uint64_t bytes = 4 - (pos & 3); 1625 uint32_t shift = (pos & 3) * 8; 1626 uint32_t mask = 0xffffffff << shift; 1627 1628 if (len < bytes) { 1629 mask &= 0xffffffff >> (bytes - len) * 8; 1630 bytes = len; 1631 } 1632 1633 if (mask != 0xffffffff) { 1634 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 1635 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); 1636 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); 1637 if (!write || mask != 0xffffffff) 1638 value = RREG32_NO_KIQ(mmMM_DATA); 1639 if (write) { 1640 value &= ~mask; 1641 value |= (*(uint32_t *)buf << shift) & mask; 1642 WREG32_NO_KIQ(mmMM_DATA, value); 1643 } 1644 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 1645 if (!write) { 1646 value = (value & mask) >> shift; 1647 memcpy(buf, &value, bytes); 1648 } 1649 } else { 1650 bytes = (nodes->start + nodes->size) << PAGE_SHIFT; 1651 bytes = min(bytes - pos, (uint64_t)len & ~0x3ull); 1652 1653 amdgpu_device_vram_access(adev, pos, (uint32_t *)buf, 1654 bytes, write); 1655 } 1656 1657 ret += bytes; 1658 buf = (uint8_t *)buf + bytes; 1659 pos += bytes; 1660 len -= bytes; 1661 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { 1662 ++nodes; 1663 pos = (nodes->start << PAGE_SHIFT); 1664 } 1665 } 1666 1667 return ret; 1668 } 1669 1670 static struct ttm_bo_driver amdgpu_bo_driver = { 1671 .ttm_tt_create = &amdgpu_ttm_tt_create, 1672 .ttm_tt_populate = &amdgpu_ttm_tt_populate, 1673 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, 1674 .init_mem_type = &amdgpu_init_mem_type, 1675 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, 1676 .evict_flags = &amdgpu_evict_flags, 1677 .move = &amdgpu_bo_move, 1678 .verify_access = &amdgpu_verify_access, 1679 .move_notify = &amdgpu_bo_move_notify, 1680 .release_notify = &amdgpu_bo_release_notify, 1681 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, 1682 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, 1683 .io_mem_free = &amdgpu_ttm_io_mem_free, 1684 .io_mem_pfn = amdgpu_ttm_io_mem_pfn, 1685 .access_memory = &amdgpu_ttm_access_memory, 1686 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify 1687 }; 1688 1689 /* 1690 * Firmware Reservation functions 1691 */ 1692 /** 1693 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram 1694 * 1695 * @adev: amdgpu_device pointer 1696 * 1697 * free fw reserved vram if it has been reserved. 1698 */ 1699 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) 1700 { 1701 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, 1702 NULL, &adev->fw_vram_usage.va); 1703 } 1704 1705 /** 1706 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw 1707 * 1708 * @adev: amdgpu_device pointer 1709 * 1710 * create bo vram reservation from fw. 
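 *
 * The reservation is skipped when the firmware region is empty or larger
 * than visible VRAM; otherwise the region is pinned at the exact offset
 * the firmware expects via amdgpu_bo_create_kernel_at().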
1711 */ 1712 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) 1713 { 1714 uint64_t vram_size = adev->gmc.visible_vram_size; 1715 1716 adev->fw_vram_usage.va = NULL; 1717 adev->fw_vram_usage.reserved_bo = NULL; 1718 1719 if (adev->fw_vram_usage.size == 0 || 1720 adev->fw_vram_usage.size > vram_size) 1721 return 0; 1722 1723 return amdgpu_bo_create_kernel_at(adev, 1724 adev->fw_vram_usage.start_offset, 1725 adev->fw_vram_usage.size, 1726 AMDGPU_GEM_DOMAIN_VRAM, 1727 &adev->fw_vram_usage.reserved_bo, 1728 &adev->fw_vram_usage.va); 1729 } 1730 1731 /* 1732 * Memoy training reservation functions 1733 */ 1734 1735 /** 1736 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram 1737 * 1738 * @adev: amdgpu_device pointer 1739 * 1740 * free memory training reserved vram if it has been reserved. 1741 */ 1742 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) 1743 { 1744 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; 1745 1746 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 1747 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); 1748 ctx->c2p_bo = NULL; 1749 1750 return 0; 1751 } 1752 1753 static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size) 1754 { 1755 if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1) ) 1756 vram_size -= SZ_1M; 1757 1758 return roundup2(vram_size, SZ_1M); 1759 } 1760 1761 /** 1762 * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training 1763 * 1764 * @adev: amdgpu_device pointer 1765 * 1766 * create bo vram reservation from memory training. 1767 */ 1768 static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) 1769 { 1770 int ret; 1771 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; 1772 1773 memset(ctx, 0, sizeof(*ctx)); 1774 if (!adev->fw_vram_usage.mem_train_support) { 1775 DRM_DEBUG("memory training does not support!\n"); 1776 return 0; 1777 } 1778 1779 ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size); 1780 ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); 1781 ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; 1782 1783 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 1784 ctx->train_data_size, 1785 ctx->p2c_train_data_offset, 1786 ctx->c2p_train_data_offset); 1787 1788 ret = amdgpu_bo_create_kernel_at(adev, 1789 ctx->c2p_train_data_offset, 1790 ctx->train_data_size, 1791 AMDGPU_GEM_DOMAIN_VRAM, 1792 &ctx->c2p_bo, 1793 NULL); 1794 if (ret) { 1795 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); 1796 amdgpu_ttm_training_reserve_vram_fini(adev); 1797 return ret; 1798 } 1799 1800 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; 1801 return 0; 1802 } 1803 1804 /** 1805 * amdgpu_ttm_init - Init the memory management (ttm) as well as various 1806 * gtt/vram related fields. 1807 * 1808 * This initializes all of the memory space pools that the TTM layer 1809 * will need such as the GTT space (system memory mapped to the device), 1810 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which 1811 * can be mapped per VMID. 
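 *
 * Rough order of operations below: ttm_bo_device_init(), the VRAM pool,
 * the firmware / memory-training / stolen-VGA / IP-discovery reservations,
 * the GTT pool, and finally the GDS, GWS and OA pools.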
1812 */ 1813 int amdgpu_ttm_init(struct amdgpu_device *adev) 1814 { 1815 uint64_t gtt_size; 1816 int r; 1817 u64 vis_vram_limit; 1818 void *stolen_vga_buf; 1819 1820 rw_init(&adev->mman.gtt_window_lock, "gttwin"); 1821 1822 /* No others user of address space so set it to 0 */ 1823 #ifdef notyet 1824 r = ttm_bo_device_init(&adev->mman.bdev, 1825 &amdgpu_bo_driver, 1826 adev->ddev->anon_inode->i_mapping, 1827 adev->ddev->vma_offset_manager, 1828 dma_addressing_limited(adev->dev)); 1829 #else 1830 r = ttm_bo_device_init(&adev->mman.bdev, 1831 &amdgpu_bo_driver, 1832 /*adev->ddev->anon_inode->i_mapping*/ NULL, 1833 adev->ddev->vma_offset_manager, 1834 dma_addressing_limited(adev->dev)); 1835 #endif 1836 if (r) { 1837 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 1838 return r; 1839 } 1840 adev->mman.bdev.iot = adev->iot; 1841 adev->mman.bdev.memt = adev->memt; 1842 adev->mman.bdev.dmat = adev->dmat; 1843 adev->mman.initialized = true; 1844 1845 /* We opt to avoid OOM on system pages allocations */ 1846 adev->mman.bdev.no_retry = true; 1847 1848 /* Initialize VRAM pool with all of VRAM divided into pages */ 1849 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, 1850 adev->gmc.real_vram_size >> PAGE_SHIFT); 1851 if (r) { 1852 DRM_ERROR("Failed initializing VRAM heap.\n"); 1853 return r; 1854 } 1855 1856 /* Reduce size of CPU-visible VRAM if requested */ 1857 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; 1858 if (amdgpu_vis_vram_limit > 0 && 1859 vis_vram_limit <= adev->gmc.visible_vram_size) 1860 adev->gmc.visible_vram_size = vis_vram_limit; 1861 1862 /* Change the size here instead of the init above so only lpfn is affected */ 1863 amdgpu_ttm_set_buffer_funcs_status(adev, false); 1864 #ifdef CONFIG_64BIT 1865 #ifdef __linux__ 1866 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, 1867 adev->gmc.visible_vram_size); 1868 #else 1869 if (bus_space_map(adev->memt, adev->gmc.aper_base, 1870 adev->gmc.visible_vram_size, 1871 BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, 1872 &adev->mman.aper_bsh)) { 1873 adev->mman.aper_base_kaddr = NULL; 1874 } else { 1875 adev->mman.aper_base_kaddr = bus_space_vaddr(adev->memt, 1876 adev->mman.aper_bsh); 1877 } 1878 #endif 1879 #endif 1880 1881 /* 1882 *The reserved vram for firmware must be pinned to the specified 1883 *place on the VRAM, so reserve it early. 1884 */ 1885 r = amdgpu_ttm_fw_reserve_vram_init(adev); 1886 if (r) { 1887 return r; 1888 } 1889 1890 /* 1891 *The reserved vram for memory training must be pinned to the specified 1892 *place on the VRAM, so reserve it early. 1893 */ 1894 if (!amdgpu_sriov_vf(adev)) { 1895 r = amdgpu_ttm_training_reserve_vram_init(adev); 1896 if (r) 1897 return r; 1898 } 1899 1900 /* allocate memory as required for VGA 1901 * This is used for VGA emulation and pre-OS scanout buffers to 1902 * avoid display artifacts while transitioning between pre-OS 1903 * and driver. */ 1904 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, 1905 AMDGPU_GEM_DOMAIN_VRAM, 1906 &adev->stolen_vga_memory, 1907 NULL, &stolen_vga_buf); 1908 if (r) 1909 return r; 1910 1911 /* 1912 * reserve one TMR (64K) memory at the top of VRAM which holds 1913 * IP Discovery data and is protected by PSP. 
1914 */ 1915 r = amdgpu_bo_create_kernel_at(adev, 1916 adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, 1917 DISCOVERY_TMR_SIZE, 1918 AMDGPU_GEM_DOMAIN_VRAM, 1919 &adev->discovery_memory, 1920 NULL); 1921 if (r) 1922 return r; 1923 1924 DRM_INFO("amdgpu: %uM of VRAM memory ready\n", 1925 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); 1926 1927 /* Compute GTT size, either bsaed on 3/4th the size of RAM size 1928 * or whatever the user passed on module init */ 1929 if (amdgpu_gtt_size == -1) { 1930 #ifdef __linux__ 1931 struct sysinfo si; 1932 1933 si_meminfo(&si); 1934 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), 1935 adev->gmc.mc_vram_size), 1936 ((uint64_t)si.totalram * si.mem_unit * 3/4)); 1937 #else 1938 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), 1939 adev->gmc.mc_vram_size), 1940 ((uint64_t)ptoa(physmem) * 3/4)); 1941 #endif 1942 } 1943 else 1944 gtt_size = (uint64_t)amdgpu_gtt_size << 20; 1945 1946 /* Initialize GTT memory pool */ 1947 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); 1948 if (r) { 1949 DRM_ERROR("Failed initializing GTT heap.\n"); 1950 return r; 1951 } 1952 DRM_INFO("amdgpu: %uM of GTT memory ready.\n", 1953 (unsigned)(gtt_size / (1024 * 1024))); 1954 1955 /* Initialize various on-chip memory pools */ 1956 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, 1957 adev->gds.gds_size); 1958 if (r) { 1959 DRM_ERROR("Failed initializing GDS heap.\n"); 1960 return r; 1961 } 1962 1963 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, 1964 adev->gds.gws_size); 1965 if (r) { 1966 DRM_ERROR("Failed initializing gws heap.\n"); 1967 return r; 1968 } 1969 1970 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, 1971 adev->gds.oa_size); 1972 if (r) { 1973 DRM_ERROR("Failed initializing oa heap.\n"); 1974 return r; 1975 } 1976 1977 return 0; 1978 } 1979 1980 /** 1981 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm 1982 */ 1983 void amdgpu_ttm_late_init(struct amdgpu_device *adev) 1984 { 1985 void *stolen_vga_buf; 1986 /* return the VGA stolen memory (if any) back to VRAM */ 1987 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); 1988 } 1989 1990 /** 1991 * amdgpu_ttm_fini - De-initialize the TTM memory pools 1992 */ 1993 void amdgpu_ttm_fini(struct amdgpu_device *adev) 1994 { 1995 if (!adev->mman.initialized) 1996 return; 1997 1998 amdgpu_ttm_training_reserve_vram_fini(adev); 1999 /* return the IP Discovery TMR memory back to VRAM */ 2000 amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); 2001 amdgpu_ttm_fw_reserve_vram_fini(adev); 2002 2003 #ifdef __linux__ 2004 if (adev->mman.aper_base_kaddr) 2005 iounmap(adev->mman.aper_base_kaddr); 2006 #else 2007 if (adev->mman.aper_base_kaddr) 2008 bus_space_unmap(adev->memt, adev->mman.aper_bsh, 2009 adev->gmc.visible_vram_size); 2010 #endif 2011 adev->mman.aper_base_kaddr = NULL; 2012 2013 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); 2014 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); 2015 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); 2016 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); 2017 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); 2018 ttm_bo_device_release(&adev->mman.bdev); 2019 adev->mman.initialized = false; 2020 DRM_INFO("amdgpu: ttm finalized\n"); 2021 } 2022 2023 /** 2024 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions 2025 * 2026 * @adev: amdgpu_device pointer 2027 * @enable: true when we can use buffer functions. 
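 *
 * amdgpu_ttm_init() above calls this with false before the copy ring is
 * usable; enabling later creates the drm_sched entity used for TTM buffer
 * moves, while disabling destroys the entity and drops the pending move
 * fence.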

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;
	int r;

	if (!adev->mman.initialized || adev->in_gpu_reset ||
	    adev->mman.buffer_funcs_enabled == enable)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = drm_sched_entity_init(&adev->mman.entity,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.entity);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

#ifdef __linux__

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

#else

struct uvm_object *
amdgpu_mmap(struct file *filp, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_file *file_priv = (void *)filp;
	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

	if (adev == NULL)
		return NULL;

	if (unlikely(off < DRM_FILE_PAGE_OFFSET))
		return NULL;

	return ttm_bo_mmap(filp, off, size, &adev->mman.bdev);
}

#endif
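
/*
 * For reference, a sketch of how userspace reaches the mmap entry points
 * above (illustrative userspace code, not part of this file): the
 * DRM_IOCTL_AMDGPU_GEM_MMAP ioctl turns a GEM handle into the fake offset
 * that ttm_bo_mmap() later resolves back to the buffer object:
 *
 *	union drm_amdgpu_gem_mmap args = { .in.handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.out.addr_ptr);
 */
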
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = roundup2(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = roundup2(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}
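
/*
 * A minimal sketch (not used by the driver; the helper name is hypothetical)
 * of the IB sizing done in amdgpu_copy_buffer() above: the copy is split
 * into chunks of at most copy_max_bytes, each chunk costs copy_num_dw words
 * in the IB, and the total is padded to a multiple of 8 dwords for the ring.
 */
static inline unsigned amdgpu_copy_buffer_num_dw(struct amdgpu_device *adev,
						 uint32_t byte_count)
{
	unsigned num_loops = DIV_ROUND_UP(byte_count,
					  adev->mman.buffer_funcs->copy_max_bytes);

	return roundup2(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
}
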
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}
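
/*
 * Example (illustrative userspace code, not part of this file): the file
 * backed by these handlers typically appears under the DRM debugfs directory
 * as /sys/kernel/debug/dri/<minor>/amdgpu_vram, and both the offset and the
 * length of an access must be 4-byte aligned:
 *
 *	uint32_t dw;
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
 *
 *	pread(fd, &dw, sizeof(dw), 0x1000);	// one dword at VRAM + 4 KiB
 */
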
/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct vm_page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & ~PAGE_MASK;
		loff_t off = *pos & PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct vm_page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address. If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}
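
/*
 * A minimal sketch (not used by the driver; the helper name is hypothetical)
 * of the translation and validation steps shared by amdgpu_iomem_read()
 * above and amdgpu_iomem_write() below: the bus address is translated
 * through the IOMMU domain when one is attached (identity otherwise), and
 * only pages that back this device's BOs may be touched.
 */
static inline struct vm_page *
amdgpu_iomem_get_page(struct amdgpu_device *adev, struct iommu_domain *dom,
		      phys_addr_t addr)
{
	unsigned long pfn;
	struct vm_page *p;

	addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

	pfn = addr >> PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return NULL;

	p = pfn_to_page(pfn);
	if (p->mapping != adev->mman.bdev.dev_mapping)
		return NULL;

	return p;
}
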
/**
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & ~PAGE_MASK;
		loff_t off = *pos & PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct vm_page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}