/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS(DMA_BUF);

#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)

static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);

static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				   unsigned int type,
				   uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->num_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
	case AMDGPU_PL_DOORBELL:
		placement->num_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);

		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned int window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}


	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		dma_addr_t dma_address;

		dma_address = mm_cur->start;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
					flags, cpu_addr);
			dma_address += PAGE_SIZE;
		}
	}

	dma_fence_put(amdgpu_job_submit(job));
	return 0;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
					  0, ring, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
					  1, ring, tmz, &cur_size, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
				       false);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_resource *mem)
{
	u64 mem_size = (u64)mem->size;
	struct amdgpu_res_cursor cursor;
	u64 end;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(mem, 0, mem_size, &cursor);
	end = cursor.start + cursor.size;
	while (cursor.remaining) {
		amdgpu_res_next(&cursor, cursor.size);

		if (!cursor.remaining)
			break;

		/* ttm_resource_ioremap only supports contiguous memory */
		if (end != cursor.start)
			return false;

		end = cursor.start + cursor.size;
	}

	return end <= adev->gmc.visible_vram_size;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	abo = ttm_to_amdgpu_bo(bo);
	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		goto out;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_DOORBELL) {
		/* Nothing to save here */
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	if (adev->mman.buffer_funcs_enabled) {
		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
		      new_mem->mem_type == TTM_PL_VRAM) ||
		     (old_mem->mem_type == TTM_PL_VRAM &&
		      new_mem->mem_type == TTM_PL_SYSTEM))) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = TTM_PL_FLAG_TEMPORARY;
			return -EMULTIHOP;
		}

		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	} else {
		r = -ENODEV;
	}

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
out:
	/* update statistics */
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	amdgpu_bo_move_notify(bo, evict);
	return 0;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	size_t bus_size = (size_t)mem->size;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
			return -EINVAL;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	case AMDGPU_PL_DOORBELL:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->doorbell.base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);

	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;

	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */

uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_tt		ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
	int32_t			pool_id;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
				 struct hmm_range **range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	/* Make sure get_user_pages_done() can cleanup gracefully */
	*range = NULL;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, pages, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}

/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
 */
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt && gtt->userptr && range)
		amdgpu_hmm_range_get_pages_done(range);
}

/*
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table
 * changes and check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (!gtt || !gtt->userptr || !range)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");

	return !amdgpu_hmm_range_get_pages_done(range);
}
#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
}

/*
 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 * MQDn+CtrlStackn where n is the number of XCCs per partition.
 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
 * and uses memory type default, UC. The rest of pages_per_xcc are
 * Ctrl stack and modify their memory type to NC.
 */
static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
				struct ttm_tt *ttm, uint64_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t total_pages = ttm->num_pages;
	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
	uint64_t page_idx, pages_per_xcc;
	int i;
	uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
			AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

	pages_per_xcc = total_pages;
	do_div(pages_per_xcc, num_xcc);

	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
		/* MQD page: use default flags */
		amdgpu_gart_bind(adev,
				 gtt->offset + (page_idx << PAGE_SHIFT),
				 1, &gtt->ttm.dma_address[page_idx], flags);
		/*
		 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
		 * the second page of the BO onward.
		 */
		amdgpu_gart_bind(adev,
				 gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
				 pages_per_xcc - 1,
				 &gtt->ttm.dma_address[page_idx + 1],
				 ctrl_flags);
	}
}

static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}

static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (!gtt)
		return NULL;

	gtt->gobj = &bo->base;
	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
	else
		gtt->pool_id = abo->xcp_id;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}

/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr: The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}

/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_DOORBELL ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
	    mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm))
			return false;
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}

static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					 unsigned long offset, void *buf,
					 int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		goto out;

	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, false);

	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	fence = amdgpu_job_submit(job);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
 * @buf: Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write: true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* use MM to access the rest of VRAM and handle unaligned addresses */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false);
}

static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
		NULL, &adev->mman.fw_vram_usage_va);
}

/*
 * Driver Reservation functions
 */
/**
 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free drv reserved vram if it has been reserved.
 */
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
			      NULL,
			      &adev->mman.drv_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from drv.
 */
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
	u64 vram_size = adev->gmc.visible_vram_size;

	adev->mman.drv_vram_usage_va = NULL;
	adev->mman.drv_vram_usage_reserved_bo = NULL;

	if (adev->mman.drv_vram_usage_size == 0 ||
	    adev->mman.drv_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
						uint32_t reserve_size)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
			ctx->train_data_size,
			ctx->p2c_train_data_offset,
			ctx->c2p_train_data_offset);
}

/*
 * reserve TMR memory at the top of VRAM which holds
 * IP Discovery data and is protected by PSP.
 */
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;
	uint32_t reserve_size = 0;
	int ret;

	if (adev->bios && !amdgpu_sriov_vf(adev)) {
		if (amdgpu_atomfirmware_mem_training_supported(adev))
			mem_train_support = true;
		else
			DRM_DEBUG("memory training is not supported!\n");
	}

	/*
	 * Query the reserved TMR size through atom firmware info for
	 * Sienna_Cichlid and onwards for all the use cases (IP discovery/G6
	 * memory training/profiling/diagnostic data, etc.)
	 *
	 * Otherwise, fall back to the legacy approach to check and reserve
	 * TMR blocks for IP discovery data and G6 memory training data
	 * respectively.
	 */
	if (adev->bios)
		reserve_size =
			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);

	if (!adev->bios &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		reserve_size = max(reserve_size, (uint32_t)280 << 20);
	else if (!reserve_size)
		reserve_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
		amdgpu_ttm_training_data_block_init(adev, reserve_size);
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
			amdgpu_ttm_training_reserve_vram_fini(adev);
			return ret;
		}
		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	}

	if (!adev->gmc.is_app_apu) {
		ret = amdgpu_bo_create_kernel_at(
			adev, adev->gmc.real_vram_size - reserve_size,
			reserve_size, &adev->mman.fw_reserved_memory, NULL);
		if (ret) {
			DRM_ERROR("alloc tmr failed(%d)!\n", ret);
			amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
					      NULL, NULL);
			return ret;
		}
	} else {
		DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
	}

	return 0;
}

static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
		return 0;

	adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
				       sizeof(*adev->mman.ttm_pools),
				       GFP_KERNEL);
	if (!adev->mman.ttm_pools)
		return -ENOMEM;

	for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
		ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
			      adev->gmc.mem_partitions[i].numa.node,
			      false, false);
	}
	return 0;
}

static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
		return;

	for (i = 0; i < adev->gmc.num_mem_partitions; i++)
		ttm_pool_fini(&adev->mman.ttm_pools[i]);

	kfree(adev->mman.ttm_pools);
	adev->mman.ttm_pools = NULL;
}

/*
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;

	mutex_init(&adev->mman.gtt_window_lock);

	/* No other user of this address space, so set it to 0 */
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			    adev_to_drm(adev)->anon_inode->i_mapping,
			    adev_to_drm(adev)->vma_offset_manager,
			    adev->need_swiotlb,
			    dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	r = amdgpu_ttm_pools_init(adev);
	if (r) {
		DRM_ERROR("failed to init ttm pools(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
#ifdef CONFIG_X86
	if (adev->gmc.xgmi.connected_to_cpu)
		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);

	else if (adev->gmc.is_app_apu)
		DRM_DEBUG_DRIVER(
			"No need to ioremap when real vram size is 0\n");
	else
#endif
		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * The reserved vram for driver must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_drv_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * Only NAVI10 and onward ASICs support IP discovery.
	 * If IP discovery is enabled, a block of memory should be
	 * reserved for IP discovery.
	 */
	if (adev->mman.discovery_bin) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
1885 */ 1886 if (!adev->gmc.is_app_apu) { 1887 r = amdgpu_bo_create_kernel_at(adev, 0, 1888 adev->mman.stolen_vga_size, 1889 &adev->mman.stolen_vga_memory, 1890 NULL); 1891 if (r) 1892 return r; 1893 1894 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, 1895 adev->mman.stolen_extended_size, 1896 &adev->mman.stolen_extended_memory, 1897 NULL); 1898 1899 if (r) 1900 return r; 1901 1902 r = amdgpu_bo_create_kernel_at(adev, 1903 adev->mman.stolen_reserved_offset, 1904 adev->mman.stolen_reserved_size, 1905 &adev->mman.stolen_reserved_memory, 1906 NULL); 1907 if (r) 1908 return r; 1909 } else { 1910 DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); 1911 } 1912 1913 DRM_INFO("amdgpu: %uM of VRAM memory ready\n", 1914 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); 1915 1916 /* Compute GTT size, either based on TTM limit 1917 * or whatever the user passed on module init. 1918 */ 1919 if (amdgpu_gtt_size == -1) 1920 gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT; 1921 else 1922 gtt_size = (uint64_t)amdgpu_gtt_size << 20; 1923 1924 /* Initialize GTT memory pool */ 1925 r = amdgpu_gtt_mgr_init(adev, gtt_size); 1926 if (r) { 1927 DRM_ERROR("Failed initializing GTT heap.\n"); 1928 return r; 1929 } 1930 DRM_INFO("amdgpu: %uM of GTT memory ready.\n", 1931 (unsigned int)(gtt_size / (1024 * 1024))); 1932 1933 /* Initiailize doorbell pool on PCI BAR */ 1934 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); 1935 if (r) { 1936 DRM_ERROR("Failed initializing doorbell heap.\n"); 1937 return r; 1938 } 1939 1940 /* Create a boorbell page for kernel usages */ 1941 r = amdgpu_doorbell_create_kernel_doorbells(adev); 1942 if (r) { 1943 DRM_ERROR("Failed to initialize kernel doorbells.\n"); 1944 return r; 1945 } 1946 1947 /* Initialize preemptible memory pool */ 1948 r = amdgpu_preempt_mgr_init(adev); 1949 if (r) { 1950 DRM_ERROR("Failed initializing PREEMPT heap.\n"); 1951 return r; 1952 } 1953 1954 /* Initialize various on-chip memory pools */ 1955 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); 1956 if (r) { 1957 DRM_ERROR("Failed initializing GDS heap.\n"); 1958 return r; 1959 } 1960 1961 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); 1962 if (r) { 1963 DRM_ERROR("Failed initializing gws heap.\n"); 1964 return r; 1965 } 1966 1967 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); 1968 if (r) { 1969 DRM_ERROR("Failed initializing oa heap.\n"); 1970 return r; 1971 } 1972 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, 1973 AMDGPU_GEM_DOMAIN_GTT, 1974 &adev->mman.sdma_access_bo, NULL, 1975 &adev->mman.sdma_access_ptr)) 1976 DRM_WARN("Debug VRAM access will use slowpath MM access\n"); 1977 1978 return 0; 1979 } 1980 1981 /* 1982 * amdgpu_ttm_fini - De-initialize the TTM memory pools 1983 */ 1984 void amdgpu_ttm_fini(struct amdgpu_device *adev) 1985 { 1986 int idx; 1987 1988 if (!adev->mman.initialized) 1989 return; 1990 1991 amdgpu_ttm_pools_fini(adev); 1992 1993 amdgpu_ttm_training_reserve_vram_fini(adev); 1994 /* return the stolen vga memory back to VRAM */ 1995 if (!adev->gmc.is_app_apu) { 1996 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 1997 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 1998 /* return the FW reserved memory back to VRAM */ 1999 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, 2000 NULL); 2001 if (adev->mman.stolen_reserved_size) 2002 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, 2003 NULL, 
NULL); 2004 } 2005 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, 2006 &adev->mman.sdma_access_ptr); 2007 amdgpu_ttm_fw_reserve_vram_fini(adev); 2008 amdgpu_ttm_drv_reserve_vram_fini(adev); 2009 2010 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 2011 2012 if (adev->mman.aper_base_kaddr) 2013 iounmap(adev->mman.aper_base_kaddr); 2014 adev->mman.aper_base_kaddr = NULL; 2015 2016 drm_dev_exit(idx); 2017 } 2018 2019 amdgpu_vram_mgr_fini(adev); 2020 amdgpu_gtt_mgr_fini(adev); 2021 amdgpu_preempt_mgr_fini(adev); 2022 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); 2023 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); 2024 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); 2025 ttm_device_fini(&adev->mman.bdev); 2026 adev->mman.initialized = false; 2027 DRM_INFO("amdgpu: ttm finalized\n"); 2028 } 2029 2030 /** 2031 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions 2032 * 2033 * @adev: amdgpu_device pointer 2034 * @enable: true when we can use buffer functions. 2035 * 2036 * Enable/disable use of buffer functions during suspend/resume. This should 2037 * only be called at bootup or when userspace isn't running. 2038 */ 2039 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) 2040 { 2041 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); 2042 uint64_t size; 2043 int r; 2044 2045 if (!adev->mman.initialized || amdgpu_in_reset(adev) || 2046 adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu) 2047 return; 2048 2049 if (enable) { 2050 struct amdgpu_ring *ring; 2051 struct drm_gpu_scheduler *sched; 2052 2053 ring = adev->mman.buffer_funcs_ring; 2054 sched = &ring->sched; 2055 r = drm_sched_entity_init(&adev->mman.high_pr, 2056 DRM_SCHED_PRIORITY_KERNEL, &sched, 2057 1, NULL); 2058 if (r) { 2059 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", 2060 r); 2061 return; 2062 } 2063 2064 r = drm_sched_entity_init(&adev->mman.low_pr, 2065 DRM_SCHED_PRIORITY_NORMAL, &sched, 2066 1, NULL); 2067 if (r) { 2068 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", 2069 r); 2070 goto error_free_entity; 2071 } 2072 } else { 2073 drm_sched_entity_destroy(&adev->mman.high_pr); 2074 drm_sched_entity_destroy(&adev->mman.low_pr); 2075 dma_fence_put(man->move); 2076 man->move = NULL; 2077 } 2078 2079 /* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */ 2080 if (enable) 2081 size = adev->gmc.real_vram_size; 2082 else 2083 size = adev->gmc.visible_vram_size; 2084 man->size = size; 2085 adev->mman.buffer_funcs_enabled = enable; 2086 2087 return; 2088 2089 error_free_entity: 2090 drm_sched_entity_destroy(&adev->mman.high_pr); 2091 } 2092 2093 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, 2094 bool direct_submit, 2095 unsigned int num_dw, 2096 struct dma_resv *resv, 2097 bool vm_needs_flush, 2098 struct amdgpu_job **job, 2099 bool delayed) 2100 { 2101 enum amdgpu_ib_pool_type pool = direct_submit ? 2102 AMDGPU_IB_POOL_DIRECT : 2103 AMDGPU_IB_POOL_DELAYED; 2104 int r; 2105 struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr : 2106 &adev->mman.high_pr; 2107 r = amdgpu_job_alloc_with_ib(adev, entity, 2108 AMDGPU_FENCE_OWNER_UNDEFINED, 2109 num_dw * 4, pool, job); 2110 if (r) 2111 return r; 2112 2113 if (vm_needs_flush) { 2114 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2115 adev->gmc.pdb0_bo : 2116 adev->gart.bo); 2117 (*job)->vm_needs_flush = true; 2118 } 2119 if (!resv) 2120 return 0; 2121 2122 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv, 2123 DMA_RESV_USAGE_BOOKKEEP); 2124 } 2125 2126 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 2127 uint64_t dst_offset, uint32_t byte_count, 2128 struct dma_resv *resv, 2129 struct dma_fence **fence, bool direct_submit, 2130 bool vm_needs_flush, bool tmz) 2131 { 2132 struct amdgpu_device *adev = ring->adev; 2133 unsigned int num_loops, num_dw; 2134 struct amdgpu_job *job; 2135 uint32_t max_bytes; 2136 unsigned int i; 2137 int r; 2138 2139 if (!direct_submit && !ring->sched.ready) { 2140 DRM_ERROR("Trying to move memory with ring turned off.\n"); 2141 return -EINVAL; 2142 } 2143 2144 max_bytes = adev->mman.buffer_funcs->copy_max_bytes; 2145 num_loops = DIV_ROUND_UP(byte_count, max_bytes); 2146 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); 2147 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, 2148 resv, vm_needs_flush, &job, false); 2149 if (r) 2150 return r; 2151 2152 for (i = 0; i < num_loops; i++) { 2153 uint32_t cur_size_in_bytes = min(byte_count, max_bytes); 2154 2155 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, 2156 dst_offset, cur_size_in_bytes, tmz); 2157 2158 src_offset += cur_size_in_bytes; 2159 dst_offset += cur_size_in_bytes; 2160 byte_count -= cur_size_in_bytes; 2161 } 2162 2163 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 2164 WARN_ON(job->ibs[0].length_dw > num_dw); 2165 if (direct_submit) 2166 r = amdgpu_job_submit_direct(job, ring, fence); 2167 else 2168 *fence = amdgpu_job_submit(job); 2169 if (r) 2170 goto error_free; 2171 2172 return r; 2173 2174 error_free: 2175 amdgpu_job_free(job); 2176 DRM_ERROR("Error scheduling IBs (%d)\n", r); 2177 return r; 2178 } 2179 2180 static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, 2181 uint64_t dst_addr, uint32_t byte_count, 2182 struct dma_resv *resv, 2183 struct dma_fence **fence, 2184 bool vm_needs_flush, bool delayed) 2185 { 2186 struct amdgpu_device *adev = ring->adev; 2187 unsigned int num_loops, num_dw; 2188 struct amdgpu_job *job; 2189 uint32_t max_bytes; 2190 unsigned int i; 2191 int r; 2192 2193 max_bytes = adev->mman.buffer_funcs->fill_max_bytes; 2194 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes); 2195 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8); 2196 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush, 2197 &job, delayed); 2198 if (r) 2199 return r; 2200 2201 for (i = 0; i < num_loops; i++) { 2202 uint32_t cur_size = min(byte_count, max_bytes); 2203 2204 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr, 2205 cur_size); 2206 2207 dst_addr += cur_size; 2208 byte_count -= cur_size; 2209 } 2210 2211 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 2212 WARN_ON(job->ibs[0].length_dw > num_dw); 2213 *fence = amdgpu_job_submit(job); 2214 return 0; 2215 } 2216 2217 int amdgpu_fill_buffer(struct amdgpu_bo *bo, 2218 uint32_t src_data, 2219 struct dma_resv *resv, 2220 struct dma_fence **f, 2221 bool delayed) 2222 { 2223 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 2224 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 2225 struct dma_fence *fence = NULL; 2226 struct amdgpu_res_cursor dst; 2227 int r; 2228 2229 if (!adev->mman.buffer_funcs_enabled) { 2230 DRM_ERROR("Trying to clear memory with ring turned off.\n"); 2231 return -EINVAL; 2232 } 2233 2234 amdgpu_res_first(bo->tbo.resource, 0, 
amdgpu_bo_size(bo), &dst); 2235 2236 mutex_lock(&adev->mman.gtt_window_lock); 2237 while (dst.remaining) { 2238 struct dma_fence *next; 2239 uint64_t cur_size, to; 2240 2241 /* Never fill more than 256MiB at once to avoid timeouts */ 2242 cur_size = min(dst.size, 256ULL << 20); 2243 2244 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst, 2245 1, ring, false, &cur_size, &to); 2246 if (r) 2247 goto error; 2248 2249 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, 2250 &next, true, delayed); 2251 if (r) 2252 goto error; 2253 2254 dma_fence_put(fence); 2255 fence = next; 2256 2257 amdgpu_res_next(&dst, cur_size); 2258 } 2259 error: 2260 mutex_unlock(&adev->mman.gtt_window_lock); 2261 if (f) 2262 *f = dma_fence_get(fence); 2263 dma_fence_put(fence); 2264 return r; 2265 } 2266 2267 /** 2268 * amdgpu_ttm_evict_resources - evict memory buffers 2269 * @adev: amdgpu device object 2270 * @mem_type: evicted BO's memory type 2271 * 2272 * Evicts all @mem_type buffers on the lru list of the memory type. 2273 * 2274 * Returns: 2275 * 0 for success or a negative error code on failure. 2276 */ 2277 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) 2278 { 2279 struct ttm_resource_manager *man; 2280 2281 switch (mem_type) { 2282 case TTM_PL_VRAM: 2283 case TTM_PL_TT: 2284 case AMDGPU_PL_GWS: 2285 case AMDGPU_PL_GDS: 2286 case AMDGPU_PL_OA: 2287 man = ttm_manager_type(&adev->mman.bdev, mem_type); 2288 break; 2289 default: 2290 DRM_ERROR("Trying to evict invalid memory type\n"); 2291 return -EINVAL; 2292 } 2293 2294 return ttm_resource_manager_evict_all(&adev->mman.bdev, man); 2295 } 2296 2297 #if defined(CONFIG_DEBUG_FS) 2298 2299 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) 2300 { 2301 struct amdgpu_device *adev = m->private; 2302 2303 return ttm_pool_debugfs(&adev->mman.bdev.pool, m); 2304 } 2305 2306 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); 2307 2308 /* 2309 * amdgpu_ttm_vram_read - Linear read access to VRAM 2310 * 2311 * Accesses VRAM via MMIO for debugging purposes. 2312 */ 2313 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, 2314 size_t size, loff_t *pos) 2315 { 2316 struct amdgpu_device *adev = file_inode(f)->i_private; 2317 ssize_t result = 0; 2318 2319 if (size & 0x3 || *pos & 0x3) 2320 return -EINVAL; 2321 2322 if (*pos >= adev->gmc.mc_vram_size) 2323 return -ENXIO; 2324 2325 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos)); 2326 while (size) { 2327 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4); 2328 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ]; 2329 2330 amdgpu_device_vram_access(adev, *pos, value, bytes, false); 2331 if (copy_to_user(buf, value, bytes)) 2332 return -EFAULT; 2333 2334 result += bytes; 2335 buf += bytes; 2336 *pos += bytes; 2337 size -= bytes; 2338 } 2339 2340 return result; 2341 } 2342 2343 /* 2344 * amdgpu_ttm_vram_write - Linear write access to VRAM 2345 * 2346 * Accesses VRAM via MMIO for debugging purposes. 
2347 */ 2348 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, 2349 size_t size, loff_t *pos) 2350 { 2351 struct amdgpu_device *adev = file_inode(f)->i_private; 2352 ssize_t result = 0; 2353 int r; 2354 2355 if (size & 0x3 || *pos & 0x3) 2356 return -EINVAL; 2357 2358 if (*pos >= adev->gmc.mc_vram_size) 2359 return -ENXIO; 2360 2361 while (size) { 2362 uint32_t value; 2363 2364 if (*pos >= adev->gmc.mc_vram_size) 2365 return result; 2366 2367 r = get_user(value, (uint32_t *)buf); 2368 if (r) 2369 return r; 2370 2371 amdgpu_device_mm_access(adev, *pos, &value, 4, true); 2372 2373 result += 4; 2374 buf += 4; 2375 *pos += 4; 2376 size -= 4; 2377 } 2378 2379 return result; 2380 } 2381 2382 static const struct file_operations amdgpu_ttm_vram_fops = { 2383 .owner = THIS_MODULE, 2384 .read = amdgpu_ttm_vram_read, 2385 .write = amdgpu_ttm_vram_write, 2386 .llseek = default_llseek, 2387 }; 2388 2389 /* 2390 * amdgpu_iomem_read - Virtual read access to GPU mapped memory 2391 * 2392 * This function is used to read memory that has been mapped to the 2393 * GPU and the known addresses are not physical addresses but instead 2394 * bus addresses (e.g., what you'd put in an IB or ring buffer). 2395 */ 2396 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, 2397 size_t size, loff_t *pos) 2398 { 2399 struct amdgpu_device *adev = file_inode(f)->i_private; 2400 struct iommu_domain *dom; 2401 ssize_t result = 0; 2402 int r; 2403 2404 /* retrieve the IOMMU domain if any for this device */ 2405 dom = iommu_get_domain_for_dev(adev->dev); 2406 2407 while (size) { 2408 phys_addr_t addr = *pos & PAGE_MASK; 2409 loff_t off = *pos & ~PAGE_MASK; 2410 size_t bytes = PAGE_SIZE - off; 2411 unsigned long pfn; 2412 struct page *p; 2413 void *ptr; 2414 2415 bytes = min(bytes, size); 2416 2417 /* Translate the bus address to a physical address. If 2418 * the domain is NULL it means there is no IOMMU active 2419 * and the address translation is the identity 2420 */ 2421 addr = dom ? iommu_iova_to_phys(dom, addr) : addr; 2422 2423 pfn = addr >> PAGE_SHIFT; 2424 if (!pfn_valid(pfn)) 2425 return -EPERM; 2426 2427 p = pfn_to_page(pfn); 2428 if (p->mapping != adev->mman.bdev.dev_mapping) 2429 return -EPERM; 2430 2431 ptr = kmap_local_page(p); 2432 r = copy_to_user(buf, ptr + off, bytes); 2433 kunmap_local(ptr); 2434 if (r) 2435 return -EFAULT; 2436 2437 size -= bytes; 2438 *pos += bytes; 2439 result += bytes; 2440 } 2441 2442 return result; 2443 } 2444 2445 /* 2446 * amdgpu_iomem_write - Virtual write access to GPU mapped memory 2447 * 2448 * This function is used to write memory that has been mapped to the 2449 * GPU and the known addresses are not physical addresses but instead 2450 * bus addresses (e.g., what you'd put in an IB or ring buffer). 2451 */ 2452 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, 2453 size_t size, loff_t *pos) 2454 { 2455 struct amdgpu_device *adev = file_inode(f)->i_private; 2456 struct iommu_domain *dom; 2457 ssize_t result = 0; 2458 int r; 2459 2460 dom = iommu_get_domain_for_dev(adev->dev); 2461 2462 while (size) { 2463 phys_addr_t addr = *pos & PAGE_MASK; 2464 loff_t off = *pos & ~PAGE_MASK; 2465 size_t bytes = PAGE_SIZE - off; 2466 unsigned long pfn; 2467 struct page *p; 2468 void *ptr; 2469 2470 bytes = min(bytes, size); 2471 2472 addr = dom ? 
iommu_iova_to_phys(dom, addr) : addr; 2473 2474 pfn = addr >> PAGE_SHIFT; 2475 if (!pfn_valid(pfn)) 2476 return -EPERM; 2477 2478 p = pfn_to_page(pfn); 2479 if (p->mapping != adev->mman.bdev.dev_mapping) 2480 return -EPERM; 2481 2482 ptr = kmap_local_page(p); 2483 r = copy_from_user(ptr + off, buf, bytes); 2484 kunmap_local(ptr); 2485 if (r) 2486 return -EFAULT; 2487 2488 size -= bytes; 2489 *pos += bytes; 2490 result += bytes; 2491 } 2492 2493 return result; 2494 } 2495 2496 static const struct file_operations amdgpu_ttm_iomem_fops = { 2497 .owner = THIS_MODULE, 2498 .read = amdgpu_iomem_read, 2499 .write = amdgpu_iomem_write, 2500 .llseek = default_llseek 2501 }; 2502 2503 #endif 2504 2505 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) 2506 { 2507 #if defined(CONFIG_DEBUG_FS) 2508 struct drm_minor *minor = adev_to_drm(adev)->primary; 2509 struct dentry *root = minor->debugfs_root; 2510 2511 debugfs_create_file_size("amdgpu_vram", 0444, root, adev, 2512 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size); 2513 debugfs_create_file("amdgpu_iomem", 0444, root, adev, 2514 &amdgpu_ttm_iomem_fops); 2515 debugfs_create_file("ttm_page_pool", 0444, root, adev, 2516 &amdgpu_ttm_page_pool_fops); 2517 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, 2518 TTM_PL_VRAM), 2519 root, "amdgpu_vram_mm"); 2520 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, 2521 TTM_PL_TT), 2522 root, "amdgpu_gtt_mm"); 2523 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, 2524 AMDGPU_PL_GDS), 2525 root, "amdgpu_gds_mm"); 2526 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, 2527 AMDGPU_PL_GWS), 2528 root, "amdgpu_gws_mm"); 2529 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, 2530 AMDGPU_PL_OA), 2531 root, "amdgpu_oa_mm"); 2532 2533 #endif 2534 } 2535
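/*
 * Example usage of the debugfs files registered above, as a sketch only: the
 * paths assume debugfs is mounted at /sys/kernel/debug and that the device is
 * DRM primary minor 0; the actual minor number varies per system.
 *
 *   # dump TTM page pool statistics
 *   cat /sys/kernel/debug/dri/0/ttm_page_pool
 *
 *   # read one dword of VRAM at byte offset 0x1000; amdgpu_ttm_vram_read()
 *   # and amdgpu_ttm_vram_write() require 4-byte aligned offsets and sizes,
 *   # so the block size must be a multiple of 4
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4 count=1 skip=1024 | xxd
 */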