/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	u64 gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}
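		/*
		 * Fill the GTT BO with a self-describing pattern: each
		 * pointer-sized slot holds its own CPU mapping address, so
		 * every cell of the copy can be verified independently.
		 */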
		for (gtt_start = gtt_map,
		     gtt_end = (void *)((uintptr_t)gtt_map + size);
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						NULL);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 NULL);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map,
		     gtt_end = (void *)((uintptr_t)gtt_map + size),
		     vram_start = vram_map,
		     vram_end = (void *)((uintptr_t)vram_map + size);
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
					   (uintptr_t)gtt_start - (uintptr_t)gtt_map),
					  (unsigned long long)
					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
					   (uintptr_t)gtt_start - (uintptr_t)gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			/* Rewrite the pattern in place for the copy back */
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						NULL);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 NULL);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map,
		     gtt_end = (void *)((uintptr_t)gtt_map + size),
		     vram_start = vram_map,
		     vram_end = (void *)((uintptr_t)vram_map + size);
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
					   (uintptr_t)vram_start - (uintptr_t)vram_map),
					  (unsigned long long)
					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
					   (uintptr_t)vram_start - (uintptr_t)vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 (unsigned long long)(gtt_addr - rdev->mc.gtt_start));
		continue;
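		/*
		 * Error unwind for the object currently being set up; the
		 * loop under out_lclean then releases every GTT object that
		 * was fully initialized on earlier iterations.
		 */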
out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (fence && !IS_ERR(fence))
		radeon_fence_unref(&fence);
	if (r)
		printk(KERN_WARNING "Error while testing BO move.\n");
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}
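/*
 * Two-ring sync test: ring A is made to block on a semaphore twice, with
 * a fence emitted after each wait.  Ring B then signals the semaphore
 * twice; each signal must release exactly one of ring A's fences.
 */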
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
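/*
 * Three-ring variant: rings A and B both block on the same semaphore
 * while ring C signals it twice.  Each signal may wake only one waiter,
 * in either order, so after the first signal exactly one of the two
 * fences must be signaled.
 */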
static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA = false, sigB = false;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}
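/*
 * Invocation sketch (an assumption, not part of this file): these entry
 * points are typically driven from device initialization when the
 * radeon.test module parameter is set, roughly along these lines:
 *
 *	if (radeon_testing) {
 *		if (rdev->accel_working)
 *			radeon_test_moves(rdev);	// call site is illustrative
 *		else
 *			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
 *	}
 */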