/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        struct radeon_fence *fence = NULL;
        u64 gtt_addr, vram_addr;
        unsigned n, size;
        int i, r, ring;

        switch (flag) {
        case RADEON_TEST_COPY_DMA:
                ring = radeon_copy_dma_ring_index(rdev);
                break;
        case RADEON_TEST_COPY_BLIT:
                ring = radeon_copy_blit_ring_index(rdev);
                break;
        default:
                DRM_ERROR("Unknown copy method\n");
                return;
        }

        size = 1024 * 1024;

        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
        n = rdev->mc.gtt_size - rdev->gart_pin_size;
        n /= size;

        gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %u pointers\n", n);
                r = 1;
                goto out_cleanup;
        }

        r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             0, NULL, &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
        }
        r = radeon_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
        r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_cleanup;
        }
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gtt_start, **gtt_end;
                void **vram_start, **vram_end;

                r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_cleanup;
                r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_cleanup;
                }

                /* Fill the GTT BO with a recognizable pattern: each slot
                 * holds its own kernel mapping address.
                 */
                for (gtt_start = gtt_map,
                     gtt_end = (void *)((uintptr_t)gtt_map + size);
                     gtt_start < gtt_end;
                     gtt_start++)
                        *gtt_start = gtt_start;

                radeon_bo_kunmap(gtt_obj[i]);
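                /* Copy the pattern to VRAM with the selected copy method
                 * (DMA or blit) and wait for the resulting fence.
                 */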
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        r = radeon_copy_dma(rdev, gtt_addr, vram_addr,
                                            size / RADEON_GPU_PAGE_SIZE, &fence);
                else
                        r = radeon_copy_blit(rdev, gtt_addr, vram_addr,
                                             size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
                        goto out_cleanup;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
                        goto out_cleanup;
                }

                /* Verify the copy, and store the VRAM mapping addresses in
                 * the VRAM BO so the reverse copy can be checked the same way.
                 */
                for (gtt_start = gtt_map,
                     gtt_end = (void *)((uintptr_t)gtt_map + size),
                     vram_start = vram_map,
                     vram_end = (void *)((uintptr_t)vram_map + size);
                     vram_start < vram_end;
                     gtt_start++, vram_start++) {
                        if (*vram_start != gtt_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
                                           (uintptr_t)gtt_start - (uintptr_t)gtt_map),
                                          (unsigned long long)
                                          ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
                                           (uintptr_t)gtt_start - (uintptr_t)gtt_map));
                                radeon_bo_kunmap(vram_obj);
                                goto out_cleanup;
                        }
                        *vram_start = vram_start;
                }

                radeon_bo_kunmap(vram_obj);

                if (ring == R600_RING_TYPE_DMA_INDEX)
                        r = radeon_copy_dma(rdev, vram_addr, gtt_addr,
                                            size / RADEON_GPU_PAGE_SIZE, &fence);
                else
                        r = radeon_copy_blit(rdev, vram_addr, gtt_addr,
                                             size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
                        goto out_cleanup;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
                        goto out_cleanup;
                }

                for (gtt_start = gtt_map,
                     gtt_end = (void *)((uintptr_t)gtt_map + size),
                     vram_start = vram_map,
                     vram_end = (void *)((uintptr_t)vram_map + size);
                     gtt_start < gtt_end;
                     gtt_start++, vram_start++) {
                        if (*gtt_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
                                           (uintptr_t)vram_start - (uintptr_t)vram_map),
                                          (unsigned long long)
                                          ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
                                           (uintptr_t)vram_start - (uintptr_t)vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
                                goto out_cleanup;
                        }
                }

                radeon_bo_kunmap(gtt_obj[i]);

                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         (unsigned long long)(gtt_addr - rdev->mc.gtt_start));
        }
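        /* Error and success paths both land here: unpin, unreserve and free
         * whatever was successfully set up.
         */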
out_cleanup:
        if (vram_obj) {
                if (radeon_bo_is_reserved(vram_obj)) {
                        radeon_bo_unpin(vram_obj);
                        radeon_bo_unreserve(vram_obj);
                }
                radeon_bo_unref(&vram_obj);
        }
        if (gtt_obj) {
                for (i = 0; i < n; i++) {
                        if (gtt_obj[i]) {
                                if (radeon_bo_is_reserved(gtt_obj[i])) {
                                        radeon_bo_unpin(gtt_obj[i]);
                                        radeon_bo_unreserve(gtt_obj[i]);
                                }
                                radeon_bo_unref(&gtt_obj[i]);
                        }
                }
                kfree(gtt_obj);
        }
        if (fence) {
                radeon_fence_unref(&fence);
        }
        if (r) {
                printk(KERN_WARNING "Error while testing BO move.\n");
        }
}

void radeon_test_moves(struct radeon_device *rdev)
{
        if (rdev->asic->copy.dma)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
        if (rdev->asic->copy.blit)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

/* Emit a fence on @ring; UVD and VCE rings need dummy create/destroy
 * messages instead of a plain fence.
 */
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
                                             struct radeon_ring *ring,
                                             struct radeon_fence **fence)
{
        uint32_t handle = ring->idx ^ 0xdeafbeef;
        int r;

        if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
                r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
                   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
                r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else {
                r = radeon_ring_lock(rdev, ring, 64);
                if (r) {
                        DRM_ERROR("Failed to lock ring %d\n", ring->idx);
                        return r;
                }
                radeon_fence_emit(rdev, fence, ring->idx);
                radeon_ring_unlock_commit(rdev, ring, false);
        }
        return 0;
}

/* Test semaphore synchronization between two rings: ring A waits on a
 * semaphore twice, ring B signals it twice, and each fence on ring A
 * must only signal after the corresponding semaphore signal.
 */
void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
        struct radeon_fence *fence1 = NULL, *fence2 = NULL;
        struct radeon_semaphore *semaphore = NULL;
        int r;

        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
        if (r)
                goto out_cleanup;

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (radeon_fence_signaled(fence1)) {
                DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);

        r = radeon_fence_wait(fence1, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

        mdelay(1000);

        if (radeon_fence_signaled(fence2)) {
                DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
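        /* Signal the semaphore a second time so the wait emitted before
         * fence 2 can complete as well.
         */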
        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);

        r = radeon_fence_wait(fence2, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 2\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fence1)
                radeon_fence_unref(&fence1);

        if (fence2)
                radeon_fence_unref(&fence2);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

/* Test that one semaphore signal from ring C releases exactly one of the
 * two waiters on rings A and B, and that a second signal releases the other.
 */
static void radeon_test_ring_sync2(struct radeon_device *rdev,
                                   struct radeon_ring *ringA,
                                   struct radeon_ring *ringB,
                                   struct radeon_ring *ringC)
{
        struct radeon_fence *fenceA = NULL, *fenceB = NULL;
        struct radeon_semaphore *semaphore = NULL;
        bool sigA, sigB;
        int i, r;

        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
        if (r)
                goto out_cleanup;

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);
        r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (radeon_fence_signaled(fenceA)) {
                DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
        if (radeon_fence_signaled(fenceB)) {
                DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC, false);

        for (i = 0; i < 30; ++i) {
                mdelay(100);
                sigA = radeon_fence_signaled(fenceA);
                sigB = radeon_fence_signaled(fenceB);
                if (sigA || sigB)
                        break;
        }

        if (!sigA && !sigB) {
                DRM_ERROR("Neither fence A nor B has been signaled\n");
                goto out_cleanup;
        } else if (sigA && sigB) {
                DRM_ERROR("Both fences A and B have been signaled\n");
                goto out_cleanup;
        }

        DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
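        /* Signal once more from ring C; this should release the remaining
         * waiter so that both fences can be waited on.
         */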
        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC, false);

        mdelay(1000);

        r = radeon_fence_wait(fenceA, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence A\n");
                goto out_cleanup;
        }
        r = radeon_fence_wait(fenceB, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence B\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fenceA)
                radeon_fence_unref(&fenceA);

        if (fenceB)
                radeon_fence_unref(&fenceB);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
                                      struct radeon_ring *ringB)
{
        /* The two VCE rings can't synchronize with each other */
        if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
            ringB->idx == TN_RING_TYPE_VCE1_INDEX)
                return false;

        return true;
}

/* Test every ordered pair, and every ordered triple, of ready rings */
void radeon_test_syncing(struct radeon_device *rdev)
{
        int i, j, k;

        for (i = 1; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ringA = &rdev->ring[i];
                if (!ringA->ready)
                        continue;

                for (j = 0; j < i; ++j) {
                        struct radeon_ring *ringB = &rdev->ring[j];
                        if (!ringB->ready)
                                continue;

                        if (!radeon_test_sync_possible(ringA, ringB))
                                continue;

                        DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
                        radeon_test_ring_sync(rdev, ringA, ringB);

                        DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
                        radeon_test_ring_sync(rdev, ringB, ringA);

                        for (k = 0; k < j; ++k) {
                                struct radeon_ring *ringC = &rdev->ring[k];
                                if (!ringC->ready)
                                        continue;

                                if (!radeon_test_sync_possible(ringA, ringC))
                                        continue;

                                if (!radeon_test_sync_possible(ringB, ringC))
                                        continue;

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
                                radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
                                radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
                                radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
                                radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
                                radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
                                radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
                        }
                }
        }
}