/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_test.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned i, n, size;
	int r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		n -= rdev->ring[i].ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_cleanup;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}
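
		/*
		 * Seed every pointer-sized word of the GTT buffer with its own
		 * CPU address.  After the GPU copy, each destination word then
		 * names the exact source location it came from, so any
		 * corruption or misdirected page is self-identifying.
		 */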
		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size);
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
		     vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
					   (uintptr_t)gtt_start - (uintptr_t)gtt_map),
					  (unsigned long long)
					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
					   (uintptr_t)gtt_start - (uintptr_t)gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_cleanup;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
		     vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
					   (uintptr_t)vram_start - (uintptr_t)vram_map),
					  (unsigned long long)
					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
					   (uintptr_t)vram_start - (uintptr_t)vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%jx\n",
			 (uintmax_t)gtt_addr - rdev->mc.gtt_start);
	}
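
	/*
	 * Shared exit path, taken on success and on every failure above.
	 * Buffers are unpinned/unreserved only while still reserved, so a
	 * bail-out at any stage tears down exactly what was set up.
	 */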
out_cleanup:
	if (vram_obj) {
		if (radeon_bo_is_reserved(vram_obj)) {
			radeon_bo_unpin(vram_obj);
			radeon_bo_unreserve(vram_obj);
		}
		radeon_bo_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				if (radeon_bo_is_reserved(gtt_obj[i])) {
					radeon_bo_unpin(gtt_obj[i]);
					radeon_bo_unreserve(gtt_obj[i]);
				}
				radeon_bo_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}
	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		radeon_fence_emit(rdev, fence, ring->idx);
		radeon_ring_unlock_commit(rdev, ring);
	}
	return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);
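
	/* Second signal: the second semaphore wait on ring A can now pass,
	 * so fence2 is expected to retire under radeon_fence_wait(). */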
	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
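
	/* Exactly one fence has signaled so far; the other ring is still
	 * blocked on the semaphore.  Signal once more to release it. */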
	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}
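
/*
 * Note: these entry points are not reached in normal operation.  In the
 * stock radeon driver they are invoked once at device initialization when
 * the "test" module parameter is set.  A minimal sketch of that call site,
 * assuming the usual radeon_testing flag name, looks like:
 *
 *	if (radeon_testing) {
 *		radeon_test_moves(rdev);
 *		radeon_test_syncing(rdev);
 *	}
 */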