// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */

#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
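	/*
	 * For each test-sized slot that fits in the unpinned part of the
	 * GTT: create and pin a GTT buffer, fill it with a recognizable
	 * pattern, copy it into VRAM on the selected ring, verify the
	 * copy, then copy it back to GTT and verify again.
	 */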
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

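		/*
		 * Pattern: each pointer-sized slot of the GTT buffer stores
		 * its own kernel-map address, so a later mismatch also
		 * identifies which slot the stale data came from.
		 */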
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

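		/* GTT -> VRAM copy on the ring selected above (DMA or blit). */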
		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

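		/*
		 * Check the VRAM copy against the GTT pattern, and at the
		 * same time re-fill VRAM with its own map addresses for the
		 * copy back to GTT.
		 */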
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void *)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void *)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

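		/* VRAM -> GTT copy back on the same ring. */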
		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

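		/* Verify the copy back to GTT against the VRAM pattern. */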
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void *)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void *)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

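	/*
	 * Error unwinding: back out whatever this iteration managed to do,
	 * then release all GTT buffers created by earlier iterations.
	 */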
out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		pr_warn("Error while testing BO move\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

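	/*
	 * UVD and VCE rings are driven with session messages here: emit a
	 * dummy create/destroy pair and take the fence from the destroy
	 * message. All other rings can emit a fence directly.
	 */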
	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

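	/*
	 * Queue two semaphore waits on ring A, each followed by a fence,
	 * then check that neither fence retires until ring B has signaled
	 * the semaphore the matching number of times.
	 */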
	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	drm_msleep(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	drm_msleep(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
			    struct radeon_ring *ringA,
			    struct radeon_ring *ringB,
			    struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

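	/*
	 * Rings A and B each wait on the same semaphore. Ring C signals it
	 * twice; after each signal exactly one of the two fences should
	 * retire, in either order.
	 */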
	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	drm_msleep(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

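	/* Poll for up to ~3 seconds until one of the two fences retires. */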
	for (i = 0; i < 30; ++i) {
		drm_msleep(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	drm_msleep(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
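	/* Semaphore sync between the two VCE rings is not exercised. */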
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

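	/*
	 * Walk every ordered pair of ready rings for the basic semaphore
	 * test, and every ordered triple for the two-waiters/one-signaller
	 * test.
	 */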
	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}