/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 */
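
/*
 * Every DMA packet begins with a header dword built by the DMA_PACKET()
 * macro (see r600d.h): an opcode, a tiling flag ('t'), a second
 * opcode-specific flag ('s', e.g. signal vs. wait for semaphores), and a
 * 16-bit count ('n').  The exact bit positions are chip-specific.
 */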

/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

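	/*
	 * Prefer the writeback copy of the rptr when writeback is enabled;
	 * it avoids a slow MMIO register read.
	 */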
	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

	/* mask to the ring size and convert the byte offset to a dword index */
	return (rptr & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}

/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

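	/*
	 * While the DMA engine (the default buffer-move ring) is down,
	 * restrict TTM to the CPU-visible part of VRAM so buffer moves
	 * can still be serviced.
	 */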
	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = order_base_2(ring->ring_size / 4);
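	/* the size field of DMA_RB_CNTL takes log2 of the ring size in dwords */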
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
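	/* one-dword DMA WRITE to the scratch slot: header, addr lo, addr hi, data */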
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence sequence
 * number, and a DMA trap packet to generate an interrupt if
 * needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

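	/* the 's' header field selects semaphore signal (1) vs. wait (0) */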
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

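	/* the IB holds a single 4-dword WRITE packet: header, addr lo/hi, payload */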
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		return -ETIMEDOUT;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
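		/*
		 * Predict where the rptr will point once this submission is
		 * consumed: 4 dwords for this WRITE packet, NOP padding until
		 * the INDIRECT_BUFFER packet can start at a (wptr & 7) == 5
		 * slot, then 3 dwords for that packet.
		 */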
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset, uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
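	/*
	 * Each COPY packet is 4 ring dwords and moves at most 0xFFFE dwords;
	 * the extra 8 dwords reserved below cover semaphore sync and the
	 * fence packet.
	 */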
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}