/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

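	/* the hardware rptr is a byte offset into the ring; convert it to a dword index */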
	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
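	/* read the register back so the posted write reaches the hardware */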
	(void)RREG32(reg);
}

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;

	if (rdev->wb.enabled) {
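		/* compute the ring position that will follow the IB packet
		 * and store it in the next_rptr writeback slot
		 */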
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
					 int ridx)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* reference */
	radeon_ring_write(ring, ref_and_mask); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr));

	return true;
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;

	/* FIXME: use something other than this big hammer, but after a few days
	 * we could not find a good combination, so reset the SDMA blocks as it
	 * seems we do not shut them down properly. This fixes hibernation and
	 * does not affect suspend to ram.
	 */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
	(void)RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);
	(void)RREG32(SRBM_SOFT_RESET);
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}

/**
 * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable preemption.
 *
 * Enable or disable automatic context switching for the async dma
 * engines (CIK).
 */
static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
{
	uint32_t reg_offset, value;
	int i;

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		value = RREG32(SDMA0_CNTL + reg_offset);
		if (enable)
			value |= AUTO_CTXSW_ENABLE;
		else
			value &= ~AUTO_CTXSW_ENABLE;
		WREG32(SDMA0_CNTL + reg_offset, value);
	}
}

/**
 * cik_sdma_enable - enable/disable the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(rdev);
		cik_sdma_rlc_stop(rdev);
	}

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}

	cik_sdma_ctx_switch_enable(rdev, enable);
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

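		/* the RB base registers are programmed in 256-byte units */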
		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	if (rdev->new_fw) {
		const struct sdma_firmware_header_v1_0 *hdr =
			(const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
		const __le32 *fw_data;
		u32 fw_size;

		radeon_ucode_print_sdma_hdr(&hdr->header);

		/* sdma0 */
		fw_data = (const __le32 *)
			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __le32 *)
			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	} else {
		const __be32 *fw_data;

		/* sdma0 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	}

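	/* reset the ucode load address back to the start */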
	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}

/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
				  uint64_t src_offset, uint64_t dst_offset,
				  unsigned num_gpu_pages,
				  struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

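	/* each COPY packet can transfer at most 0x1fffff bytes */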
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(src_offset));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset));
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
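	/* emit a one-dword linear write of 0xDEADBEEF to the writeback slot */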
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		return -ETIMEDOUT;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
			    struct radeon_ib *ib,
			    uint64_t pe, uint64_t src,
			    unsigned count)
{
	while (count) {
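		/* a single copy packet can move at most 0x1FFFF8 bytes of PTEs */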
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * cik_sdma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
			     struct radeon_ib *ib,
			     uint64_t pe,
			     uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
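		/* one write packet can carry at most 0xFFFFE data dwords (two per PTE) */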
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
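		/* the PTE/PDE generate packet can update at most 0x7FFFF entries */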
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs so its size is a multiple of 8 dwords (CIK).
 */
void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
{
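	/* append NOP packets until length_dw is a multiple of 8 */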
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: vm id to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		      unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

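	/* update the page table base address for this vm id via SRBM register writes */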
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm_id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm_id);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* reference */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}