xref: /dragonfly/sys/dev/drm/radeon/cik_sdma.c (revision 62dc643e)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_ucode.h"
#include "radeon_asic.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */

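/*
 * For reference, the sDMA packet headers emitted throughout this file are
 * built with the SDMA_PACKET() macro from cikd.h, which packs the opcode,
 * sub-opcode and packet-specific extra bits into one header DW (a sketch
 * of that layout, as defined in cikd.h):
 *
 *   SDMA_PACKET(op, sub_op, e) = ((e & 0xFFFF) << 16) |
 *                                ((sub_op & 0xFF) << 8) |
 *                                ((op & 0xFF) << 0)
 *
 * e.g. SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 * encodes a linear write packet whose body (address, count, payload)
 * follows in subsequent DWs.
 */
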
/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
	(void)RREG32(reg); /* posting read: flush the wptr write to the hardware */
}

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}

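/*
 * Worked example of the alignment rule above (spelled out from the code,
 * not from hardware documentation): the INDIRECT_BUFFER packet is 4 DWs,
 * so for it to *end* on an 8 DW boundary it must *start* at
 * (wptr & 7) == 4.  If wptr & 7 == 6 when the IB is scheduled, the loop
 * emits 6 NOPs (6 -> 7 -> 0 -> 1 -> 2 -> 3 -> 4) before the 4 DW packet,
 * which then ends exactly on a multiple of 8 DWs.
 */
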
/**
 * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
					 int ridx)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* reference */
	radeon_ring_write(ring, ref_and_mask); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr));

	return true;
}

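/*
 * Note on the semaphore packet above (inferred from the code): the
 * address is masked with 0xfffffff8, i.e. the semaphore location is
 * expected to be 8-byte aligned, and the S extra bit distinguishes
 * signal (set, when !emit_wait) from wait (clear).
 */
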
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;

	/* FIXME: use something gentler than this big hammer; after a few days
	 * of trying we could not find a good register combination, so soft
	 * reset the SDMA blocks since we do not seem to shut them down
	 * properly otherwise.  This fixes hibernation and does not affect
	 * suspend to RAM.
	 */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
	(void)RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);
	(void)RREG32(SRBM_SOFT_RESET);
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}

/**
 * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable preemption.
 *
 * Enable or disable automatic context switching for the async dma
 * engines (CIK).
 */
static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
{
	uint32_t reg_offset, value;
	int i;

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		value = RREG32(SDMA0_CNTL + reg_offset);
		if (enable)
			value |= AUTO_CTXSW_ENABLE;
		else
			value &= ~AUTO_CTXSW_ENABLE;
		WREG32(SDMA0_CNTL + reg_offset, value);
	}
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(rdev);
		cik_sdma_rlc_stop(rdev);
	}

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}

	cik_sdma_ctx_switch_enable(rdev, enable);
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

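/*
 * Worked example of the RB_CNTL size encoding above (derived from the
 * code, not from register documentation): the ring size is programmed as
 * log2 of the size in DWs, shifted into place.  For a 256 KiB ring,
 * ring_size / 4 = 65536 DWs and order_base_2(65536) = 16, so
 * rb_cntl = 16 << 1 = 0x20 before the swap/writeback/enable bits are
 * OR'ed in.
 */
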
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	if (rdev->new_fw) {
		const struct sdma_firmware_header_v1_0 *hdr =
			(const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
		const __le32 *fw_data;
		u32 fw_size;

		radeon_ucode_print_sdma_hdr(&hdr->header);

		/* sdma0 */
		fw_data = (const __le32 *)
			((const char *)rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __le32 *)
			((const char *)rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	} else {
		const __be32 *fw_data;

		/* sdma0 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	}

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}

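/*
 * Note on the upload sequence above (an inference from the code, not
 * from hardware documentation): UCODE_ADDR is reset to 0 and the image
 * is then streamed one DW at a time through UCODE_DATA, which implies
 * the data port auto-increments the internal address.  A trailing
 * CIK_SDMA_UCODE_VERSION DW is written after the image, and UCODE_ADDR
 * is cleared again once both engines are loaded.
 */
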
/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
		 uint64_t src_offset, uint64_t dst_offset,
		 unsigned num_gpu_pages,
		 struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(src_offset));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset));
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

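/*
 * Worked example of the chunking above: each linear COPY packet moves at
 * most 0x1fffff bytes and occupies 7 ring DWs, so copying 8 MiB
 * (0x800000 bytes) takes DIV_ROUND_UP(0x800000, 0x1fffff) = 5 packets,
 * and the ring is locked for 5 * 7 + 14 = 49 DWs.  The extra 14 DWs are
 * headroom for the semaphore sync and fence packets emitted around the
 * copy loop.
 */
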
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
#if 0
		return -ETIMEDOUT;
#endif
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
			    struct radeon_ib *ib,
			    uint64_t pe, uint64_t src,
			    unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

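/*
 * Note on the 0x1FFFF8 cap above (inferred from the code): it is the
 * copy engine's 0x1FFFFF byte limit rounded down to a multiple of 8, so
 * each COPY packet moves a whole number of 8-byte PTEs; that works out
 * to 0x1FFFF8 / 8 = 262143 PTEs per packet.
 */
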
/**
 * cik_sdma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
			     struct radeon_ib *ib,
			     uint64_t pe,
			     uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

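/*
 * Note on the WRITE packet sizing above (derived from the code): each
 * PTE takes two DWs of payload and ndw is capped at 0xFFFFE, so a single
 * linear WRITE packet updates at most 0xFFFFE / 2 = 524287 page entries
 * before a new packet header has to be emitted.
 */
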
/**
 * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

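/*
 * The GENERATE_PTE_PDE packet above is 10 DWs and covers up to 0x7FFFF
 * (524287) entries per packet.  As the code sets it up, the flags go in
 * the mask DW, the starting address in the value DW, and the engine
 * advances the value by `incr` for each of the `ndw` 8-byte entries,
 * which is why this path only works for physically contiguous (vram)
 * mappings.
 */
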
/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs so its size is a multiple of 8 DWs (CIK).
 */
void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

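/*
 * e.g. an IB whose length_dw is 5 gets three NOP headers appended,
 * bringing it to 8 DWs and satisfying the same 8 DW alignment rule that
 * cik_sdma_ring_ib_execute() enforces on the ring above.
 */
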
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: vm id to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		      unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm_id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm_id);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* reference */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}