/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * followed by a DMA trap packet to generate an interrupt if needed
 * (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
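	/* The fence packet carries the dword-aligned low 32 bits of the
	 * fence address and the upper 8 bits separately (40-bit GPU
	 * address space).
	 */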
	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP so the fence write is visible to the CPU */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
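		/* Compute the ring position just past the coming IB packet:
		 * skip this 4 DW write packet, align to (wptr & 7) == 5 where
		 * the IB packet will start after NOP padding, then add the
		 * 3 DW IB packet itself.
		 */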
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
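	/* dword 1 carries bits 31:5 of the IB base (32 byte aligned), dword 2
	 * packs the IB length in DWs (bits 31:12) with address bits 39:32.
	 */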
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 * Returns the fence for the copy, or an ERR_PTR on failure.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

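	/* Convert pages to DWs; each DMA_PACKET_COPY moves at most 0xfffff
	 * DWs, so split the copy into that many loops.  Reserve 5 ring DWs
	 * per copy packet plus headroom for the sync and fence packets.
	 */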
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

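	/* If the soft reset status does not flag the DMA engine, record
	 * forward progress and report no lockup; otherwise check whether
	 * the ring has actually stalled since the last update.
	 */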
	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}