/*
 * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andres Rodriguez
 */

#include "amdgpu.h"
#include "amdgpu_ring.h"

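/*
 * Set up a per-IP queue mapper: remember which HW IP block it serves
 * and start with an empty user-ring -> kernel-ring cache.
 */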
static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
				    int hw_ip)
{
	if (!mapper)
		return -EINVAL;

	/* hw_ip is an index into per-IP arrays, so AMDGPU_MAX_IP_NUM itself
	 * is already out of range. */
	if (hw_ip >= AMDGPU_MAX_IP_NUM)
		return -EINVAL;

	mapper->hw_ip = hw_ip;
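	/* DragonFly: lockinit() stands in for Linux mutex_init() here */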
	lockinit(&mapper->lock, "aqml", 0, LK_CANRECURSE);

	memset(mapper->queue_map, 0, sizeof(mapper->queue_map));

	return 0;
}

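/*
 * Return the kernel ring already associated with a user ring index,
 * or NULL on a cache miss.  Caller must hold mapper->lock.
 */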
static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
						 int ring)
{
	return mapper->queue_map[ring];
}

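/*
 * Record a new user-ring -> kernel-ring association.  Remapping an
 * already-mapped entry is a driver bug, so warn and bail instead of
 * silently overwriting it.  Caller must hold mapper->lock.
 */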
static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
				    int ring, struct amdgpu_ring *pring)
{
	if (WARN_ON(mapper->queue_map[ring])) {
		DRM_ERROR("Unexpected ring re-map\n");
		return -EINVAL;
	}

	mapper->queue_map[ring] = pring;

	return 0;
}

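/*
 * Identity mapping policy: user ring index i maps straight to kernel
 * ring i of the mapper's HW IP block, and the result is cached.
 */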
static int amdgpu_identity_map(struct amdgpu_device *adev,
			       struct amdgpu_queue_mapper *mapper,
			       u32 ring,
			       struct amdgpu_ring **out_ring)
{
	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
		*out_ring = &adev->gfx.gfx_ring[ring];
		break;
	case AMDGPU_HW_IP_COMPUTE:
		*out_ring = &adev->gfx.compute_ring[ring];
		break;
	case AMDGPU_HW_IP_DMA:
		*out_ring = &adev->sdma.instance[ring].ring;
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.inst[0].ring;
		break;
	case AMDGPU_HW_IP_VCE:
		*out_ring = &adev->vce.ring[ring];
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		*out_ring = &adev->uvd.inst[0].ring_enc[ring];
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		*out_ring = &adev->vcn.ring_dec;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		*out_ring = &adev->vcn.ring_enc[ring];
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		*out_ring = &adev->vcn.ring_jpeg;
		break;
	default:
		*out_ring = NULL;
		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
		return -EINVAL;
	}

	return amdgpu_update_cached_map(mapper, ring, *out_ring);
}

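/* Translate a userspace HW IP enum into the kernel's ring type enum. */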
static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
{
	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		return AMDGPU_RING_TYPE_GFX;
	case AMDGPU_HW_IP_COMPUTE:
		return AMDGPU_RING_TYPE_COMPUTE;
	case AMDGPU_HW_IP_DMA:
		return AMDGPU_RING_TYPE_SDMA;
	case AMDGPU_HW_IP_UVD:
		return AMDGPU_RING_TYPE_UVD;
	case AMDGPU_HW_IP_VCE:
		return AMDGPU_RING_TYPE_VCE;
	default:
		DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
		return -1;
	}
}

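/*
 * LRU mapping policy: pick the least recently used kernel ring of the
 * matching type, blacklisting rings this mapper has already handed out
 * so two user ring ids never alias the same kernel ring.
 */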
static int amdgpu_lru_map(struct amdgpu_device *adev,
			  struct amdgpu_queue_mapper *mapper,
			  u32 user_ring, bool lru_pipe_order,
			  struct amdgpu_ring **out_ring)
{
	int r, i, j;
	int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
	int ring_blacklist[AMDGPU_MAX_RINGS];
	struct amdgpu_ring *ring;

	/* 0 is a valid ring index, so initialize to -1 */
	memset(ring_blacklist, 0xff, sizeof(ring_blacklist));

	for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = mapper->queue_map[i];
		if (ring)
			ring_blacklist[j++] = ring->idx;
	}

	r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
				j, lru_pipe_order, out_ring);
	if (r)
		return r;

	return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
}

/**
 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * Initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	int i, r;

	if (!adev || !mgr)
		return -EINVAL;

	memset(mgr, 0, sizeof(*mgr));

	for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
		r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * De-initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
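	/* Nothing to tear down: the mapper only caches ring pointers
	 * owned by the device. */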
	return 0;
}

/**
 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 * @hw_ip: HW IP enum
 * @instance: HW instance
 * @ring: user ring id
 * @out_ring: pointer to mapped amdgpu_ring
 *
 * Map a userspace ring id to an appropriate kernel ring. The mapping
 * policy is chosen per HW IP type: GFX and the multimedia blocks use an
 * identity mapping, while COMPUTE and DMA rings are assigned on a
 * least-recently-used basis.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 u32 hw_ip, u32 instance, u32 ring,
			 struct amdgpu_ring **out_ring)
{
	int i, r, ip_num_rings = 0;
	struct amdgpu_queue_mapper *mapper;

	if (!adev || !mgr || !out_ring)
		return -EINVAL;

	if (hw_ip >= AMDGPU_MAX_IP_NUM)
		return -EINVAL;

	if (ring >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	/* Only index into the mapper array once hw_ip is validated. */
	mapper = &mgr->mapper[hw_ip];

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

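	/* Work out how many rings this IP actually exposes, honoring
	 * harvesting for the UVD blocks. */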
	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		ip_num_rings = adev->gfx.num_gfx_rings;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		ip_num_rings = adev->gfx.num_compute_rings;
		break;
	case AMDGPU_HW_IP_DMA:
		ip_num_rings = adev->sdma.num_instances;
		break;
	case AMDGPU_HW_IP_UVD:
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (!(adev->uvd.harvest_config & (1 << i)))
				ip_num_rings++;
		}
		break;
	case AMDGPU_HW_IP_VCE:
		ip_num_rings = adev->vce.num_rings;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (!(adev->uvd.harvest_config & (1 << i)))
				ip_num_rings++;
		}
		ip_num_rings =
			adev->uvd.num_enc_rings * ip_num_rings;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		ip_num_rings = 1;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		ip_num_rings = adev->vcn.num_enc_rings;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		ip_num_rings = 1;
		break;
	default:
		DRM_DEBUG("unknown ip type: %d\n", hw_ip);
		return -EINVAL;
	}

	if (ring >= ip_num_rings) {
		DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
			  ring, ip_num_rings, hw_ip);
		return -EINVAL;
	}

	mutex_lock(&mapper->lock);

	*out_ring = amdgpu_get_cached_map(mapper, ring);
	if (*out_ring) {
		/* cache hit */
		r = 0;
		goto out_unlock;
	}

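	/* Cache miss: establish a new mapping under the per-IP policy. */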
	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_UVD:
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_UVD_ENC:
	case AMDGPU_HW_IP_VCN_DEC:
	case AMDGPU_HW_IP_VCN_ENC:
	case AMDGPU_HW_IP_VCN_JPEG:
		r = amdgpu_identity_map(adev, mapper, ring, out_ring);
		break;
	case AMDGPU_HW_IP_DMA:
		r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
		break;
	case AMDGPU_HW_IP_COMPUTE:
		r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
		break;
	default:
		*out_ring = NULL;
		r = -EINVAL;
		DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
	}

out_unlock:
	mutex_unlock(&mapper->lock);
	return r;
}
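
/*
 * Usage sketch (illustrative, not part of this file): a caller such as
 * the command-submission path can resolve a user ring id to a kernel
 * ring.  The 'ctx' and 'user_ring_id' names below are hypothetical.
 *
 *	struct amdgpu_ring *ring;
 *	int r;
 *
 *	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
 *				 AMDGPU_HW_IP_COMPUTE, 0, user_ring_id,
 *				 &ring);
 *	if (r)
 *		return r;
 *	// later calls with the same user_ring_id hit the cached mapping
 */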