/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

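/*
 * Check whether the caller may create a context with the requested
 * scheduler priority.  NORMAL and below are open to everyone; higher
 * priorities currently require the caller to be the DRM master (the
 * CAP_SYS_NICE path is compiled out).
 */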
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

#if 0
	if (capable(CAP_SYS_NICE))
		return 0;
#endif

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

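/*
 * Initialize a context: allocate the per-ring fence slots, sample the
 * current reset and VRAM-lost counters, and create a scheduler entity
 * in the run queue matching @priority for every ring except the KIQ.
 */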
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_init(&ctx->ring_lock, "agcrl");
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	lockinit(&ctx->lock, "agctxl", 0, LK_CANRECURSE);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ctx->rings[i].entity,
					  &rq, 1, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_destroy(&ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

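/*
 * Release the context's resources: drop every cached ring fence, free
 * the fence array, tear down the queue manager and free the context
 * memory.  Invoked once the last reference is gone.
 */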
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

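/*
 * Allocate a new context for @fpriv: reserve an IDR handle under the
 * manager lock, initialize the context and return the handle in @id.
 * On failure the handle is removed again and the error is returned.
 */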
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), M_DRM, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

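/*
 * kref release callback: destroy the scheduler entities of all rings
 * (except the KIQ, which never got one) and then free the context.
 */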
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++) {

		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_destroy(&ctx->rings[i].entity);
	}

	amdgpu_ctx_fini(ref);
}

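/*
 * Remove the IDR handle @id and drop the reference it held; returns
 * -EINVAL if no such context exists.
 */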
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

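/*
 * Legacy state query: report whether a GPU reset happened since the
 * last query on this context.  It cannot distinguish guilty from
 * innocent contexts, hence the UNKNOWN_RESET status.
 */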
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

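/*
 * Extended state query: report reset, VRAM-lost and guilty status as
 * flags, based on the counters sampled at context creation time.
 */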
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

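/*
 * DRM_AMDGPU_CTX ioctl entry point: dispatch context allocation, free
 * and state queries.  An invalid priority from userspace is silently
 * mapped to NORMAL for backwards compatibility.
 */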
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

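/*
 * Look up the context for @id and return it with an extra reference,
 * or NULL if it does not exist.
 */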
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

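/*
 * Remember @fence in the ring's fence slot for the current sequence
 * number and hand that sequence number back through @handler.  The
 * slot being recycled must already have signaled.
 */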
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

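/*
 * Return a reference to the fence of sequence number @seq on @ring,
 * NULL if it is already too old to be tracked, or an -EINVAL error
 * pointer if it has not been emitted yet.  ~0 means "most recent".
 */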
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

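/*
 * Apply a priority override to the context: move every ring's
 * scheduler entity (except the KIQ's) to the run queue matching the
 * effective priority.
 */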
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

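/*
 * Block until the fence occupying the slot that the next submission on
 * @ring_id would reuse has signaled.
 */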
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	lockinit(&mgr->lock, "agml", 0, LK_CANRECURSE);
	idr_init(&mgr->ctx_handles);
}

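/*
 * Flush the scheduler entities of every context managed by @mgr,
 * waiting for their queued jobs to drain within a shared timeout
 * budget.
 */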
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
							  max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}

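/*
 * Finalize the scheduler entities of every context that is down to its
 * last reference; contexts still holding extra references are reported
 * with DRM_ERROR instead.
 */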
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_fini(&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
	}
}

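/*
 * Tear down the context manager: finalize the remaining entities, drop
 * the last reference on every context and destroy the IDR and lock.
 */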
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}