/*	$NetBSD: amdgpu_ctx.c,v 1.8 2021/12/19 12:38:41 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ctx.c,v 1.8 2021/12/19 12:38:41 riastradh Exp $");

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#include <linux/nbsd-namespace.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

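/*
 * Number of scheduler entities a context exposes for each hardware IP
 * type; e.g. one gfx entity but four compute entities per context.
 */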
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

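/*
 * Check whether the caller may create a context at the requested
 * scheduler priority: NORMAL and below are open to everyone, higher
 * priorities require CAP_SYS_NICE or DRM master status.
 */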
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

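/*
 * Lazily allocate the context entity for (hw_ip, ring): pick the
 * scheduler(s) backing that IP type, then initialize a drm_sched_entity
 * with a ring buffer of amdgpu_sched_jobs fence slots.
 */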
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip,
				  const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
				ctx->init_priority : ctx->override_priority;
	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		sched = &adev->gfx.gfx_ring[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		scheds = adev->gfx.compute_sched;
		num_scheds = adev->gfx.num_compute_sched;
		break;
	case AMDGPU_HW_IP_DMA:
		scheds = adev->sdma.sdma_sched;
		num_scheds = adev->sdma.num_sdma_sched;
		break;
	case AMDGPU_HW_IP_UVD:
		sched = &adev->uvd.inst[0].ring.sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_VCE:
		sched = &adev->vce.ring[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		sched = &adev->uvd.inst[0].ring_enc[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		scheds = adev->vcn.vcn_dec_sched;
		num_scheds = adev->vcn.num_vcn_dec_sched;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		scheds = adev->vcn.vcn_enc_sched;
		num_scheds = adev->vcn.num_vcn_enc_sched;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		scheds = adev->jpeg.jpeg_sched;
		num_scheds = adev->jpeg.num_jpeg_sched;
		break;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

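/*
 * Initialize a freshly allocated context: record the reset and
 * VRAM-lost counters at creation time along with the initial priority.
 * Entities are created on demand via amdgpu_ctx_get_entity().
 */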
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

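/* Drop the fence references still held in the entity's ring buffer. */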
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

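/*
 * Final kref destructor: free all entities and the context itself.
 * The drm_sched entities are torn down beforehand, either in
 * amdgpu_ctx_do_release() or amdgpu_ctx_mgr_entity_fini().
 */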
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	spin_lock_destroy(&ctx->ring_lock);
	kfree(ctx);
}

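/*
 * Translate a (hw_ip, instance, ring) triple from userspace into a
 * scheduler entity, validating the indices and creating the entity on
 * first use.
 */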
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

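/*
 * Allocate a context and publish it in the file's handle IDR; the new
 * handle is returned through *id.
 */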
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	idr_preload(GFP_KERNEL);
	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		idr_preload_end();
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	idr_preload_end();

	return r;
}

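/*
 * kref release callback for handles dropped via amdgpu_ctx_free() or
 * amdgpu_ctx_put(): destroy the scheduler entities, then free the
 * context.
 */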
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

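/* Remove a context handle from the IDR and drop its reference. */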
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

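/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset happened since
 * the previous query on this context.
 */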
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

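/*
 * AMDGPU_CTX_OP_QUERY_STATE2: like amdgpu_ctx_query(), but reports
 * reset/VRAM-lost/guilty status and RAS error counter changes as flag
 * bits.
 */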
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query ue count; the ras counter is monotonically increasing */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query ce count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

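/*
 * Entry point for the context ioctl (DRM_IOCTL_AMDGPU_CTX), dispatching
 * on args->in.op to allocate, free, or query a context.
 *
 * Illustrative userspace sketch (not part of this file; assumes libdrm):
 *
 *	union drm_amdgpu_ctx args = { .in.op = AMDGPU_CTX_OP_ALLOC_CTX };
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args)) == 0)
 *		ctx_id = args.out.alloc.ctx_id;
 */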
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

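/* Look up a context by handle and take a reference on it. */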
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

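/* Drop a reference obtained with amdgpu_ctx_get(). */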
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

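/*
 * Remember the fence of a just-submitted job in the entity's ring
 * buffer and hand back its sequence number.  The slot index is
 * seq & (amdgpu_sched_jobs - 1), which relies on amdgpu_sched_jobs
 * being a power of two (the driver rounds the module parameter up
 * elsewhere).
 */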
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	spin_lock(&ctx->ring_lock);
	seq = centity->sequence;
	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

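/*
 * Fetch the fence for a given sequence number, ~0ull meaning the most
 * recent one.  Returns ERR_PTR(-EINVAL) for sequence numbers from the
 * future, NULL for ones so old that they have left the ring buffer.
 */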
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

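/*
 * Apply a priority override to every entity already created for the
 * context; entities created later pick it up in
 * amdgpu_ctx_init_entity().
 */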
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			struct drm_sched_entity *entity;

			if (!ctx->entities[i][j])
				continue;

			entity = &ctx->entities[i][j]->entity;
			drm_sched_entity_set_priority(entity, ctx_prio);
		}
	}
}

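/*
 * Wait (interruptibly) for the fence that currently occupies the slot
 * the next submission will reuse, so the ring buffer never overwrites
 * an unsignaled fence (pairs with the BUG_ON in amdgpu_ctx_add_fence()).
 */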
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

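/* Set up a per-file context manager: its lock and handle IDR. */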
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

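/*
 * On file close: flush all entities of all contexts, giving queued
 * jobs up to `timeout` to drain; returns the remaining timeout.
 */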
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

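/*
 * Tear down the scheduler entities of all contexts that are no longer
 * referenced by anyone but the manager.
 */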
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

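/* Release all remaining contexts and destroy the manager itself. */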
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

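/*
 * Collect the per-ring schedulers of every IP block into the flat
 * per-IP arrays that amdgpu_ctx_init_entity() selects from, skipping
 * harvested VCN/JPEG instances.
 */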
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
		adev->gfx.num_gfx_sched++;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
		adev->gfx.num_compute_sched++;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
		adev->sdma.num_sdma_sched++;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
			&adev->vcn.inst[i].ring_dec.sched;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
				&adev->vcn.inst[i].ring_enc[j].sched;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
			&adev->jpeg.inst[i].ring_dec.sched;
	}
}