1 /*
2 * Copyright 2008 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32
33 #include <drm/amdgpu_drm.h>
34 #include <drm/drm_syncobj.h>
35 #include <drm/ttm/ttm_tt.h>
36
37 #include "amdgpu_cs.h"
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_gmc.h"
41 #include "amdgpu_gem.h"
42 #include "amdgpu_ras.h"
43
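/*
 * Command submission (CS) overview of this file:
 *  - amdgpu_cs_pass1() copies the chunk array from userspace, counts the IBs
 *    per gang member and allocates the jobs,
 *  - amdgpu_cs_pass2() fills the IBs and gathers dependencies and syncobjs,
 *  - amdgpu_cs_parser_bos() reserves and validates all buffer objects,
 *  - amdgpu_cs_vm_handling() brings the page tables up to date,
 *  - amdgpu_cs_sync_rings() collects the remaining fences to wait for and
 *  - amdgpu_cs_submit() pushes the jobs to the scheduler.
 */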
44 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
45 struct amdgpu_device *adev,
46 struct drm_file *filp,
47 union drm_amdgpu_cs *cs)
48 {
49 struct amdgpu_fpriv *fpriv = filp->driver_priv;
50
51 if (cs->in.num_chunks == 0)
52 return -EINVAL;
53
54 memset(p, 0, sizeof(*p));
55 p->adev = adev;
56 p->filp = filp;
57
58 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
59 if (!p->ctx)
60 return -EINVAL;
61
62 if (atomic_read(&p->ctx->guilty)) {
63 amdgpu_ctx_put(p->ctx);
64 return -ECANCELED;
65 }
66
67 amdgpu_sync_create(&p->sync);
68 drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
69 DRM_EXEC_IGNORE_DUPLICATES);
70 return 0;
71 }
72
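/*
 * Map an IB chunk to a gang member: look up the scheduler entity for the
 * requested IP type/instance/ring and return the index of the job that
 * already uses this entity, or grow the gang (up to AMDGPU_CS_GANG_SIZE)
 * and return the newly assigned slot. Returns a negative error code if the
 * entity can't be used.
 */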
73 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
74 struct drm_amdgpu_cs_chunk_ib *chunk_ib)
75 {
76 struct drm_sched_entity *entity;
77 unsigned int i;
78 int r;
79
80 r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
81 chunk_ib->ip_instance,
82 chunk_ib->ring, &entity);
83 if (r)
84 return r;
85
86 /*
87 * Abort if there is no run queue associated with this entity.
88 * Possibly because of disabled HW IP.
89 */
90 if (entity->rq == NULL)
91 return -EINVAL;
92
93 /* Check if we can add this IB to some existing job */
94 for (i = 0; i < p->gang_size; ++i)
95 if (p->entities[i] == entity)
96 return i;
97
98 	/* If not, increase the gang size if possible */
99 if (i == AMDGPU_CS_GANG_SIZE)
100 return -EINVAL;
101
102 p->entities[i] = entity;
103 p->gang_size = i + 1;
104 return i;
105 }
106
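/*
 * First pass over an IB chunk: only count how many IBs each gang member
 * needs so that the jobs can later be allocated with the right number of
 * IBs; the gang member of the last IB chunk becomes the gang leader.
 */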
107 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
108 struct drm_amdgpu_cs_chunk_ib *chunk_ib,
109 unsigned int *num_ibs)
110 {
111 int r;
112
113 r = amdgpu_cs_job_idx(p, chunk_ib);
114 if (r < 0)
115 return r;
116
117 if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
118 return -EINVAL;
119
120 ++(num_ibs[r]);
121 p->gang_leader_idx = r;
122 return 0;
123 }
124
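/*
 * Look up the BO used for the user fence. It must be exactly one page in
 * size, the fence offset must leave room for a 64-bit value and userptr
 * BOs are not allowed.
 */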
125 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
126 struct drm_amdgpu_cs_chunk_fence *data,
127 uint32_t *offset)
128 {
129 struct drm_gem_object *gobj;
130 unsigned long size;
131
132 gobj = drm_gem_object_lookup(p->filp, data->handle);
133 if (gobj == NULL)
134 return -EINVAL;
135
136 p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
137 drm_gem_object_put(gobj);
138
139 size = amdgpu_bo_size(p->uf_bo);
140 if (size != PAGE_SIZE || data->offset > (size - 8))
141 return -EINVAL;
142
143 if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
144 return -EINVAL;
145
146 *offset = data->offset;
147 return 0;
148 }
149
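/* Create p->bo_list from the handles passed in the BO_HANDLES chunk. */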
150 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
151 struct drm_amdgpu_bo_list_in *data)
152 {
153 struct drm_amdgpu_bo_list_entry *info;
154 int r;
155
156 r = amdgpu_bo_create_list_entry_array(data, &info);
157 if (r)
158 return r;
159
160 r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
161 &p->bo_list);
162 if (r)
163 goto error_free;
164
165 kvfree(info);
166 return 0;
167
168 error_free:
169 kvfree(info);
170
171 return r;
172 }
173
174 /* Copy the data from userspace and go over it the first time */
175 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
176 union drm_amdgpu_cs *cs)
177 {
178 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
179 unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
180 struct amdgpu_vm *vm = &fpriv->vm;
181 uint64_t *chunk_array_user;
182 uint64_t *chunk_array;
183 uint32_t uf_offset = 0;
184 size_t size;
185 int ret;
186 int i;
187
188 chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
189 GFP_KERNEL);
190 if (!chunk_array)
191 return -ENOMEM;
192
193 /* get chunks */
194 chunk_array_user = u64_to_user_ptr(cs->in.chunks);
195 if (copy_from_user(chunk_array, chunk_array_user,
196 sizeof(uint64_t)*cs->in.num_chunks)) {
197 ret = -EFAULT;
198 goto free_chunk;
199 }
200
201 p->nchunks = cs->in.num_chunks;
202 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
203 GFP_KERNEL);
204 if (!p->chunks) {
205 ret = -ENOMEM;
206 goto free_chunk;
207 }
208
209 for (i = 0; i < p->nchunks; i++) {
210 struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
211 struct drm_amdgpu_cs_chunk user_chunk;
212 uint32_t __user *cdata;
213
214 chunk_ptr = u64_to_user_ptr(chunk_array[i]);
215 if (copy_from_user(&user_chunk, chunk_ptr,
216 sizeof(struct drm_amdgpu_cs_chunk))) {
217 ret = -EFAULT;
218 i--;
219 goto free_partial_kdata;
220 }
221 p->chunks[i].chunk_id = user_chunk.chunk_id;
222 p->chunks[i].length_dw = user_chunk.length_dw;
223
224 size = p->chunks[i].length_dw;
225 cdata = u64_to_user_ptr(user_chunk.chunk_data);
226
227 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
228 GFP_KERNEL);
229 if (p->chunks[i].kdata == NULL) {
230 ret = -ENOMEM;
231 i--;
232 goto free_partial_kdata;
233 }
234 size *= sizeof(uint32_t);
235 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
236 ret = -EFAULT;
237 goto free_partial_kdata;
238 }
239
240 /* Assume the worst on the following checks */
241 ret = -EINVAL;
242 switch (p->chunks[i].chunk_id) {
243 case AMDGPU_CHUNK_ID_IB:
244 if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
245 goto free_partial_kdata;
246
247 ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
248 if (ret)
249 goto free_partial_kdata;
250 break;
251
252 case AMDGPU_CHUNK_ID_FENCE:
253 if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
254 goto free_partial_kdata;
255
256 ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
257 &uf_offset);
258 if (ret)
259 goto free_partial_kdata;
260 break;
261
262 case AMDGPU_CHUNK_ID_BO_HANDLES:
263 if (size < sizeof(struct drm_amdgpu_bo_list_in))
264 goto free_partial_kdata;
265
266 ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
267 if (ret)
268 goto free_partial_kdata;
269 break;
270
271 case AMDGPU_CHUNK_ID_DEPENDENCIES:
272 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
273 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
274 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
275 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
276 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
277 case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
278 break;
279
280 default:
281 goto free_partial_kdata;
282 }
283 }
284
285 if (!p->gang_size) {
286 ret = -EINVAL;
287 goto free_all_kdata;
288 }
289
290 for (i = 0; i < p->gang_size; ++i) {
291 ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
292 num_ibs[i], &p->jobs[i]);
293 if (ret)
294 goto free_all_kdata;
295 }
296 p->gang_leader = p->jobs[p->gang_leader_idx];
297
298 if (p->ctx->generation != p->gang_leader->generation) {
299 ret = -ECANCELED;
300 goto free_all_kdata;
301 }
302
303 if (p->uf_bo)
304 p->gang_leader->uf_addr = uf_offset;
305 kvfree(chunk_array);
306
307 /* Use this opportunity to fill in task info for the vm */
308 amdgpu_vm_set_task_info(vm);
309
310 return 0;
311
312 free_all_kdata:
313 i = p->nchunks - 1;
314 free_partial_kdata:
315 for (; i >= 0; i--)
316 kvfree(p->chunks[i].kdata);
317 kvfree(p->chunks);
318 p->chunks = NULL;
319 p->nchunks = 0;
320 free_chunk:
321 kvfree(chunk_array);
322
323 return ret;
324 }
325
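/*
 * Second pass over an IB chunk: allocate the IB in the right gang member
 * job, reject user fences on rings without user fence support, allow at
 * most one preemptible CE and one preemptible DE IB per GFX submission and
 * record the IB's GPU address and size.
 */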
326 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
327 struct amdgpu_cs_chunk *chunk,
328 unsigned int *ce_preempt,
329 unsigned int *de_preempt)
330 {
331 struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
332 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
333 struct amdgpu_vm *vm = &fpriv->vm;
334 struct amdgpu_ring *ring;
335 struct amdgpu_job *job;
336 struct amdgpu_ib *ib;
337 int r;
338
339 r = amdgpu_cs_job_idx(p, chunk_ib);
340 if (r < 0)
341 return r;
342
343 job = p->jobs[r];
344 ring = amdgpu_job_ring(job);
345 ib = &job->ibs[job->num_ibs++];
346
347 /* MM engine doesn't support user fences */
348 if (p->uf_bo && ring->funcs->no_user_fence)
349 return -EINVAL;
350
351 if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
352 chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
353 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
354 (*ce_preempt)++;
355 else
356 (*de_preempt)++;
357
358 /* Each GFX command submit allows only 1 IB max
359 * preemptible for CE & DE */
360 if (*ce_preempt > 1 || *de_preempt > 1)
361 return -EINVAL;
362 }
363
364 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
365 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
366
367 r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
368 chunk_ib->ib_bytes : 0,
369 AMDGPU_IB_POOL_DELAYED, ib);
370 if (r) {
371 DRM_ERROR("Failed to get ib !\n");
372 return r;
373 }
374
375 ib->gpu_addr = chunk_ib->va_start;
376 ib->length_dw = chunk_ib->ib_bytes / 4;
377 ib->flags = chunk_ib->flags;
378 return 0;
379 }
380
381 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
382 struct amdgpu_cs_chunk *chunk)
383 {
384 struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
385 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
386 unsigned int num_deps;
387 int i, r;
388
389 num_deps = chunk->length_dw * 4 /
390 sizeof(struct drm_amdgpu_cs_chunk_dep);
391
392 for (i = 0; i < num_deps; ++i) {
393 struct amdgpu_ctx *ctx;
394 struct drm_sched_entity *entity;
395 struct dma_fence *fence;
396
397 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
398 if (ctx == NULL)
399 return -EINVAL;
400
401 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
402 deps[i].ip_instance,
403 deps[i].ring, &entity);
404 if (r) {
405 amdgpu_ctx_put(ctx);
406 return r;
407 }
408
409 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
410 amdgpu_ctx_put(ctx);
411
412 if (IS_ERR(fence))
413 return PTR_ERR(fence);
414 else if (!fence)
415 continue;
416
417 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
418 struct drm_sched_fence *s_fence;
419 struct dma_fence *old = fence;
420
421 s_fence = to_drm_sched_fence(fence);
422 fence = dma_fence_get(&s_fence->scheduled);
423 dma_fence_put(old);
424 }
425
426 r = amdgpu_sync_fence(&p->sync, fence);
427 dma_fence_put(fence);
428 if (r)
429 return r;
430 }
431 return 0;
432 }
433
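/*
 * Look up the fence behind a syncobj handle (optionally at a timeline
 * point) and add it as a dependency to p->sync.
 */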
434 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
435 uint32_t handle, u64 point,
436 u64 flags)
437 {
438 struct dma_fence *fence;
439 int r;
440
441 r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
442 if (r) {
443 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
444 handle, point, r);
445 return r;
446 }
447
448 r = amdgpu_sync_fence(&p->sync, fence);
449 dma_fence_put(fence);
450 return r;
451 }
452
453 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
454 struct amdgpu_cs_chunk *chunk)
455 {
456 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
457 unsigned int num_deps;
458 int i, r;
459
460 num_deps = chunk->length_dw * 4 /
461 sizeof(struct drm_amdgpu_cs_chunk_sem);
462 for (i = 0; i < num_deps; ++i) {
463 r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
464 if (r)
465 return r;
466 }
467
468 return 0;
469 }
470
471 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
472 struct amdgpu_cs_chunk *chunk)
473 {
474 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
475 unsigned int num_deps;
476 int i, r;
477
478 num_deps = chunk->length_dw * 4 /
479 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
480 for (i = 0; i < num_deps; ++i) {
481 r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
482 syncobj_deps[i].point,
483 syncobj_deps[i].flags);
484 if (r)
485 return r;
486 }
487
488 return 0;
489 }
490
491 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
492 struct amdgpu_cs_chunk *chunk)
493 {
494 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
495 unsigned int num_deps;
496 int i;
497
498 num_deps = chunk->length_dw * 4 /
499 sizeof(struct drm_amdgpu_cs_chunk_sem);
500
501 if (p->post_deps)
502 return -EINVAL;
503
504 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
505 GFP_KERNEL);
506 p->num_post_deps = 0;
507
508 if (!p->post_deps)
509 return -ENOMEM;
510
511
512 for (i = 0; i < num_deps; ++i) {
513 p->post_deps[i].syncobj =
514 drm_syncobj_find(p->filp, deps[i].handle);
515 if (!p->post_deps[i].syncobj)
516 return -EINVAL;
517 p->post_deps[i].chain = NULL;
518 p->post_deps[i].point = 0;
519 p->num_post_deps++;
520 }
521
522 return 0;
523 }
524
525 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
526 struct amdgpu_cs_chunk *chunk)
527 {
528 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
529 unsigned int num_deps;
530 int i;
531
532 num_deps = chunk->length_dw * 4 /
533 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
534
535 if (p->post_deps)
536 return -EINVAL;
537
538 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
539 GFP_KERNEL);
540 p->num_post_deps = 0;
541
542 if (!p->post_deps)
543 return -ENOMEM;
544
545 for (i = 0; i < num_deps; ++i) {
546 struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
547
548 dep->chain = NULL;
549 if (syncobj_deps[i].point) {
550 dep->chain = dma_fence_chain_alloc();
551 if (!dep->chain)
552 return -ENOMEM;
553 }
554
555 dep->syncobj = drm_syncobj_find(p->filp,
556 syncobj_deps[i].handle);
557 if (!dep->syncobj) {
558 dma_fence_chain_free(dep->chain);
559 return -EINVAL;
560 }
561 dep->point = syncobj_deps[i].point;
562 p->num_post_deps++;
563 }
564
565 return 0;
566 }
567
568 static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
569 struct amdgpu_cs_chunk *chunk)
570 {
571 struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
572 int i;
573
574 if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
575 return -EINVAL;
576
577 for (i = 0; i < p->gang_size; ++i) {
578 p->jobs[i]->shadow_va = shadow->shadow_va;
579 p->jobs[i]->csa_va = shadow->csa_va;
580 p->jobs[i]->gds_va = shadow->gds_va;
581 p->jobs[i]->init_shadow =
582 shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
583 }
584
585 return 0;
586 }
587
588 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
589 {
590 unsigned int ce_preempt = 0, de_preempt = 0;
591 int i, r;
592
593 for (i = 0; i < p->nchunks; ++i) {
594 struct amdgpu_cs_chunk *chunk;
595
596 chunk = &p->chunks[i];
597
598 switch (chunk->chunk_id) {
599 case AMDGPU_CHUNK_ID_IB:
600 r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
601 if (r)
602 return r;
603 break;
604 case AMDGPU_CHUNK_ID_DEPENDENCIES:
605 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
606 r = amdgpu_cs_p2_dependencies(p, chunk);
607 if (r)
608 return r;
609 break;
610 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
611 r = amdgpu_cs_p2_syncobj_in(p, chunk);
612 if (r)
613 return r;
614 break;
615 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
616 r = amdgpu_cs_p2_syncobj_out(p, chunk);
617 if (r)
618 return r;
619 break;
620 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
621 r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
622 if (r)
623 return r;
624 break;
625 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
626 r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
627 if (r)
628 return r;
629 break;
630 case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
631 r = amdgpu_cs_p2_shadow(p, chunk);
632 if (r)
633 return r;
634 break;
635 }
636 }
637
638 return 0;
639 }
640
641 /* Convert microseconds to bytes. */
642 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
643 {
644 if (us <= 0 || !adev->mm_stats.log2_max_MBps)
645 return 0;
646
647 /* Since accum_us is incremented by a million per second, just
648 * multiply it by the number of MB/s to get the number of bytes.
649 */
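	/*
	 * Illustrative example (values are not from the code): with
	 * log2_max_MBps == 6, i.e. 64 MB/s, 250000 accumulated us allow
	 * 250000 << 6 = 16000000 bytes, roughly 16 MB of buffer moves.
	 */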
650 return us << adev->mm_stats.log2_max_MBps;
651 }
652
653 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
654 {
655 if (!adev->mm_stats.log2_max_MBps)
656 return 0;
657
658 return bytes >> adev->mm_stats.log2_max_MBps;
659 }
660
661 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
662 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
663 * which means it can go over the threshold once. If that happens, the driver
664 * will be in debt and no other buffer migrations can be done until that debt
665 * is repaid.
666 *
667 * This approach allows moving a buffer of any size (it's important to allow
668 * that).
669 *
670 * The currency is simply time in microseconds and it increases as the clock
671 * ticks. The accumulated microseconds (us) are converted to bytes and
672 * returned.
673 */
674 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
675 u64 *max_bytes,
676 u64 *max_vis_bytes)
677 {
678 s64 time_us, increment_us;
679 u64 free_vram, total_vram, used_vram;
680 /* Allow a maximum of 200 accumulated ms. This is basically per-IB
681 * throttling.
682 *
683 * It means that in order to get full max MBps, at least 5 IBs per
684 * second must be submitted and not more than 200ms apart from each
685 * other.
686 */
687 const s64 us_upper_bound = 200000;
688
689 if (!adev->mm_stats.log2_max_MBps) {
690 *max_bytes = 0;
691 *max_vis_bytes = 0;
692 return;
693 }
694
695 total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
696 used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
697 free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
698
699 spin_lock(&adev->mm_stats.lock);
700
701 /* Increase the amount of accumulated us. */
702 time_us = ktime_to_us(ktime_get());
703 increment_us = time_us - adev->mm_stats.last_update_us;
704 adev->mm_stats.last_update_us = time_us;
705 adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
706 us_upper_bound);
707
708 /* This prevents the short period of low performance when the VRAM
709 * usage is low and the driver is in debt or doesn't have enough
710 * accumulated us to fill VRAM quickly.
711 *
712 * The situation can occur in these cases:
713 * - a lot of VRAM is freed by userspace
714 * - the presence of a big buffer causes a lot of evictions
715 * (solution: split buffers into smaller ones)
716 *
717 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
718 * accum_us to a positive number.
719 */
720 if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
721 s64 min_us;
722
723 /* Be more aggressive on dGPUs. Try to fill a portion of free
724 * VRAM now.
725 */
726 if (!(adev->flags & AMD_IS_APU))
727 min_us = bytes_to_us(adev, free_vram / 4);
728 else
729 min_us = 0; /* Reset accum_us on APUs. */
730
731 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
732 }
733
734 /* This is set to 0 if the driver is in debt to disallow (optional)
735 * buffer moves.
736 */
737 *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
738
739 /* Do the same for visible VRAM if half of it is free */
740 if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
741 u64 total_vis_vram = adev->gmc.visible_vram_size;
742 u64 used_vis_vram =
743 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
744
745 if (used_vis_vram < total_vis_vram) {
746 u64 free_vis_vram = total_vis_vram - used_vis_vram;
747
748 adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
749 increment_us, us_upper_bound);
750
751 if (free_vis_vram >= total_vis_vram / 2)
752 adev->mm_stats.accum_us_vis =
753 max(bytes_to_us(adev, free_vis_vram / 2),
754 adev->mm_stats.accum_us_vis);
755 }
756
757 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
758 } else {
759 *max_vis_bytes = 0;
760 }
761
762 spin_unlock(&adev->mm_stats.lock);
763 }
764
765 /* Report how many bytes have really been moved for the last command
766 * submission. This can result in a debt that can stop buffer migrations
767 * temporarily.
768 */
769 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
770 u64 num_vis_bytes)
771 {
772 spin_lock(&adev->mm_stats.lock);
773 adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
774 adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
775 spin_unlock(&adev->mm_stats.lock);
776 }
777
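/*
 * Per-BO validation callback, used for the VM page table BOs as well as
 * the BOs in the list: keep the preferred placement while the move budget
 * computed by amdgpu_cs_get_threshold_for_moves() lasts, fall back to the
 * allowed domains once it (or the visible VRAM budget for
 * CPU_ACCESS_REQUIRED BOs) is exhausted, and retry with the allowed
 * domains when validation runs out of memory.
 */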
778 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
779 {
780 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
781 struct amdgpu_cs_parser *p = param;
782 struct ttm_operation_ctx ctx = {
783 .interruptible = true,
784 .no_wait_gpu = false,
785 .resv = bo->tbo.base.resv
786 };
787 uint32_t domain;
788 int r;
789
790 if (bo->tbo.pin_count)
791 return 0;
792
793 /* Don't move this buffer if we have depleted our allowance
794 * to move it. Don't move anything if the threshold is zero.
795 */
796 if (p->bytes_moved < p->bytes_moved_threshold &&
797 (!bo->tbo.base.dma_buf ||
798 list_empty(&bo->tbo.base.dma_buf->attachments))) {
799 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
800 (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
801 /* And don't move a CPU_ACCESS_REQUIRED BO to limited
802 * visible VRAM if we've depleted our allowance to do
803 * that.
804 */
805 if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
806 domain = bo->preferred_domains;
807 else
808 domain = bo->allowed_domains;
809 } else {
810 domain = bo->preferred_domains;
811 }
812 } else {
813 domain = bo->allowed_domains;
814 }
815
816 retry:
817 amdgpu_bo_placement_from_domain(bo, domain);
818 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
819
820 p->bytes_moved += ctx.bytes_moved;
821 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
822 amdgpu_res_cpu_visible(adev, bo->tbo.resource))
823 p->bytes_moved_vis += ctx.bytes_moved;
824
825 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
826 domain = bo->allowed_domains;
827 goto retry;
828 }
829
830 return r;
831 }
832
833 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
834 union drm_amdgpu_cs *cs)
835 {
836 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
837 struct ttm_operation_ctx ctx = { true, false };
838 struct amdgpu_vm *vm = &fpriv->vm;
839 struct amdgpu_bo_list_entry *e;
840 struct drm_gem_object *obj;
841 unsigned long index;
842 unsigned int i;
843 int r;
844
845 /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
846 if (cs->in.bo_list_handle) {
847 if (p->bo_list)
848 return -EINVAL;
849
850 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
851 &p->bo_list);
852 if (r)
853 return r;
854 } else if (!p->bo_list) {
855 		/* Create an empty bo_list when no handle is provided */
856 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
857 &p->bo_list);
858 if (r)
859 return r;
860 }
861
862 mutex_lock(&p->bo_list->bo_list_mutex);
863
864 	/* Get userptr backing pages. If pages are updated after being registered
865 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
866 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
867 */
868 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
869 bool userpage_invalidated = false;
870 struct amdgpu_bo *bo = e->bo;
871 int i;
872
873 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
874 sizeof(struct vm_page *),
875 GFP_KERNEL | __GFP_ZERO);
876 if (!e->user_pages) {
877 DRM_ERROR("kvmalloc_array failure\n");
878 r = -ENOMEM;
879 goto out_free_user_pages;
880 }
881
882 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
883 if (r) {
884 kvfree(e->user_pages);
885 e->user_pages = NULL;
886 goto out_free_user_pages;
887 }
888
889 for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
890 if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
891 userpage_invalidated = true;
892 break;
893 }
894 }
895 e->user_invalidated = userpage_invalidated;
896 }
897
898 drm_exec_until_all_locked(&p->exec) {
899 r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
900 drm_exec_retry_on_contention(&p->exec);
901 if (unlikely(r))
902 goto out_free_user_pages;
903
904 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
905 /* One fence for TTM and one for each CS job */
906 r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
907 1 + p->gang_size);
908 drm_exec_retry_on_contention(&p->exec);
909 if (unlikely(r))
910 goto out_free_user_pages;
911
912 e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
913 }
914
915 if (p->uf_bo) {
916 r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
917 1 + p->gang_size);
918 drm_exec_retry_on_contention(&p->exec);
919 if (unlikely(r))
920 goto out_free_user_pages;
921 }
922 }
923
924 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
925 #ifdef notyet
926 struct mm_struct *usermm;
927
928 usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
929 if (usermm && usermm != current->mm) {
930 r = -EPERM;
931 goto out_free_user_pages;
932 }
933 #endif
934
935 if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
936 e->user_invalidated && e->user_pages) {
937 amdgpu_bo_placement_from_domain(e->bo,
938 AMDGPU_GEM_DOMAIN_CPU);
939 r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
940 &ctx);
941 if (r)
942 goto out_free_user_pages;
943
944 amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
945 e->user_pages);
946 }
947
948 kvfree(e->user_pages);
949 e->user_pages = NULL;
950 }
951
952 amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
953 &p->bytes_moved_vis_threshold);
954 p->bytes_moved = 0;
955 p->bytes_moved_vis = 0;
956
957 r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
958 amdgpu_cs_bo_validate, p);
959 if (r) {
960 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
961 goto out_free_user_pages;
962 }
963
964 drm_exec_for_each_locked_object(&p->exec, index, obj) {
965 r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
966 if (unlikely(r))
967 goto out_free_user_pages;
968 }
969
970 if (p->uf_bo) {
971 r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
972 if (unlikely(r))
973 goto out_free_user_pages;
974
975 p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
976 }
977
978 amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
979 p->bytes_moved_vis);
980
981 for (i = 0; i < p->gang_size; ++i)
982 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
983 p->bo_list->gws_obj,
984 p->bo_list->oa_obj);
985 return 0;
986
987 out_free_user_pages:
988 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
989 struct amdgpu_bo *bo = e->bo;
990
991 if (!e->user_pages)
992 continue;
993 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
994 kvfree(e->user_pages);
995 e->user_pages = NULL;
996 e->range = NULL;
997 }
998 mutex_unlock(&p->bo_list->bo_list_mutex);
999 return r;
1000 }
1001
1002 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1003 {
1004 int i, j;
1005
1006 if (!trace_amdgpu_cs_enabled())
1007 return;
1008
1009 for (i = 0; i < p->gang_size; ++i) {
1010 struct amdgpu_job *job = p->jobs[i];
1011
1012 for (j = 0; j < job->num_ibs; ++j)
1013 trace_amdgpu_cs(p, job, &job->ibs[j]);
1014 }
1015 }
1016
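/*
 * For rings which emulate VM support (UVD/VCE) map the IB BO and either
 * copy and parse its contents (parse_cs) or patch it in place
 * (patch_cs_in_place).
 */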
1017 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1018 struct amdgpu_job *job)
1019 {
1020 struct amdgpu_ring *ring = amdgpu_job_ring(job);
1021 unsigned int i;
1022 int r;
1023
1024 /* Only for UVD/VCE VM emulation */
1025 if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1026 return 0;
1027
1028 for (i = 0; i < job->num_ibs; ++i) {
1029 struct amdgpu_ib *ib = &job->ibs[i];
1030 struct amdgpu_bo_va_mapping *m;
1031 struct amdgpu_bo *aobj;
1032 uint64_t va_start;
1033 uint8_t *kptr;
1034
1035 va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1036 r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1037 if (r) {
1038 DRM_ERROR("IB va_start is invalid\n");
1039 return r;
1040 }
1041
1042 if ((va_start + ib->length_dw * 4) >
1043 (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1044 DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1045 return -EINVAL;
1046 }
1047
1048 /* the IB should be reserved at this point */
1049 r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1050 if (r)
1051 return r;
1052
1053 kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1054
1055 if (ring->funcs->parse_cs) {
1056 memcpy(ib->ptr, kptr, ib->length_dw * 4);
1057 amdgpu_bo_kunmap(aobj);
1058
1059 r = amdgpu_ring_parse_cs(ring, p, job, ib);
1060 if (r)
1061 return r;
1062 } else {
1063 ib->ptr = (uint32_t *)kptr;
1064 r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1065 amdgpu_bo_kunmap(aobj);
1066 if (r)
1067 return r;
1068 }
1069 }
1070
1071 return 0;
1072 }
1073
1074 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1075 {
1076 unsigned int i;
1077 int r;
1078
1079 for (i = 0; i < p->gang_size; ++i) {
1080 r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1081 if (r)
1082 return r;
1083 }
1084 return 0;
1085 }
1086
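/*
 * Bring the page tables up to date for this submission: clear freed
 * mappings, update the PRT, CSA and per-BO virtual address mappings,
 * handle moved BOs and the page directories, and make the submission wait
 * for the resulting page table updates.
 */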
1087 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1088 {
1089 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1090 struct amdgpu_job *job = p->gang_leader;
1091 struct amdgpu_device *adev = p->adev;
1092 struct amdgpu_vm *vm = &fpriv->vm;
1093 struct amdgpu_bo_list_entry *e;
1094 struct amdgpu_bo_va *bo_va;
1095 unsigned int i;
1096 int r;
1097
1098 r = amdgpu_vm_clear_freed(adev, vm, NULL);
1099 if (r)
1100 return r;
1101
1102 r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1103 if (r)
1104 return r;
1105
1106 r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1107 if (r)
1108 return r;
1109
1110 if (fpriv->csa_va) {
1111 bo_va = fpriv->csa_va;
1112 BUG_ON(!bo_va);
1113 r = amdgpu_vm_bo_update(adev, bo_va, false);
1114 if (r)
1115 return r;
1116
1117 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1118 if (r)
1119 return r;
1120 }
1121
1122 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1123 bo_va = e->bo_va;
1124 if (bo_va == NULL)
1125 continue;
1126
1127 r = amdgpu_vm_bo_update(adev, bo_va, false);
1128 if (r)
1129 return r;
1130
1131 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1132 if (r)
1133 return r;
1134 }
1135
1136 r = amdgpu_vm_handle_moved(adev, vm);
1137 if (r)
1138 return r;
1139
1140 r = amdgpu_vm_update_pdes(adev, vm, false);
1141 if (r)
1142 return r;
1143
1144 r = amdgpu_sync_fence(&p->sync, vm->last_update);
1145 if (r)
1146 return r;
1147
1148 for (i = 0; i < p->gang_size; ++i) {
1149 job = p->jobs[i];
1150
1151 if (!job->vm)
1152 continue;
1153
1154 job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1155 }
1156
1157 if (amdgpu_vm_debug) {
1158 /* Invalidate all BOs to test for userspace bugs */
1159 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1160 struct amdgpu_bo *bo = e->bo;
1161
1162 /* ignore duplicates */
1163 if (!bo)
1164 continue;
1165
1166 amdgpu_vm_bo_invalidate(adev, bo, false);
1167 }
1168 }
1169
1170 return 0;
1171 }
1172
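/*
 * Gather all fences the submission still needs to wait for: throttle on
 * the previous fence of the context entity, sync with the reservation
 * objects of every locked BO, push the collected fences into each job and
 * keep fences from the gang leader's own ring as explicit dependencies so
 * that a pipeline sync is inserted (see the comment below).
 */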
1173 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1174 {
1175 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1176 struct drm_gpu_scheduler *sched;
1177 struct drm_gem_object *obj;
1178 struct dma_fence *fence;
1179 unsigned long index;
1180 unsigned int i;
1181 int r;
1182
1183 r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1184 if (r) {
1185 if (r != -ERESTARTSYS)
1186 DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1187 return r;
1188 }
1189
1190 drm_exec_for_each_locked_object(&p->exec, index, obj) {
1191 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
1192
1193 struct dma_resv *resv = bo->tbo.base.resv;
1194 enum amdgpu_sync_mode sync_mode;
1195
1196 sync_mode = amdgpu_bo_explicit_sync(bo) ?
1197 AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1198 r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1199 &fpriv->vm);
1200 if (r)
1201 return r;
1202 }
1203
1204 for (i = 0; i < p->gang_size; ++i) {
1205 r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1206 if (r)
1207 return r;
1208 }
1209
1210 sched = p->gang_leader->base.entity->rq->sched;
1211 while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1212 struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1213
1214 /*
1215 		 * When we have a dependency it might be necessary to insert a
1216 * pipeline sync to make sure that all caches etc are flushed and the
1217 * next job actually sees the results from the previous one
1218 * before we start executing on the same scheduler ring.
1219 */
1220 if (!s_fence || s_fence->sched != sched) {
1221 dma_fence_put(fence);
1222 continue;
1223 }
1224
1225 r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1226 dma_fence_put(fence);
1227 if (r)
1228 return r;
1229 }
1230 return 0;
1231 }
1232
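/*
 * Signal the syncobjs requested by userspace: either add the CS fence as a
 * new timeline point or replace the syncobj's fence with it.
 */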
1233 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1234 {
1235 int i;
1236
1237 for (i = 0; i < p->num_post_deps; ++i) {
1238 if (p->post_deps[i].chain && p->post_deps[i].point) {
1239 drm_syncobj_add_point(p->post_deps[i].syncobj,
1240 p->post_deps[i].chain,
1241 p->fence, p->post_deps[i].point);
1242 p->post_deps[i].chain = NULL;
1243 } else {
1244 drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1245 p->fence);
1246 }
1247 }
1248 }
1249
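/*
 * Arm and push the jobs: the scheduled fences of all gang members become
 * dependencies of the gang leader, userptrs are re-checked under the
 * notifier lock (returning -EAGAIN restarts the ioctl), the finished
 * fences are added to all locked BOs (READ for the members, WRITE for the
 * leader) and the jobs are handed over to the scheduler.
 */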
1250 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1251 union drm_amdgpu_cs *cs)
1252 {
1253 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1254 struct amdgpu_job *leader = p->gang_leader;
1255 struct amdgpu_bo_list_entry *e;
1256 struct drm_gem_object *gobj;
1257 unsigned long index;
1258 unsigned int i;
1259 uint64_t seq;
1260 int r;
1261
1262 for (i = 0; i < p->gang_size; ++i)
1263 drm_sched_job_arm(&p->jobs[i]->base);
1264
1265 for (i = 0; i < p->gang_size; ++i) {
1266 struct dma_fence *fence;
1267
1268 if (p->jobs[i] == leader)
1269 continue;
1270
1271 fence = &p->jobs[i]->base.s_fence->scheduled;
1272 dma_fence_get(fence);
1273 r = drm_sched_job_add_dependency(&leader->base, fence);
1274 if (r) {
1275 dma_fence_put(fence);
1276 return r;
1277 }
1278 }
1279
1280 if (p->gang_size > 1) {
1281 for (i = 0; i < p->gang_size; ++i)
1282 amdgpu_job_set_gang_leader(p->jobs[i], leader);
1283 }
1284
1285 /* No memory allocation is allowed while holding the notifier lock.
1286 * The lock is held until amdgpu_cs_submit is finished and fence is
1287 * added to BOs.
1288 */
1289 mutex_lock(&p->adev->notifier_lock);
1290
1291 	/* If userptrs are invalidated after amdgpu_cs_parser_bos(), return
1292 	 * -EAGAIN so that drmIoctl() in libdrm restarts the amdgpu_cs_ioctl.
1293 */
1294 r = 0;
1295 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1296 r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
1297 e->range);
1298 e->range = NULL;
1299 }
1300 if (r) {
1301 r = -EAGAIN;
1302 mutex_unlock(&p->adev->notifier_lock);
1303 return r;
1304 }
1305
1306 p->fence = dma_fence_get(&leader->base.s_fence->finished);
1307 drm_exec_for_each_locked_object(&p->exec, index, gobj) {
1308
1309 ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
1310
1311 /* Everybody except for the gang leader uses READ */
1312 for (i = 0; i < p->gang_size; ++i) {
1313 if (p->jobs[i] == leader)
1314 continue;
1315
1316 dma_resv_add_fence(gobj->resv,
1317 &p->jobs[i]->base.s_fence->finished,
1318 DMA_RESV_USAGE_READ);
1319 }
1320
1321 		/* The gang leader is remembered as the writer */
1322 dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1323 }
1324
1325 seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1326 p->fence);
1327 amdgpu_cs_post_dependencies(p);
1328
1329 if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1330 !p->ctx->preamble_presented) {
1331 leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1332 p->ctx->preamble_presented = true;
1333 }
1334
1335 cs->out.handle = seq;
1336 leader->uf_sequence = seq;
1337
1338 amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
1339 for (i = 0; i < p->gang_size; ++i) {
1340 amdgpu_job_free_resources(p->jobs[i]);
1341 trace_amdgpu_cs_ioctl(p->jobs[i]);
1342 drm_sched_entity_push_job(&p->jobs[i]->base);
1343 p->jobs[i] = NULL;
1344 }
1345
1346 amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1347
1348 mutex_unlock(&p->adev->notifier_lock);
1349 mutex_unlock(&p->bo_list->bo_list_mutex);
1350 return 0;
1351 }
1352
1353 /* Cleanup the parser structure */
1354 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1355 {
1356 unsigned int i;
1357
1358 amdgpu_sync_free(&parser->sync);
1359 drm_exec_fini(&parser->exec);
1360
1361 for (i = 0; i < parser->num_post_deps; i++) {
1362 drm_syncobj_put(parser->post_deps[i].syncobj);
1363 kfree(parser->post_deps[i].chain);
1364 }
1365 kfree(parser->post_deps);
1366
1367 dma_fence_put(parser->fence);
1368
1369 if (parser->ctx)
1370 amdgpu_ctx_put(parser->ctx);
1371 if (parser->bo_list)
1372 amdgpu_bo_list_put(parser->bo_list);
1373
1374 for (i = 0; i < parser->nchunks; i++)
1375 kvfree(parser->chunks[i].kdata);
1376 kvfree(parser->chunks);
1377 for (i = 0; i < parser->gang_size; ++i) {
1378 if (parser->jobs[i])
1379 amdgpu_job_free(parser->jobs[i]);
1380 }
1381 amdgpu_bo_unref(&parser->uf_bo);
1382 }
1383
1384 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1385 {
1386 struct amdgpu_device *adev = drm_to_adev(dev);
1387 struct amdgpu_cs_parser parser;
1388 int r;
1389
1390 if (amdgpu_ras_intr_triggered())
1391 return -EHWPOISON;
1392
1393 if (!adev->accel_working)
1394 return -EBUSY;
1395
1396 r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1397 if (r) {
1398 if (printk_ratelimit())
1399 DRM_ERROR("Failed to initialize parser %d!\n", r);
1400 return r;
1401 }
1402
1403 r = amdgpu_cs_pass1(&parser, data);
1404 if (r)
1405 goto error_fini;
1406
1407 r = amdgpu_cs_pass2(&parser);
1408 if (r)
1409 goto error_fini;
1410
1411 r = amdgpu_cs_parser_bos(&parser, data);
1412 if (r) {
1413 if (r == -ENOMEM)
1414 DRM_ERROR("Not enough memory for command submission!\n");
1415 else if (r != -ERESTARTSYS && r != -EAGAIN)
1416 DRM_DEBUG("Failed to process the buffer list %d!\n", r);
1417 goto error_fini;
1418 }
1419
1420 r = amdgpu_cs_patch_jobs(&parser);
1421 if (r)
1422 goto error_backoff;
1423
1424 r = amdgpu_cs_vm_handling(&parser);
1425 if (r)
1426 goto error_backoff;
1427
1428 r = amdgpu_cs_sync_rings(&parser);
1429 if (r)
1430 goto error_backoff;
1431
1432 trace_amdgpu_cs_ibs(&parser);
1433
1434 r = amdgpu_cs_submit(&parser, data);
1435 if (r)
1436 goto error_backoff;
1437
1438 amdgpu_cs_parser_fini(&parser);
1439 return 0;
1440
1441 error_backoff:
1442 mutex_unlock(&parser.bo_list->bo_list_mutex);
1443
1444 error_fini:
1445 amdgpu_cs_parser_fini(&parser);
1446 return r;
1447 }
1448
1449 /**
1450 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1451 *
1452 * @dev: drm device
1453 * @data: data from userspace
1454 * @filp: file private
1455 *
1456 * Wait for the command submission identified by handle to finish.
1457 */
1458 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1459 struct drm_file *filp)
1460 {
1461 union drm_amdgpu_wait_cs *wait = data;
1462 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1463 struct drm_sched_entity *entity;
1464 struct amdgpu_ctx *ctx;
1465 struct dma_fence *fence;
1466 long r;
1467
1468 ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1469 if (ctx == NULL)
1470 return -EINVAL;
1471
1472 r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1473 wait->in.ring, &entity);
1474 if (r) {
1475 amdgpu_ctx_put(ctx);
1476 return r;
1477 }
1478
1479 fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1480 if (IS_ERR(fence))
1481 r = PTR_ERR(fence);
1482 else if (fence) {
1483 r = dma_fence_wait_timeout(fence, true, timeout);
1484 if (r > 0 && fence->error)
1485 r = fence->error;
1486 dma_fence_put(fence);
1487 } else
1488 r = 1;
1489
1490 amdgpu_ctx_put(ctx);
1491 if (r < 0)
1492 return r;
1493
1494 memset(wait, 0, sizeof(*wait));
1495 wait->out.status = (r == 0);
1496
1497 return 0;
1498 }
1499
1500 /**
1501 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1502 *
1503 * @adev: amdgpu device
1504 * @filp: file private
1505 * @user: drm_amdgpu_fence copied from user space
1506 */
1507 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1508 struct drm_file *filp,
1509 struct drm_amdgpu_fence *user)
1510 {
1511 struct drm_sched_entity *entity;
1512 struct amdgpu_ctx *ctx;
1513 struct dma_fence *fence;
1514 int r;
1515
1516 ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1517 if (ctx == NULL)
1518 return ERR_PTR(-EINVAL);
1519
1520 r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1521 user->ring, &entity);
1522 if (r) {
1523 amdgpu_ctx_put(ctx);
1524 return ERR_PTR(r);
1525 }
1526
1527 fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1528 amdgpu_ctx_put(ctx);
1529
1530 return fence;
1531 }
1532
1533 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1534 struct drm_file *filp)
1535 {
1536 struct amdgpu_device *adev = drm_to_adev(dev);
1537 union drm_amdgpu_fence_to_handle *info = data;
1538 struct dma_fence *fence;
1539 struct drm_syncobj *syncobj;
1540 struct sync_file *sync_file;
1541 int fd, r;
1542
1543 fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1544 if (IS_ERR(fence))
1545 return PTR_ERR(fence);
1546
1547 if (!fence)
1548 fence = dma_fence_get_stub();
1549
1550 switch (info->in.what) {
1551 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1552 r = drm_syncobj_create(&syncobj, 0, fence);
1553 dma_fence_put(fence);
1554 if (r)
1555 return r;
1556 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1557 drm_syncobj_put(syncobj);
1558 return r;
1559
1560 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1561 r = drm_syncobj_create(&syncobj, 0, fence);
1562 dma_fence_put(fence);
1563 if (r)
1564 return r;
1565 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1566 drm_syncobj_put(syncobj);
1567 return r;
1568
1569 case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1570 fd = get_unused_fd_flags(O_CLOEXEC);
1571 if (fd < 0) {
1572 dma_fence_put(fence);
1573 return fd;
1574 }
1575
1576 sync_file = sync_file_create(fence);
1577 dma_fence_put(fence);
1578 if (!sync_file) {
1579 put_unused_fd(fd);
1580 return -ENOMEM;
1581 }
1582
1583 fd_install(fd, sync_file->file);
1584 info->out.handle = fd;
1585 return 0;
1586
1587 default:
1588 dma_fence_put(fence);
1589 return -EINVAL;
1590 }
1591 }
1592
1593 /**
1594 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1595 *
1596 * @adev: amdgpu device
1597 * @filp: file private
1598 * @wait: wait parameters
1599 * @fences: array of drm_amdgpu_fence
1600 */
1601 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1602 struct drm_file *filp,
1603 union drm_amdgpu_wait_fences *wait,
1604 struct drm_amdgpu_fence *fences)
1605 {
1606 uint32_t fence_count = wait->in.fence_count;
1607 unsigned int i;
1608 long r = 1;
1609
1610 for (i = 0; i < fence_count; i++) {
1611 struct dma_fence *fence;
1612 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1613
1614 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1615 if (IS_ERR(fence))
1616 return PTR_ERR(fence);
1617 else if (!fence)
1618 continue;
1619
1620 r = dma_fence_wait_timeout(fence, true, timeout);
1621 if (r > 0 && fence->error)
1622 r = fence->error;
1623
1624 dma_fence_put(fence);
1625 if (r < 0)
1626 return r;
1627
1628 if (r == 0)
1629 break;
1630 }
1631
1632 memset(wait, 0, sizeof(*wait));
1633 wait->out.status = (r > 0);
1634
1635 return 0;
1636 }
1637
1638 /**
1639 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1640 *
1641 * @adev: amdgpu device
1642 * @filp: file private
1643 * @wait: wait parameters
1644 * @fences: array of drm_amdgpu_fence
1645 */
1646 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1647 struct drm_file *filp,
1648 union drm_amdgpu_wait_fences *wait,
1649 struct drm_amdgpu_fence *fences)
1650 {
1651 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1652 uint32_t fence_count = wait->in.fence_count;
1653 uint32_t first = ~0;
1654 struct dma_fence **array;
1655 unsigned int i;
1656 long r;
1657
1658 /* Prepare the fence array */
1659 array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1660
1661 if (array == NULL)
1662 return -ENOMEM;
1663
1664 for (i = 0; i < fence_count; i++) {
1665 struct dma_fence *fence;
1666
1667 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1668 if (IS_ERR(fence)) {
1669 r = PTR_ERR(fence);
1670 goto err_free_fence_array;
1671 } else if (fence) {
1672 array[i] = fence;
1673 } else { /* NULL, the fence has been already signaled */
1674 r = 1;
1675 first = i;
1676 goto out;
1677 }
1678 }
1679
1680 r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1681 &first);
1682 if (r < 0)
1683 goto err_free_fence_array;
1684
1685 out:
1686 memset(wait, 0, sizeof(*wait));
1687 wait->out.status = (r > 0);
1688 wait->out.first_signaled = first;
1689
1690 if (first < fence_count && array[first])
1691 r = array[first]->error;
1692 else
1693 r = 0;
1694
1695 err_free_fence_array:
1696 for (i = 0; i < fence_count; i++)
1697 dma_fence_put(array[i]);
1698 kfree(array);
1699
1700 return r;
1701 }
1702
1703 /**
1704 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1705 *
1706 * @dev: drm device
1707 * @data: data from userspace
1708 * @filp: file private
1709 */
1710 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1711 struct drm_file *filp)
1712 {
1713 struct amdgpu_device *adev = drm_to_adev(dev);
1714 union drm_amdgpu_wait_fences *wait = data;
1715 uint32_t fence_count = wait->in.fence_count;
1716 struct drm_amdgpu_fence *fences_user;
1717 struct drm_amdgpu_fence *fences;
1718 int r;
1719
1720 /* Get the fences from userspace */
1721 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1722 GFP_KERNEL);
1723 if (fences == NULL)
1724 return -ENOMEM;
1725
1726 fences_user = u64_to_user_ptr(wait->in.fences);
1727 if (copy_from_user(fences, fences_user,
1728 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1729 r = -EFAULT;
1730 goto err_free_fences;
1731 }
1732
1733 if (wait->in.wait_all)
1734 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1735 else
1736 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1737
1738 err_free_fences:
1739 kfree(fences);
1740
1741 return r;
1742 }
1743
1744 /**
1745 * amdgpu_cs_find_mapping - find bo_va for VM address
1746 *
1747 * @parser: command submission parser context
1748 * @addr: VM address
1749 * @bo: resulting BO of the mapping found
1750 * @map: Placeholder to return found BO mapping
1751 *
1752 * Search the buffer objects in the command submission context for a certain
1753  * virtual memory address. Returns 0 and fills in @bo and @map when the
1754  * mapping is found, a negative error code otherwise.
1755 */
1756 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1757 uint64_t addr, struct amdgpu_bo **bo,
1758 struct amdgpu_bo_va_mapping **map)
1759 {
1760 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1761 struct ttm_operation_ctx ctx = { false, false };
1762 struct amdgpu_vm *vm = &fpriv->vm;
1763 struct amdgpu_bo_va_mapping *mapping;
1764 int r;
1765
1766 addr /= AMDGPU_GPU_PAGE_SIZE;
1767
1768 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1769 if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1770 return -EINVAL;
1771
1772 *bo = mapping->bo_va->base.bo;
1773 *map = mapping;
1774
1775 /* Double check that the BO is reserved by this CS */
1776 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
1777 return -EINVAL;
1778
1779 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1780 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1781 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1782 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1783 if (r)
1784 return r;
1785 }
1786
1787 return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1788 }
1789