xref: /linux/drivers/gpu/drm/msm/msm_gem_submit.c (revision f86fd32d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */
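/*
 * Illustrative sketch of a submission as userspace might build it
 * (field names per include/uapi/drm/msm_drm.h; cmds_handle,
 * last_known_iova, cmdstream_size and queue_id are placeholders,
 * and error handling is omitted):
 *
 *	struct drm_msm_gem_submit_bo bo = {
 *		.flags    = MSM_SUBMIT_BO_READ,
 *		.handle   = cmds_handle,
 *		.presumed = last_known_iova,
 *	};
 *	struct drm_msm_gem_submit_cmd cmd = {
 *		.type          = MSM_SUBMIT_CMD_BUF,
 *		.submit_idx    = 0,              // index into bos[]
 *		.submit_offset = 0,
 *		.size          = cmdstream_size, // in bytes
 *	};
 *	struct drm_msm_gem_submit req = {
 *		.flags   = MSM_PIPE_3D0,
 *		.queueid = queue_id,
 *		.bos     = (uintptr_t)&bo,  .nr_bos  = 1,
 *		.cmds    = (uintptr_t)&cmd, .nr_cmds = 1,
 *	};
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
 */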

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000
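/*
 * These kernel-internal bits share the per-bo flags word with the
 * userspace-visible MSM_SUBMIT_BO_* bits (hence the high, otherwise
 * unused values above); they track how far setup got for each buffer
 * so the error paths and submit_cleanup() know exactly what to undo.
 */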

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = struct_size(submit, bos, nr_bos) +
				  ((u64)nr_cmds * sizeof(submit->cmd[0]));

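	/* sz is computed in 64-bit, so on 32-bit kernels this catches
	 * requests that would overflow size_t:
	 */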
	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->aspace = aspace;
	submit->gpu = gpu;
	submit->fence = NULL;
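	/* bos[] and cmd[] live in the single allocation above; cmd[]
	 * starts right after the variable-length bos[] array:
	 */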
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->prio];

	/* initially, until copy_from_user() and bo lookup succeed: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);

	return submit;
}

void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
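			/* reset i so nr_bos ends up 0 below, and cleanup
			 * skips entries that were never looked up:
			 */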
			i = 0;
			goto out;
		}

/* at least one of the READ or WRITE flags must be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova  = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;

		/* normally we would use drm_gem_object_lookup(), but since
		 * the bulk lookup is all under a single table_lock we hit
		 * object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
		int i, bool backoff)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		dma_resv_unlock(msm_obj->base.resv);

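	/* when backing off to retry locking, zero out any iova we cannot
	 * vouch for (not marked BO_VALID) so nothing stale survives into
	 * the retry:
	 */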
	if (backoff && !(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bo's are reserved (locked): */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

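	/*
	 * Standard ww_mutex dance: try to lock all the bos under one
	 * acquire ticket; on -EDEADLK back everything off, slow-lock
	 * the contended bo, and retry with that lock already held.
	 */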
retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
							  &submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, slow-lock the contended bo and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = dma_resv_reserve_shared(msm_obj->base.resv,
								1);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
			write);
		if (ret)
			break;
	}

	return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

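	/* submit->valid stays true only if every bo gets pinned at the
	 * iova userspace presumed; in that case the cmdstream is already
	 * correct and submit_reloc() can be skipped entirely.
	 */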
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_and_pin_iova(&msm_obj->base,
				submit->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably need
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

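		/* relocs must not run backwards within the buffer, and
		 * must stay inside it:
		 */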
		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

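		/* negative shift means shift right; e.g. a shift of -32
		 * writes the upper 32 bits of a 64-bit iova into this
		 * dword:
		 */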
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}

static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i, false);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_put(&msm_obj->base);
	}
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct sync_file *sync_file = NULL;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	bool has_ww_ticket = false;
	unsigned i;
	int ret, submitid;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have the 3d pipe.. eventually this would need to
	 * be more clever to dispatch to the appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Get a unique identifier for the submission for logging purposes */
	submitid = atomic_inc_return(&ident) - 1;

	ring = gpu->rb[queue->prio];
	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
		args->nr_bos, args->nr_cmds);

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, ring->fctx->context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
		args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	submit->pid = pid;
	submit->ident = submitid;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ww_acquire_init(&submit->ticket, &reservation_ww_class);
	has_ww_ticket = true;
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

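	/*
	 * Validate and copy in each cmd: it must reference a bo in the
	 * bos table, be dword-aligned, and stay inside that bo; relocs
	 * only need processing if some bo moved from its presumed iova.
	 */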
	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(ring->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	submit_cleanup(submit);
	if (has_ww_ticket)
		ww_acquire_fini(&submit->ticket);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
602