xref: /linux/drivers/gpu/drm/panthor/panthor_drv.c (revision 2b55639a)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
3 /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
4 /* Copyright 2019 Collabora ltd. */
5 
6 #include <linux/list.h>
7 #include <linux/module.h>
8 #include <linux/of_platform.h>
9 #include <linux/pagemap.h>
10 #include <linux/platform_device.h>
11 #include <linux/pm_runtime.h>
12 
13 #include <drm/drm_auth.h>
14 #include <drm/drm_debugfs.h>
15 #include <drm/drm_drv.h>
16 #include <drm/drm_exec.h>
17 #include <drm/drm_ioctl.h>
18 #include <drm/drm_syncobj.h>
19 #include <drm/drm_utils.h>
20 #include <drm/gpu_scheduler.h>
21 #include <drm/panthor_drm.h>
22 
23 #include "panthor_device.h"
24 #include "panthor_fw.h"
25 #include "panthor_gem.h"
26 #include "panthor_gpu.h"
27 #include "panthor_heap.h"
28 #include "panthor_mmu.h"
29 #include "panthor_regs.h"
30 #include "panthor_sched.h"
31 
32 /**
33  * DOC: user <-> kernel object copy helpers.
34  */
35 
36 /**
37  * panthor_set_uobj() - Copy kernel object to user object.
38  * @usr_ptr: User pointer.
39  * @usr_size: Size of the user object.
40  * @min_size: Minimum size for this object.
41  * @kern_size: Size of the kernel object.
42  * @in: Address of the kernel object to copy.
43  *
44  * Helper automating kernel -> user object copies.
45  *
46  * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
47  *
48  * Return: 0 on success, a negative error code otherwise.
49  */
50 static int
51 panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
52 {
53 	/* User size shouldn't be smaller than the minimal object size. */
54 	if (usr_size < min_size)
55 		return -EINVAL;
56 
57 	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_size, kern_size)))
58 		return -EFAULT;
59 
60 	/* When the kernel object is smaller than the user object, we fill the gap with
61 	 * zeros.
62 	 */
63 	if (usr_size > kern_size &&
64 	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size)) {
65 		return -EFAULT;
66 	}
67 
68 	return 0;
69 }
70 
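/*
 * Worked example (illustration only, not part of the driver): with a
 * 16-byte kernel object, an old userspace passing usr_size == 12 gets
 * the first 12 bytes, while a newer userspace passing usr_size == 24
 * gets all 16 bytes followed by 8 bytes of zeros, so fields the kernel
 * doesn't know about always read back as zero.
 */
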
71 /**
72  * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
73  * @in: The object array to copy.
74  * @min_stride: Minimum array stride.
75  * @obj_size: Kernel object size.
76  *
77  * Helper automating user -> kernel object copies.
78  *
79  * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
80  *
81  * Return: newly allocated object array or an ERR_PTR on error.
82  */
83 static void *
84 panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
85 		       u32 obj_size)
86 {
87 	int ret = 0;
88 	void *out_alloc;
89 
90 	if (!in->count)
91 		return NULL;
92 
93 	/* User stride must be at least the minimum object size, otherwise it might
94 	 * lack useful information.
95 	 */
96 	if (in->stride < min_stride)
97 		return ERR_PTR(-EINVAL);
98 
99 	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
100 	if (!out_alloc)
101 		return ERR_PTR(-ENOMEM);
102 
103 	if (obj_size == in->stride) {
104 		/* Fast path when user/kernel have the same uAPI header version. */
105 		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
106 				   (unsigned long)obj_size * in->count))
107 			ret = -EFAULT;
108 	} else {
109 		void __user *in_ptr = u64_to_user_ptr(in->array);
110 		void *out_ptr = out_alloc;
111 
112 		/* If the sizes differ, we need to copy elements one by one. */
113 		for (u32 i = 0; i < in->count; i++) {
114 			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
115 			if (ret)
116 				break;
117 
118 			out_ptr += obj_size;
119 			in_ptr += in->stride;
120 		}
121 	}
122 
123 	if (ret) {
124 		kvfree(out_alloc);
125 		return ERR_PTR(ret);
126 	}
127 
128 	return out_alloc;
129 }
130 
131 /**
132  * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
133  * @_typename: Object type.
134  * @_last_mandatory_field: Last mandatory field.
135  *
136  * Get the minimum user object size based on the last mandatory field name,
137  * A.K.A, the name of the last field of the structure at the time this
138  * a.k.a. the name of the last field of the structure at the time this
139  *
140  * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
141  */
142 #define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
143 	(offsetof(_typename, _last_mandatory_field) + \
144 	 sizeof(((_typename *)NULL)->_last_mandatory_field))
145 
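/*
 * Illustrative sketch (not part of the driver): for a hypothetical uAPI
 * struct, the minimum size is the end offset of the last field that
 * existed when the struct entered the uAPI, which lets the kernel tell
 * old and new userspace layouts apart by size alone:
 *
 *	struct example_uobj {
 *		__u32 foo;
 *		__u32 bar;		// last mandatory field
 *		__u64 added_later;	// extension, may be absent
 *	};
 *
 *	// PANTHOR_UOBJ_MIN_SIZE_INTERNAL(struct example_uobj, bar)
 *	//	== offsetof(struct example_uobj, bar) + sizeof(__u32) == 8
 */
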
146 /**
147  * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that's subject to
148  * evolutions.
149  * @_typename: Object type.
150  * @_last_mandatory_field: Last mandatory field.
151  *
152  * Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
153  */
154 #define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
155 	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
156 
157 /**
158  * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
159  * @_obj_name: Object to get the minimum size of.
160  *
161  * Don't use this macro directly, it's automatically called by
162  * PANTHOR_UOBJ_{SET,GET_ARRAY}().
163  */
164 #define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
165 	_Generic(_obj_name, \
166 		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
167 		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
168 		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
169 		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
170 		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
171 		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
172 
173 /**
174  * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
175  * @_dest_usr_ptr: User pointer to copy to.
176  * @_usr_size: Size of the user object.
177  * @_src_obj: Kernel object to copy (not a pointer).
178  *
179  * Return: 0 on success, a negative error code otherwise.
180  */
181 #define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
182 	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
183 			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
184 			 sizeof(_src_obj), &(_src_obj))
185 
186 /**
187  * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
188  * object array.
189  * @_dest_array: Local variable that will hold the newly allocated kernel
190  * object array.
191  * @_uobj_array: The drm_panthor_obj_array object describing the user object
192  * array.
193  *
194  * Return: 0 on success, a negative error code otherwise.
195  */
196 #define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
197 	({ \
198 		typeof(_dest_array) _tmp; \
199 		_tmp = panthor_get_uobj_array(_uobj_array, \
200 					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
201 					      sizeof((_dest_array)[0])); \
202 		if (!IS_ERR(_tmp)) \
203 			_dest_array = _tmp; \
204 		PTR_ERR_OR_ZERO(_tmp); \
205 	})
206 
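/*
 * Illustrative sketch (not part of the driver): roughly how userspace is
 * expected to describe an object array. The drm_panthor_obj_array field
 * names (stride/count/array) match their use above; the rest is a
 * made-up example.
 *
 *	struct drm_panthor_queue_create queues[2] = { };
 *	struct drm_panthor_obj_array arr = {
 *		.stride = sizeof(queues[0]),	// userspace's struct size
 *		.count = 2,
 *		.array = (__u64)(uintptr_t)queues,
 *	};
 *
 * PANTHOR_UOBJ_GET_ARRAY() then copies the whole array at once when the
 * strides match, or element by element with copy_struct_from_user()
 * when they don't.
 */
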
207 /**
208  * struct panthor_sync_signal - Represent a synchronization object point to attach
209  * our job fence to.
210  *
211  * This structure is here to keep track of fences that are currently bound to
212  * a specific syncobj point.
213  *
214  * At the beginning of a job submission, the fence
215  * is retrieved from the syncobj itself, and can be NULL if no fence was attached
216  * to this point.
217  *
218  * At the end, it points to the fence of the last job that had a
219  * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
220  *
221  * With jobs being submitted in batches, the fence might change several times during
222  * the process, allowing one job to wait on a job that's part of the same submission
223  * but appears earlier in the drm_panthor_group_submit::queue_submits array.
224  */
225 struct panthor_sync_signal {
226 	/** @node: list_head to track signal ops within a submit operation */
227 	struct list_head node;
228 
229 	/** @handle: The syncobj handle. */
230 	u32 handle;
231 
232 	/**
233 	 * @point: The syncobj point.
234 	 *
235 	 * Zero for regular syncobjs, and non-zero for timeline syncobjs.
236 	 */
237 	u64 point;
238 
239 	/**
240 	 * @syncobj: The sync object pointed by @handle.
241 	 */
242 	struct drm_syncobj *syncobj;
243 
244 	/**
245 	 * @chain: Chain object used to link the new fence to an existing
246 	 * timeline syncobj.
247 	 *
248 	 * NULL for regular syncobj, non-NULL for timeline syncobjs.
249 	 */
250 	struct dma_fence_chain *chain;
251 
252 	/**
253 	 * @fence: The fence to assign to the syncobj or syncobj-point.
254 	 */
255 	struct dma_fence *fence;
256 };
257 
258 /**
259  * struct panthor_job_ctx - Job context
260  */
261 struct panthor_job_ctx {
262 	/** @job: The job that is about to be submitted to drm_sched. */
263 	struct drm_sched_job *job;
264 
265 	/** @syncops: Array of sync operations. */
266 	struct drm_panthor_sync_op *syncops;
267 
268 	/** @syncop_count: Number of sync operations. */
269 	u32 syncop_count;
270 };
271 
272 /**
273  * struct panthor_submit_ctx - Submission context
274  *
275  * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
276  * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
277  * initialization and cleanup steps.
278  */
279 struct panthor_submit_ctx {
280 	/** @file: DRM file this submission happens on. */
281 	struct drm_file *file;
282 
283 	/**
284 	 * @signals: List of struct panthor_sync_signal.
285 	 *
286 	 * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
287 	 * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
288 	 * matching the syncobj+point exists before calling
289 	 * drm_syncobj_find_fence(). This allows us to describe dependencies
290 	 * existing between jobs that are part of the same batch.
291 	 */
292 	struct list_head signals;
293 
294 	/** @jobs: Array of jobs. */
295 	struct panthor_job_ctx *jobs;
296 
297 	/** @job_count: Number of entries in the @jobs array. */
298 	u32 job_count;
299 
300 	/** @exec: drm_exec context used to acquire and prepare resv objects. */
301 	struct drm_exec exec;
302 };
303 
304 #define PANTHOR_SYNC_OP_FLAGS_MASK \
305 	(DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)
306 
307 static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
308 {
309 	return !!(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
310 }
311 
312 static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
313 {
314 	/* Note that DRM_PANTHOR_SYNC_OP_WAIT == 0 */
315 	return !(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
316 }
317 
318 /**
319  * panthor_check_sync_op() - Check drm_panthor_sync_op fields
320  * @sync_op: The sync operation to check.
321  *
322  * Return: 0 on success, -EINVAL otherwise.
323  */
324 static int
325 panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
326 {
327 	u8 handle_type;
328 
329 	if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
330 		return -EINVAL;
331 
332 	handle_type = sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK;
333 	if (handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
334 	    handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ)
335 		return -EINVAL;
336 
337 	if (handle_type == DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
338 	    sync_op->timeline_value != 0)
339 		return -EINVAL;
340 
341 	return 0;
342 }
343 
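/*
 * Illustrative sketch (not part of the driver): two sync operations that
 * would pass panthor_check_sync_op(). Field and flag names follow their
 * use above; handles and values are made up.
 *
 *	// Wait on a binary syncobj (DRM_PANTHOR_SYNC_OP_WAIT == 0).
 *	struct drm_panthor_sync_op wait_op = {
 *		.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *		.handle = bin_syncobj_handle,
 *		.timeline_value = 0,	// must be 0 for binary syncobjs
 *	};
 *
 *	// Signal point 42 of a timeline syncobj.
 *	struct drm_panthor_sync_op signal_op = {
 *		.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ |
 *			 DRM_PANTHOR_SYNC_OP_SIGNAL,
 *		.handle = timeline_syncobj_handle,
 *		.timeline_value = 42,
 *	};
 */
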
344 /**
345  * panthor_sync_signal_free() - Release resources and free a panthor_sync_signal object
346  * @sig_sync: Signal object to free.
347  */
348 static void
349 panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
350 {
351 	if (!sig_sync)
352 		return;
353 
354 	drm_syncobj_put(sig_sync->syncobj);
355 	dma_fence_chain_free(sig_sync->chain);
356 	dma_fence_put(sig_sync->fence);
357 	kfree(sig_sync);
358 }
359 
360 /**
361  * panthor_submit_ctx_add_sync_signal() - Add a signal operation to a submit context
362  * @ctx: Context to add the signal operation to.
363  * @handle: Syncobj handle.
364  * @point: Syncobj point.
365  *
366  * Return: 0 on success, otherwise negative error value.
367  */
368 static int
369 panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
370 {
371 	struct panthor_sync_signal *sig_sync;
372 	struct dma_fence *cur_fence;
373 	int ret;
374 
375 	sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
376 	if (!sig_sync)
377 		return -ENOMEM;
378 
379 	sig_sync->handle = handle;
380 	sig_sync->point = point;
381 
382 	if (point > 0) {
383 		sig_sync->chain = dma_fence_chain_alloc();
384 		if (!sig_sync->chain) {
385 			ret = -ENOMEM;
386 			goto err_free_sig_sync;
387 		}
388 	}
389 
390 	sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
391 	if (!sig_sync->syncobj) {
392 		ret = -EINVAL;
393 		goto err_free_sig_sync;
394 	}
395 
396 	/* Retrieve the current fence attached to that point. It's
397 	 * perfectly fine to get a NULL fence here, it just means there's
398 	 * no fence attached to that point yet.
399 	 */
400 	if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
401 		sig_sync->fence = cur_fence;
402 
403 	list_add_tail(&sig_sync->node, &ctx->signals);
404 
405 	return 0;
406 
407 err_free_sig_sync:
408 	panthor_sync_signal_free(sig_sync);
409 	return ret;
410 }
411 
412 /**
413  * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a
414  * submit context.
415  * @ctx: Context to search the signal operation in.
416  * @handle: Syncobj handle.
417  * @point: Syncobj point.
418  *
419  * Return: A valid panthor_sync_signal object if found, NULL otherwise.
420  */
421 static struct panthor_sync_signal *
422 panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
423 {
424 	struct panthor_sync_signal *sig_sync;
425 
426 	list_for_each_entry(sig_sync, &ctx->signals, node) {
427 		if (handle == sig_sync->handle && point == sig_sync->point)
428 			return sig_sync;
429 	}
430 
431 	return NULL;
432 }
433 
434 /**
435  * panthor_submit_ctx_add_job() - Add a job to a submit context
436  * @ctx: Context to add the job to.
437  * @idx: Index of the job in the context.
438  * @job: Job to add.
439  * @syncs: Sync operations provided by userspace.
440  *
441  * Return: 0 on success, a negative error code otherwise.
442  */
443 static int
444 panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
445 			   struct drm_sched_job *job,
446 			   const struct drm_panthor_obj_array *syncs)
447 {
448 	int ret;
449 
450 	ctx->jobs[idx].job = job;
451 
452 	ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
453 	if (ret)
454 		return ret;
455 
456 	ctx->jobs[idx].syncop_count = syncs->count;
457 	return 0;
458 }
459 
460 /**
461  * panthor_submit_ctx_get_sync_signal() - Search signal operation and add one if none was found.
462  * @ctx: Context to search the signal operation in.
463  * @handle: Syncobj handle.
464  * @point: Syncobj point.
465  *
466  * Return: 0 on success, a negative error code otherwise.
467  */
468 static int
469 panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
470 {
471 	struct panthor_sync_signal *sig_sync;
472 
473 	sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point);
474 	if (sig_sync)
475 		return 0;
476 
477 	return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
478 }
479 
480 /**
481  * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
482  * on the signal operations specified by a job.
483  * @ctx: Context to search the signal operation in.
484  * @job_idx: Index of the job to operate on.
485  *
486  * Return: 0 on success, a negative error code otherwise.
487  */
488 static int
489 panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
490 						 u32 job_idx)
491 {
492 	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
493 						    struct panthor_device,
494 						    base);
495 	struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
496 	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
497 	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
498 
499 	for (u32 i = 0; i < sync_op_count; i++) {
500 		struct dma_fence *old_fence;
501 		struct panthor_sync_signal *sig_sync;
502 
503 		if (!sync_op_is_signal(&sync_ops[i]))
504 			continue;
505 
506 		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
507 								 sync_ops[i].timeline_value);
508 		if (drm_WARN_ON(&ptdev->base, !sig_sync))
509 			return -EINVAL;
510 
511 		old_fence = sig_sync->fence;
512 		sig_sync->fence = dma_fence_get(done_fence);
513 		dma_fence_put(old_fence);
514 
515 		if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
516 			return -EINVAL;
517 	}
518 
519 	return 0;
520 }
521 
522 /**
523  * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
524  * and add them to the context.
525  * @ctx: Context to search the signal operation in.
526  * @job_idx: Index of the job to operate on.
527  *
528  * Return: 0 on success, a negative error code otherwise.
529  */
530 static int
531 panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
532 					  u32 job_idx)
533 {
534 	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
535 	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
536 
537 	for (u32 i = 0; i < sync_op_count; i++) {
538 		int ret;
539 
540 		if (!sync_op_is_signal(&sync_ops[i]))
541 			continue;
542 
543 		ret = panthor_check_sync_op(&sync_ops[i]);
544 		if (ret)
545 			return ret;
546 
547 		ret = panthor_submit_ctx_get_sync_signal(ctx,
548 							 sync_ops[i].handle,
549 							 sync_ops[i].timeline_value);
550 		if (ret)
551 			return ret;
552 	}
553 
554 	return 0;
555 }
556 
557 /**
558  * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push
559  * the currently assigned fence to the associated syncobj.
560  * @ctx: Context to push fences on.
561  *
562  * This is the last step of a submission procedure, and is done once we know the submission
563  * is effective and job fences are guaranteed to be signaled in finite time.
564  */
565 static void
566 panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
567 {
568 	struct panthor_sync_signal *sig_sync;
569 
570 	list_for_each_entry(sig_sync, &ctx->signals, node) {
571 		if (sig_sync->chain) {
572 			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
573 					      sig_sync->fence, sig_sync->point);
574 			sig_sync->chain = NULL;
575 		} else {
576 			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
577 		}
578 	}
579 }
580 
581 /**
582  * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
583  * job dependencies.
584  * @ctx: Submit context.
585  * @job_idx: Index of the job to operate on.
586  *
587  * Return: 0 on success, a negative error code otherwise.
588  */
589 static int
590 panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
591 					u32 job_idx)
592 {
593 	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
594 						    struct panthor_device,
595 						    base);
596 	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
597 	struct drm_sched_job *job = ctx->jobs[job_idx].job;
598 	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
599 	int ret = 0;
600 
601 	for (u32 i = 0; i < sync_op_count; i++) {
602 		struct panthor_sync_signal *sig_sync;
603 		struct dma_fence *fence;
604 
605 		if (!sync_op_is_wait(&sync_ops[i]))
606 			continue;
607 
608 		ret = panthor_check_sync_op(&sync_ops[i]);
609 		if (ret)
610 			return ret;
611 
612 		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
613 								 sync_ops[i].timeline_value);
614 		if (sig_sync) {
615 			if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
616 				return -EINVAL;
617 
618 			fence = dma_fence_get(sig_sync->fence);
619 		} else {
620 			ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
621 						     sync_ops[i].timeline_value,
622 						     0, &fence);
623 			if (ret)
624 				return ret;
625 		}
626 
627 		ret = drm_sched_job_add_dependency(job, fence);
628 		if (ret)
629 			return ret;
630 	}
631 
632 	return 0;
633 }
634 
635 /**
636  * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
637  * and add them to the submit context.
638  * @ctx: Submit context.
639  *
640  * Return: 0 on success, a negative error code otherwise.
641  */
642 static int
643 panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
644 {
645 	for (u32 i = 0; i < ctx->job_count; i++) {
646 		int ret;
647 
648 		ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
649 		if (ret)
650 			return ret;
651 	}
652 
653 	return 0;
654 }
655 
656 /**
657  * panthor_submit_ctx_add_deps_and_arm_jobs() - Add jobs dependencies and arm jobs
658  * @ctx: Submit context.
659  *
660  * Must be called after the resv preparation has been taken care of.
661  *
662  * Return: 0 on success, a negative error code otherwise.
663  */
664 static int
665 panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
666 {
667 	for (u32 i = 0; i < ctx->job_count; i++) {
668 		int ret;
669 
670 		ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
671 		if (ret)
672 			return ret;
673 
674 		drm_sched_job_arm(ctx->jobs[i].job);
675 
676 		ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
677 		if (ret)
678 			return ret;
679 	}
680 
681 	return 0;
682 }
683 
684 /**
685  * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
686  * @ctx: Submit context.
687  * @upd_resvs: Callback used to update reservation objects that were previously
688  * prepared.
689  */
690 static void
691 panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
692 			     void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
693 {
694 	for (u32 i = 0; i < ctx->job_count; i++) {
695 		upd_resvs(&ctx->exec, ctx->jobs[i].job);
696 		drm_sched_entity_push_job(ctx->jobs[i].job);
697 
698 		/* Job is owned by the scheduler now. */
699 		ctx->jobs[i].job = NULL;
700 	}
701 
702 	panthor_submit_ctx_push_fences(ctx);
703 }
704 
705 /**
706  * panthor_submit_ctx_init() - Initializes a submission context
707  * @ctx: Submit context to initialize.
708  * @file: drm_file this submission happens on.
709  * @job_count: Number of jobs that will be submitted.
710  *
711  * Return: 0 on success, a negative error code otherwise.
712  */
713 static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
714 				   struct drm_file *file, u32 job_count)
715 {
716 	ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
717 				   GFP_KERNEL | __GFP_ZERO);
718 	if (!ctx->jobs)
719 		return -ENOMEM;
720 
721 	ctx->file = file;
722 	ctx->job_count = job_count;
723 	INIT_LIST_HEAD(&ctx->signals);
724 	drm_exec_init(&ctx->exec,
725 		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
726 		      0);
727 	return 0;
728 }
729 
730 /**
731  * panthor_submit_ctx_cleanup() - Cleanup a submission context
732  * @ctx: Submit context to cleanup.
733  * @job_put: Job put callback.
734  */
735 static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
736 				       void (*job_put)(struct drm_sched_job *))
737 {
738 	struct panthor_sync_signal *sig_sync, *tmp;
739 	unsigned long i;
740 
741 	drm_exec_fini(&ctx->exec);
742 
743 	list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
744 		panthor_sync_signal_free(sig_sync);
745 
746 	for (i = 0; i < ctx->job_count; i++) {
747 		job_put(ctx->jobs[i].job);
748 		kvfree(ctx->jobs[i].syncops);
749 	}
750 
751 	kvfree(ctx->jobs);
752 }
753 
754 static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
755 {
756 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
757 	struct drm_panthor_dev_query *args = data;
758 
759 	if (!args->pointer) {
760 		switch (args->type) {
761 		case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
762 			args->size = sizeof(ptdev->gpu_info);
763 			return 0;
764 
765 		case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
766 			args->size = sizeof(ptdev->csif_info);
767 			return 0;
768 
769 		default:
770 			return -EINVAL;
771 		}
772 	}
773 
774 	switch (args->type) {
775 	case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
776 		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
777 
778 	case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
779 		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);
780 
781 	default:
782 		return -EINVAL;
783 	}
784 }
785 
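/*
 * Illustrative sketch (not part of the driver): the query above supports
 * a two-step pattern where userspace first passes a NULL pointer to
 * learn the object size, then calls again with a buffer. Field names
 * (type/size/pointer) follow their use above; DRM_IOCTL_PANTHOR_DEV_QUERY
 * and drmIoctl() are assumed to come from the uAPI header and libdrm.
 *
 *	struct drm_panthor_dev_query q = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *		.pointer = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);	// fills q.size
 *
 *	struct drm_panthor_gpu_info info;
 *	q.pointer = (__u64)(uintptr_t)&info;
 *	q.size = sizeof(info);
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);
 */
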
786 #define PANTHOR_VM_CREATE_FLAGS			0
787 
788 static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
789 				   struct drm_file *file)
790 {
791 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
792 	struct panthor_file *pfile = file->driver_priv;
793 	struct drm_panthor_vm_create *args = data;
794 	int cookie, ret;
795 
796 	if (!drm_dev_enter(ddev, &cookie))
797 		return -ENODEV;
798 
799 	ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
800 	if (ret >= 0) {
801 		args->id = ret;
802 		ret = 0;
803 	}
804 
805 	drm_dev_exit(cookie);
806 	return ret;
807 }
808 
809 static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
810 				    struct drm_file *file)
811 {
812 	struct panthor_file *pfile = file->driver_priv;
813 	struct drm_panthor_vm_destroy *args = data;
814 
815 	if (args->pad)
816 		return -EINVAL;
817 
818 	return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
819 }
820 
821 #define PANTHOR_BO_FLAGS		DRM_PANTHOR_BO_NO_MMAP
822 
823 static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
824 				   struct drm_file *file)
825 {
826 	struct panthor_file *pfile = file->driver_priv;
827 	struct drm_panthor_bo_create *args = data;
828 	struct panthor_vm *vm = NULL;
829 	int cookie, ret;
830 
831 	if (!drm_dev_enter(ddev, &cookie))
832 		return -ENODEV;
833 
834 	if (!args->size || args->pad ||
835 	    (args->flags & ~PANTHOR_BO_FLAGS)) {
836 		ret = -EINVAL;
837 		goto out_dev_exit;
838 	}
839 
840 	if (args->exclusive_vm_id) {
841 		vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
842 		if (!vm) {
843 			ret = -EINVAL;
844 			goto out_dev_exit;
845 		}
846 	}
847 
848 	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
849 					     args->flags, &args->handle);
850 
851 	panthor_vm_put(vm);
852 
853 out_dev_exit:
854 	drm_dev_exit(cookie);
855 	return ret;
856 }
857 
858 static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
859 					struct drm_file *file)
860 {
861 	struct drm_panthor_bo_mmap_offset *args = data;
862 	struct drm_gem_object *obj;
863 	int ret;
864 
865 	if (args->pad)
866 		return -EINVAL;
867 
868 	obj = drm_gem_object_lookup(file, args->handle);
869 	if (!obj)
870 		return -ENOENT;
871 
872 	ret = drm_gem_create_mmap_offset(obj);
873 	if (ret)
874 		goto out;
875 
876 	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
877 
878 out:
879 	drm_gem_object_put(obj);
880 	return ret;
881 }
882 
883 static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
884 				      struct drm_file *file)
885 {
886 	struct panthor_file *pfile = file->driver_priv;
887 	struct drm_panthor_group_submit *args = data;
888 	struct drm_panthor_queue_submit *jobs_args;
889 	struct panthor_submit_ctx ctx;
890 	int ret = 0, cookie;
891 
892 	if (args->pad)
893 		return -EINVAL;
894 
895 	if (!drm_dev_enter(ddev, &cookie))
896 		return -ENODEV;
897 
898 	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
899 	if (ret)
900 		goto out_dev_exit;
901 
902 	ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
903 	if (ret)
904 		goto out_free_jobs_args;
905 
906 	/* Create jobs and attach sync operations */
907 	for (u32 i = 0; i < args->queue_submits.count; i++) {
908 		const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
909 		struct drm_sched_job *job;
910 
911 		job = panthor_job_create(pfile, args->group_handle, qsubmit);
912 		if (IS_ERR(job)) {
913 			ret = PTR_ERR(job);
914 			goto out_cleanup_submit_ctx;
915 		}
916 
917 		ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
918 		if (ret)
919 			goto out_cleanup_submit_ctx;
920 	}
921 
922 	/*
923 	 * Collect signal operations on all jobs, such that each job can pick
924 	 * from it for its dependencies and update the fence to signal when the
925 	 * job is submitted.
926 	 */
927 	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
928 	if (ret)
929 		goto out_cleanup_submit_ctx;
930 
931 	/*
932 	 * We acquire/prepare resvs on all jobs before proceeding with the
933 	 * dependency registration.
934 	 *
935 	 * This is solving two problems:
936 	 * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be
937 	 *    protected by a lock to make sure no concurrent accesses to the same
938 	 *    entity get interleaved, which would mess up the fence seqno
939 	 *    ordering. Luckily, one of the resv being acquired is the VM resv,
940 	 *    and a scheduling entity is only bound to a single VM. As soon as
941 	 *    we acquire the VM resv, we should be safe.
942 	 * 2. Jobs might depend on fences that were issued by previous jobs in
943 	 *    the same batch, so we can't add dependencies on all jobs before
944 	 *    arming previous jobs and registering the fence to the signal
945 	 *    array, otherwise we might miss dependencies, or point to an
946 	 *    outdated fence.
947 	 */
948 	if (args->queue_submits.count > 0) {
949 		/* All jobs target the same group, so they also point to the same VM. */
950 		struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);
951 
952 		drm_exec_until_all_locked(&ctx.exec) {
953 			ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
954 								  args->queue_submits.count);
955 		}
956 
957 		if (ret)
958 			goto out_cleanup_submit_ctx;
959 	}
960 
961 	/*
962 	 * Now that resvs are locked/prepared, we can iterate over each job to
963 	 * add the dependencies, arm the job fence, register the job fence to
964 	 * the signal array.
965 	 */
966 	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
967 	if (ret)
968 		goto out_cleanup_submit_ctx;
969 
970 	/* Nothing can fail after that point, so we can make our job fences
971 	 * visible to the outside world. Push jobs and set the job fences to
972 	 * the resv slots we reserved.  This also pushes the fences to the
973 	 * syncobjs that are part of the signal array.
974 	 */
975 	panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);
976 
977 out_cleanup_submit_ctx:
978 	panthor_submit_ctx_cleanup(&ctx, panthor_job_put);
979 
980 out_free_jobs_args:
981 	kvfree(jobs_args);
982 
983 out_dev_exit:
984 	drm_dev_exit(cookie);
985 	return ret;
986 }
987 
988 static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
989 				       struct drm_file *file)
990 {
991 	struct panthor_file *pfile = file->driver_priv;
992 	struct drm_panthor_group_destroy *args = data;
993 
994 	if (args->pad)
995 		return -EINVAL;
996 
997 	return panthor_group_destroy(pfile, args->group_handle);
998 }
999 
1000 static int group_priority_permit(struct drm_file *file,
1001 				 u8 priority)
1002 {
1003 	/* Ensure that priority is valid */
1004 	if (priority > PANTHOR_GROUP_PRIORITY_HIGH)
1005 		return -EINVAL;
1006 
1007 	/* Medium priority and below are always allowed */
1008 	if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
1009 		return 0;
1010 
1011 	/* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
1012 	if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
1013 		return 0;
1014 
1015 	return -EACCES;
1016 }
1017 
1018 static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
1019 				      struct drm_file *file)
1020 {
1021 	struct panthor_file *pfile = file->driver_priv;
1022 	struct drm_panthor_group_create *args = data;
1023 	struct drm_panthor_queue_create *queue_args;
1024 	int ret;
1025 
1026 	if (!args->queues.count)
1027 		return -EINVAL;
1028 
1029 	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
1030 	if (ret)
1031 		return ret;
1032 
1033 	ret = group_priority_permit(file, args->priority);
1034 	if (ret)
1035 		return ret;
1036 
1037 	ret = panthor_group_create(pfile, args, queue_args);
1038 	if (ret >= 0) {
1039 		args->group_handle = ret;
1040 		ret = 0;
1041 	}
1042 
1043 	kvfree(queue_args);
1044 	return ret;
1045 }
1046 
1047 static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
1048 					 struct drm_file *file)
1049 {
1050 	struct panthor_file *pfile = file->driver_priv;
1051 	struct drm_panthor_group_get_state *args = data;
1052 
1053 	return panthor_group_get_state(pfile, args);
1054 }
1055 
1056 static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
1057 					   struct drm_file *file)
1058 {
1059 	struct panthor_file *pfile = file->driver_priv;
1060 	struct drm_panthor_tiler_heap_create *args = data;
1061 	struct panthor_heap_pool *pool;
1062 	struct panthor_vm *vm;
1063 	int ret;
1064 
1065 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1066 	if (!vm)
1067 		return -EINVAL;
1068 
1069 	pool = panthor_vm_get_heap_pool(vm, true);
1070 	if (IS_ERR(pool)) {
1071 		ret = PTR_ERR(pool);
1072 		goto out_put_vm;
1073 	}
1074 
1075 	ret = panthor_heap_create(pool,
1076 				  args->initial_chunk_count,
1077 				  args->chunk_size,
1078 				  args->max_chunks,
1079 				  args->target_in_flight,
1080 				  &args->tiler_heap_ctx_gpu_va,
1081 				  &args->first_heap_chunk_gpu_va);
1082 	if (ret < 0)
1083 		goto out_put_heap_pool;
1084 
1085 	/* Heap pools are per-VM. We combine the VM and HEAP id to make
1086 	 * a unique heap handle.
1087 	 */
1088 	args->handle = (args->vm_id << 16) | ret;
1089 	ret = 0;
1090 
1091 out_put_heap_pool:
1092 	panthor_heap_pool_put(pool);
1093 
1094 out_put_vm:
1095 	panthor_vm_put(vm);
1096 	return ret;
1097 }
1098 
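/*
 * Illustrative sketch (not part of the driver): the handle returned above
 * packs the VM id in the upper 16 bits and the per-pool heap id in the
 * lower 16 bits, which is exactly what the destroy path below undoes:
 *
 *	u32 vm_id = handle >> 16;
 *	u32 heap_id = handle & GENMASK(15, 0);
 */
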
1099 static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
1100 					    struct drm_file *file)
1101 {
1102 	struct panthor_file *pfile = file->driver_priv;
1103 	struct drm_panthor_tiler_heap_destroy *args = data;
1104 	struct panthor_heap_pool *pool;
1105 	struct panthor_vm *vm;
1106 	int ret;
1107 
1108 	if (args->pad)
1109 		return -EINVAL;
1110 
1111 	vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
1112 	if (!vm)
1113 		return -EINVAL;
1114 
1115 	pool = panthor_vm_get_heap_pool(vm, false);
1116 	if (IS_ERR(pool)) {
1117 		ret = PTR_ERR(pool);
1118 		goto out_put_vm;
1119 	}
1120 
1121 	ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
1122 	panthor_heap_pool_put(pool);
1123 
1124 out_put_vm:
1125 	panthor_vm_put(vm);
1126 	return ret;
1127 }
1128 
1129 static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
1130 				       struct drm_panthor_vm_bind *args,
1131 				       struct drm_file *file)
1132 {
1133 	struct panthor_file *pfile = file->driver_priv;
1134 	struct drm_panthor_vm_bind_op *jobs_args;
1135 	struct panthor_submit_ctx ctx;
1136 	struct panthor_vm *vm;
1137 	int ret = 0;
1138 
1139 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1140 	if (!vm)
1141 		return -EINVAL;
1142 
1143 	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
1144 	if (ret)
1145 		goto out_put_vm;
1146 
1147 	ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
1148 	if (ret)
1149 		goto out_free_jobs_args;
1150 
1151 	for (u32 i = 0; i < args->ops.count; i++) {
1152 		struct drm_panthor_vm_bind_op *op = &jobs_args[i];
1153 		struct drm_sched_job *job;
1154 
1155 		job = panthor_vm_bind_job_create(file, vm, op);
1156 		if (IS_ERR(job)) {
1157 			ret = PTR_ERR(job);
1158 			goto out_cleanup_submit_ctx;
1159 		}
1160 
1161 		ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
1162 		if (ret)
1163 			goto out_cleanup_submit_ctx;
1164 	}
1165 
1166 	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
1167 	if (ret)
1168 		goto out_cleanup_submit_ctx;
1169 
1170 	/* Prepare reservation objects for each VM_BIND job. */
1171 	drm_exec_until_all_locked(&ctx.exec) {
1172 		for (u32 i = 0; i < ctx.job_count; i++) {
1173 			ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
1174 			drm_exec_retry_on_contention(&ctx.exec);
1175 			if (ret)
1176 				goto out_cleanup_submit_ctx;
1177 		}
1178 	}
1179 
1180 	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
1181 	if (ret)
1182 		goto out_cleanup_submit_ctx;
1183 
1184 	/* Nothing can fail after that point. */
1185 	panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);
1186 
1187 out_cleanup_submit_ctx:
1188 	panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);
1189 
1190 out_free_jobs_args:
1191 	kvfree(jobs_args);
1192 
1193 out_put_vm:
1194 	panthor_vm_put(vm);
1195 	return ret;
1196 }
1197 
1198 static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
1199 				      struct drm_panthor_vm_bind *args,
1200 				      struct drm_file *file)
1201 {
1202 	struct panthor_file *pfile = file->driver_priv;
1203 	struct drm_panthor_vm_bind_op *jobs_args;
1204 	struct panthor_vm *vm;
1205 	int ret;
1206 
1207 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1208 	if (!vm)
1209 		return -EINVAL;
1210 
1211 	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
1212 	if (ret)
1213 		goto out_put_vm;
1214 
1215 	for (u32 i = 0; i < args->ops.count; i++) {
1216 		ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
1217 		if (ret) {
1218 			/* Update ops.count so the user knows where things failed. */
1219 			args->ops.count = i;
1220 			break;
1221 		}
1222 	}
1223 
1224 	kvfree(jobs_args);
1225 
1226 out_put_vm:
1227 	panthor_vm_put(vm);
1228 	return ret;
1229 }
1230 
1231 #define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC
1232 
1233 static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
1234 				 struct drm_file *file)
1235 {
1236 	struct drm_panthor_vm_bind *args = data;
1237 	int cookie, ret;
1238 
1239 	if (!drm_dev_enter(ddev, &cookie))
1240 		return -ENODEV;
1241 
1242 	if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
1243 		ret = panthor_ioctl_vm_bind_async(ddev, args, file);
1244 	else
1245 		ret = panthor_ioctl_vm_bind_sync(ddev, args, file);
1246 
1247 	drm_dev_exit(cookie);
1248 	return ret;
1249 }
1250 
1251 static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
1252 				      struct drm_file *file)
1253 {
1254 	struct panthor_file *pfile = file->driver_priv;
1255 	struct drm_panthor_vm_get_state *args = data;
1256 	struct panthor_vm *vm;
1257 
1258 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1259 	if (!vm)
1260 		return -EINVAL;
1261 
1262 	if (panthor_vm_is_unusable(vm))
1263 		args->state = DRM_PANTHOR_VM_STATE_UNUSABLE;
1264 	else
1265 		args->state = DRM_PANTHOR_VM_STATE_USABLE;
1266 
1267 	panthor_vm_put(vm);
1268 	return 0;
1269 }
1270 
1271 static int
1272 panthor_open(struct drm_device *ddev, struct drm_file *file)
1273 {
1274 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
1275 	struct panthor_file *pfile;
1276 	int ret;
1277 
1278 	if (!try_module_get(THIS_MODULE))
1279 		return -EINVAL;
1280 
1281 	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
1282 	if (!pfile) {
1283 		ret = -ENOMEM;
1284 		goto err_put_mod;
1285 	}
1286 
1287 	pfile->ptdev = ptdev;
1288 
1289 	ret = panthor_vm_pool_create(pfile);
1290 	if (ret)
1291 		goto err_free_file;
1292 
1293 	ret = panthor_group_pool_create(pfile);
1294 	if (ret)
1295 		goto err_destroy_vm_pool;
1296 
1297 	file->driver_priv = pfile;
1298 	return 0;
1299 
1300 err_destroy_vm_pool:
1301 	panthor_vm_pool_destroy(pfile);
1302 
1303 err_free_file:
1304 	kfree(pfile);
1305 
1306 err_put_mod:
1307 	module_put(THIS_MODULE);
1308 	return ret;
1309 }
1310 
1311 static void
1312 panthor_postclose(struct drm_device *ddev, struct drm_file *file)
1313 {
1314 	struct panthor_file *pfile = file->driver_priv;
1315 
1316 	panthor_group_pool_destroy(pfile);
1317 	panthor_vm_pool_destroy(pfile);
1318 
1319 	kfree(pfile);
1320 	module_put(THIS_MODULE);
1321 }
1322 
1323 static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
1324 #define PANTHOR_IOCTL(n, func, flags) \
1325 	DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)
1326 
1327 	PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
1328 	PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
1329 	PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
1330 	PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
1331 	PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
1332 	PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
1333 	PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
1334 	PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
1335 	PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
1336 	PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
1337 	PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
1338 	PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
1339 	PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
1340 };
1341 
1342 static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
1343 {
1344 	struct drm_file *file = filp->private_data;
1345 	struct panthor_file *pfile = file->driver_priv;
1346 	struct panthor_device *ptdev = pfile->ptdev;
1347 	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
1348 	int ret, cookie;
1349 
1350 	if (!drm_dev_enter(file->minor->dev, &cookie))
1351 		return -ENODEV;
1352 
1353 #ifdef CONFIG_ARM64
1354 	/*
1355 	 * With 32-bit systems being limited by the 32-bit representation of
1356 	 * mmap2's pgoffset field, we need to make the MMIO offset arch
1357 	 * specific. This converts a user MMIO offset into something the kernel
1358 	 * driver understands.
1359 	 */
1360 	if (test_tsk_thread_flag(current, TIF_32BIT) &&
1361 	    offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
1362 		offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
1363 			  DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
1364 		vma->vm_pgoff = offset >> PAGE_SHIFT;
1365 	}
1366 #endif
1367 
1368 	if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
1369 		ret = panthor_device_mmap_io(ptdev, vma);
1370 	else
1371 		ret = drm_gem_mmap(filp, vma);
1372 
1373 	drm_dev_exit(cookie);
1374 	return ret;
1375 }
1376 
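/*
 * Illustrative sketch (not part of the driver): a 32-bit process cannot
 * express the 64-bit MMIO range through mmap2()'s page offset, so it is
 * expected to base its MMIO offsets on DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
 * which panthor_mmap() rebases onto the canonical 64-bit offset before
 * dispatching. A hypothetical 32-bit userspace call might look like:
 *
 *	void *mmio = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *			  DRM_PANTHOR_USER_MMIO_OFFSET_32BIT + mmio_off);
 *
 * 64-bit userspace uses offsets based on DRM_PANTHOR_USER_MMIO_OFFSET
 * directly.
 */
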
1377 static const struct file_operations panthor_drm_driver_fops = {
1378 	.open = drm_open,
1379 	.release = drm_release,
1380 	.unlocked_ioctl = drm_ioctl,
1381 	.compat_ioctl = drm_compat_ioctl,
1382 	.poll = drm_poll,
1383 	.read = drm_read,
1384 	.llseek = noop_llseek,
1385 	.mmap = panthor_mmap,
1386 	.fop_flags = FOP_UNSIGNED_OFFSET,
1387 };
1388 
1389 #ifdef CONFIG_DEBUG_FS
1390 static void panthor_debugfs_init(struct drm_minor *minor)
1391 {
1392 	panthor_mmu_debugfs_init(minor);
1393 }
1394 #endif
1395 
1396 /*
1397  * PanCSF driver version:
1398  * - 1.0 - initial interface
1399  */
1400 static const struct drm_driver panthor_drm_driver = {
1401 	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
1402 			   DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
1403 	.open = panthor_open,
1404 	.postclose = panthor_postclose,
1405 	.ioctls = panthor_drm_driver_ioctls,
1406 	.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
1407 	.fops = &panthor_drm_driver_fops,
1408 	.name = "panthor",
1409 	.desc = "Panthor DRM driver",
1410 	.date = "20230801",
1411 	.major = 1,
1412 	.minor = 0,
1413 
1414 	.gem_create_object = panthor_gem_create_object,
1415 	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
1416 #ifdef CONFIG_DEBUG_FS
1417 	.debugfs_init = panthor_debugfs_init,
1418 #endif
1419 };
1420 
1421 static int panthor_probe(struct platform_device *pdev)
1422 {
1423 	struct panthor_device *ptdev;
1424 
1425 	ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
1426 				   struct panthor_device, base);
1427 	if (IS_ERR(ptdev))
1428 		return -ENOMEM;
1429 
1430 	platform_set_drvdata(pdev, ptdev);
1431 
1432 	return panthor_device_init(ptdev);
1433 }
1434 
1435 static void panthor_remove(struct platform_device *pdev)
1436 {
1437 	struct panthor_device *ptdev = platform_get_drvdata(pdev);
1438 
1439 	panthor_device_unplug(ptdev);
1440 }
1441 
1442 static const struct of_device_id dt_match[] = {
1443 	{ .compatible = "rockchip,rk3588-mali" },
1444 	{ .compatible = "arm,mali-valhall-csf" },
1445 	{}
1446 };
1447 MODULE_DEVICE_TABLE(of, dt_match);
1448 
1449 static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
1450 				 panthor_device_suspend,
1451 				 panthor_device_resume,
1452 				 NULL);
1453 
1454 static struct platform_driver panthor_driver = {
1455 	.probe = panthor_probe,
1456 	.remove_new = panthor_remove,
1457 	.driver = {
1458 		.name = "panthor",
1459 		.pm = pm_ptr(&panthor_pm_ops),
1460 		.of_match_table = dt_match,
1461 	},
1462 };
1463 
1464 /*
1465  * Workqueue used to cleanup stuff.
1466  *
1467  * We create a dedicated workqueue so we can drain on unplug and
1468  * make sure all resources are freed before the module is unloaded.
1469  */
1470 struct workqueue_struct *panthor_cleanup_wq;
1471 
1472 static int __init panthor_init(void)
1473 {
1474 	int ret;
1475 
1476 	ret = panthor_mmu_pt_cache_init();
1477 	if (ret)
1478 		return ret;
1479 
1480 	panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
1481 	if (!panthor_cleanup_wq) {
1482 		pr_err("panthor: Failed to allocate the workqueues\n");
1483 		ret = -ENOMEM;
1484 		goto err_mmu_pt_cache_fini;
1485 	}
1486 
1487 	ret = platform_driver_register(&panthor_driver);
1488 	if (ret)
1489 		goto err_destroy_cleanup_wq;
1490 
1491 	return 0;
1492 
1493 err_destroy_cleanup_wq:
1494 	destroy_workqueue(panthor_cleanup_wq);
1495 
1496 err_mmu_pt_cache_fini:
1497 	panthor_mmu_pt_cache_fini();
1498 	return ret;
1499 }
1500 module_init(panthor_init);
1501 
1502 static void __exit panthor_exit(void)
1503 {
1504 	platform_driver_unregister(&panthor_driver);
1505 	destroy_workqueue(panthor_cleanup_wq);
1506 	panthor_mmu_pt_cache_fini();
1507 }
1508 module_exit(panthor_exit);
1509 
1510 MODULE_AUTHOR("Panthor Project Developers");
1511 MODULE_DESCRIPTION("Panthor DRM Driver");
1512 MODULE_LICENSE("Dual MIT/GPL");
1513