/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on 64-bit/8-byte. If the object is not
 *   naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to trial and error to know which
 *   flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

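/*
 * Example (informative, not part of the UAPI): a minimal sketch of how
 * userspace honors the zero-initialization rules above. Zeroing the whole
 * struct keeps the padding fields MBZ-clean and makes any fields unknown to
 * an older userspace take the "behave as before" value.
 *
 *    struct drm_panthor_vm_create args;
 *
 *    // Zero everything first: padding is checked by the driver, and new
 *    // fields must be zero to preserve the pre-existing behavior.
 *    memset(&args, 0, sizeof(args));
 *    args.user_va_range = 1ull << 40;
 *    // ... pass &args to DRM_IOCTL_PANTHOR_VM_CREATE via ioctl().
 */
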
/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
 * different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)

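/*
 * Example (informative, not part of the UAPI): a minimal sketch of mapping
 * the LATEST_FLUSH_ID register page described above, assuming a valid DRM
 * file descriptor `fd` opened on a Panthor device and a 4 KiB page size.
 *
 *    #include <sys/mman.h>
 *
 *    // Map the read-only flush-ID page exposed at the static MMIO offset.
 *    const volatile __u32 *latest_flush =
 *            mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd,
 *                 DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 *    // Read the flush ID without a user <-> kernel round-trip; this value
 *    // can then be passed in drm_panthor_queue_submit::latest_flush.
 *    __u32 flush_id = *latest_flush;
 */
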
/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,
};

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or WR.
 * @__id: One of the DRM_PANTHOR_xxx IDs.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

#define DRM_IOCTL_PANTHOR_DEV_QUERY \
	DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
#define DRM_IOCTL_PANTHOR_VM_CREATE \
	DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
#define DRM_IOCTL_PANTHOR_VM_DESTROY \
	DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
#define DRM_IOCTL_PANTHOR_VM_BIND \
	DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
#define DRM_IOCTL_PANTHOR_VM_GET_STATE \
	DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
#define DRM_IOCTL_PANTHOR_BO_CREATE \
	DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \
	DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
#define DRM_IOCTL_PANTHOR_GROUP_CREATE \
	DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \
	DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \
	DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \
	DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \
	DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \
	DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)

/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to change in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }

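/*
 * Example (informative, not part of the UAPI): filling an obj_array field
 * with the macro above, here for a hypothetical array of two sync operations
 * attached to a VM bind operation.
 *
 *    struct drm_panthor_sync_op syncs[2] = { ... };
 *    struct drm_panthor_vm_bind_op op = {
 *            // The stride is derived from the element type known to
 *            // userspace, which is what lets the kernel version the layout.
 *            .syncs = DRM_PANTHOR_OBJ_ARRAY(2, syncs),
 *    };
 */
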
/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};

/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/** @coherency_features: Coherency features. */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the address spaces exposed by the MMU. */
	__u32 as_present;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;
};

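/*
 * Example (informative, not part of the UAPI): decoding the packed
 * drm_panthor_gpu_info::gpu_id and drm_panthor_gpu_info::mmu_features
 * fields with the helper macros above.
 *
 *    struct drm_panthor_gpu_info info;
 *    // ... filled by DRM_IOCTL_PANTHOR_DEV_QUERY (see below).
 *
 *    unsigned int arch_major = DRM_PANTHOR_ARCH_MAJOR(info.gpu_id);
 *    unsigned int arch_minor = DRM_PANTHOR_ARCH_MINOR(info.gpu_id);
 *    unsigned int product_major = DRM_PANTHOR_PRODUCT_MAJOR(info.gpu_id);
 *    unsigned int va_bits = DRM_PANTHOR_MMU_VA_BITS(info.mmu_features);
 */
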
/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};

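/*
 * Example (informative, not part of the UAPI): the two-call pattern allowed
 * by the size/pointer semantics above, assuming a valid DRM file descriptor
 * `fd` opened on a Panthor device.
 *
 *    struct drm_panthor_dev_query query = {
 *            .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *    };
 *
 *    // First call: pointer is NULL, so the driver only reports the
 *    // structure size it knows about in query.size.
 *    ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *
 *    // Second call: provide a buffer; the driver copies
 *    // min(query.size, actual_structure_size) bytes into it.
 *    struct drm_panthor_gpu_info info = {0};
 *    query.size = sizeof(info);
 *    query.pointer = (__u64)(uintptr_t)&info;
 *    ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 */
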
/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space for the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};

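/*
 * Example (informative, not part of the UAPI): deriving a user_va_range from
 * the MMU VA width, as suggested above, assuming `info` was filled by the
 * GPU_INFO query shown earlier. The 3/4 split is an arbitrary illustration,
 * not a recommendation.
 *
 *    __u64 total_va = 1ull << DRM_PANTHOR_MMU_VA_BITS(info.mmu_features);
 *    struct drm_panthor_vm_create vm_args = {
 *            // Leave the top quarter of the VA space for kernel objects.
 *            .user_va_range = total_va - (total_va / 4),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_args);
 *    // vm_args.id now holds the VM ID to pass to the other ioctls.
 */
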
/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};

/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;

	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;

	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};

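/*
 * Example (informative, not part of the UAPI): a synchronous VM_BIND mapping
 * a buffer object at a fixed GPU VA, assuming `fd`, a VM ID and a BO handle
 * obtained from the ioctls above.
 *
 *    struct drm_panthor_vm_bind_op op = {
 *            .flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP |
 *                     DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *            .bo_handle = bo_handle,
 *            .bo_offset = 0,
 *            .va = 0x100000,
 *            .size = 0x10000,
 *            // No syncs: mandatory when DRM_PANTHOR_VM_BIND_ASYNC is unset.
 *    };
 *    struct drm_panthor_vm_bind bind = {
 *            .vm_id = vm_id,
 *            .flags = 0,
 *            .ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
 */
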
/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - it cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};

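/*
 * Example (informative, not part of the UAPI): creating a buffer object and
 * CPU-mapping it through its fake mmap offset, assuming a valid DRM file
 * descriptor `fd`.
 *
 *    struct drm_panthor_bo_create create = {
 *            .size = 0x10000,
 *            // No DRM_PANTHOR_BO_NO_MMAP, since we want a CPU mapping.
 *    };
 *    ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &create);
 *
 *    struct drm_panthor_bo_mmap_offset mmap_offset = {
 *            .handle = create.handle,
 *    };
 *    ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &mmap_offset);
 *
 *    void *cpu_ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                         MAP_SHARED, fd, mmap_offset.offset);
 */
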
/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submissions to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};

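/*
 * Example (informative, not part of the UAPI): creating a group with a
 * single queue, reusing the `fd`, `info` and `vm_id` values obtained from
 * the ioctls shown earlier.
 *
 *    struct drm_panthor_queue_create queue = {
 *            .priority = 15,            // Highest priority within the group.
 *            .ringbuf_size = 0x10000,
 *    };
 *    struct drm_panthor_group_create group = {
 *            .queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *            .max_compute_cores = 1,
 *            .max_fragment_cores = 1,
 *            .max_tiler_cores = 1,
 *            .priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *            .compute_core_mask = info.shader_present,
 *            .fragment_core_mask = info.shader_present,
 *            .tiler_core_mask = info.tiler_present,
 *            .vm_id = vm_id,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group);
 *    // group.group_handle is used for submission and destruction.
 */
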
/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy. */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This describes the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction).
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};

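/*
 * Example (informative, not part of the UAPI): submitting one job to queue 0
 * of a group, signaling a syncobj on completion. Assumes `fd`, a group
 * handle, a GPU VA pointing at a CS stream, the flush-ID page mapped as
 * shown earlier, and a syncobj created with DRM_IOCTL_SYNCOBJ_CREATE.
 *
 *    struct drm_panthor_sync_op sync = {
 *            .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *                     DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *            .handle = syncobj_handle,
 *    };
 *    struct drm_panthor_queue_submit qsubmit = {
 *            .queue_index = 0,
 *            .stream_size = stream_size,      // 8-byte aligned
 *            .stream_addr = stream_gpu_va,    // 64-byte aligned
 *            .latest_flush = *latest_flush,   // read from the mapped page
 *            .syncs = DRM_PANTHOR_OBJ_ARRAY(1, &sync),
 *    };
 *    struct drm_panthor_group_submit submit = {
 *            .group_handle = group_handle,
 *            .queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit);
 */
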
/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on. */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to. */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight tiler jobs in-flight, the
	 * FW will wait for render passes to finish before queuing new tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the tiler heap context. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is made of heap chunks forming a singly linked list.
	 * This is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};

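/*
 * Example (informative, not part of the UAPI): creating a tiler heap on an
 * existing VM. The chunk sizing below is an arbitrary illustration within
 * the documented [128k:8M] range.
 *
 *    struct drm_panthor_tiler_heap_create heap = {
 *            .vm_id = vm_id,
 *            .initial_chunk_count = 4,
 *            .chunk_size = 128 * 1024,
 *            .max_chunks = 64,
 *            .target_in_flight = 64,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap);
 *    // heap.handle, heap.tiler_heap_ctx_gpu_va and
 *    // heap.first_heap_chunk_gpu_va are filled on return.
 */
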
/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */