1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #ifndef __I915_DRM_PRELIM_H__
7 #define __I915_DRM_PRELIM_H__
8 
9 #include "drm.h"
10 
11 /*
12  * Modifications to structs/values defined here are subject to
13  * backwards-compatibility constraints.
14  *
15  * Internal/downstream declarations must be added here, not to
16  * i915_drm.h. The values in i915_drm_prelim.h must also be kept
17  * synchronized with values in i915_drm.h.
18  */
19 
/*
 * Container for the uevent name strings emitted by the driver. The struct
 * carries no data members; it exists only to namespace the macros below.
 */
struct prelim_i915_uevent {
/*
 * PRELIM_I915_RESET_FAILED_UEVENT - Event is generated when engine or GPU
 *	resets fail and also when GPU is declared wedged. The value
 *	supplied with the event is always 1. Event is also generated when
 *	resets are disabled by module parameter and an attempt to reset
 *	either engine or GPU is made.
 */
#define PRELIM_I915_RESET_FAILED_UEVENT	"RESET_FAILED"

/*
 * PRELIM_I915_MEMORY_HEALTH_UEVENT - Generated when driver receives a memory
 *	degradation error from the GPU FW. The event serves as notification to
 *	an Admin to reboot the system as soon as possible, due to the fact that
 *	device is no longer RUNTIME recoverable again. This event will always
 *	have a value of 1, which indicates that uncorrectable error has been
 *	detected, and that runtime memory sparing is not feasible without system
 *	reboot - for recovery of failed BANK.
 */
#define PRELIM_I915_MEMORY_HEALTH_UEVENT	"MEMORY_HEALTH"
};
41 
/*
 * Namespace container for the PRELIM user-extension id bit.
 *
 * PRELIM extension ids set bit 16 so they cannot collide with upstream
 * i915_user_extension ids; PRELIM_I915_USER_EXT_MASK() recovers the low
 * 16-bit extension number.
 */
struct prelim_i915_user_extension {
#define PRELIM_I915_USER_EXT		(1 << 16)
/* Argument parenthesized so expressions like MASK(a | b) bind correctly */
#define PRELIM_I915_USER_EXT_MASK(x)	((x) & 0xffff)
};
46 
/*
 * Context-create extension that clones state from an existing context
 * (clone_id); the PRELIM_I915_CONTEXT_CLONE_* bits in flags select which
 * pieces of state are copied.
 */
struct prelim_drm_i915_gem_context_create_ext_clone {
#define PRELIM_I915_CONTEXT_CREATE_EXT_CLONE	(PRELIM_I915_USER_EXT | 1)
	struct i915_user_extension base;
	__u32 clone_id; /* id of the context to copy state from */
	__u32 flags; /* mask of PRELIM_I915_CONTEXT_CLONE_* below */
#define PRELIM_I915_CONTEXT_CLONE_ENGINES	(1u << 0)
#define PRELIM_I915_CONTEXT_CLONE_FLAGS		(1u << 1)
#define PRELIM_I915_CONTEXT_CLONE_SCHEDATTR	(1u << 2)
#define PRELIM_I915_CONTEXT_CLONE_SSEU		(1u << 3)
#define PRELIM_I915_CONTEXT_CLONE_TIMELINE	(1u << 4)
#define PRELIM_I915_CONTEXT_CLONE_VM		(1u << 5)
/* Negating the next free bit yields all bits above VM, i.e. every undefined flag */
#define PRELIM_I915_CONTEXT_CLONE_UNKNOWN	-(PRELIM_I915_CONTEXT_CLONE_VM << 1)
	__u64 rsvd; /* reserved; presumably must be zero — confirm with driver */
};
61 
/*
 * PRELIM UAPI VERSION - /sys/<...>/drm/card<n>/prelim_uapi_version
 * MAJOR - to be incremented right after a major public Production branch
 *         release containing PRELIM uAPIs
 *         PROD_DG1_201210.0 released so starting with major = 2, although
 *         it didn't have the proper prelim api infrastructure yet.
 * MINOR - Reset to 0 when MAJOR is bumped.
 *         Bumped as needed when some kind of API incompatibility is identified.
 *         This patch, which introduces this, should be the only patch in
 *         the pile that is changing this number.
 */
#define PRELIM_UAPI_MAJOR	2
#define PRELIM_UAPI_MINOR	0

/*
 * Top 8 bits of every non-engine counter are GT id.
 * FIXME: __PRELIM_I915_PMU_GT_SHIFT will be changed to 56
 */
#define __PRELIM_I915_PMU_GT_SHIFT (60)

/*
 * Encode a per-GT "other" PMU event: the id starts one past the engine
 * event space (__I915_PMU_ENGINE comes from i915_drm.h) and the GT id is
 * placed in the top bits at __PRELIM_I915_PMU_GT_SHIFT.
 */
#define ___PRELIM_I915_PMU_OTHER(gt, x) \
	(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
	((__u64)(gt) << __PRELIM_I915_PMU_GT_SHIFT))

/*
 * GT0 variant. NOTE(review): this redefines __I915_PMU_OTHER from
 * i915_drm.h; per the header preamble the values must stay synchronized
 * with that header — confirm they match.
 */
#define __I915_PMU_OTHER(x) ___PRELIM_I915_PMU_OTHER(0, x)

#define __PRELIM_I915_PMU_ACTUAL_FREQUENCY(gt)		___PRELIM_I915_PMU_OTHER(gt, 0)
#define __PRELIM_I915_PMU_REQUESTED_FREQUENCY(gt)	___PRELIM_I915_PMU_OTHER(gt, 1)
#define __PRELIM_I915_PMU_INTERRUPTS(gt)		___PRELIM_I915_PMU_OTHER(gt, 2)
#define __PRELIM_I915_PMU_RC6_RESIDENCY(gt)		___PRELIM_I915_PMU_OTHER(gt, 3)
#define __PRELIM_I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt)	___PRELIM_I915_PMU_OTHER(gt, 4)
#define __PRELIM_I915_PMU_ENGINE_RESET_COUNT(gt)	___PRELIM_I915_PMU_OTHER(gt, 5)
#define __PRELIM_I915_PMU_EU_ATTENTION_COUNT(gt)	___PRELIM_I915_PMU_OTHER(gt, 6)
#define __PRELIM_I915_PMU_RENDER_GROUP_BUSY(gt)		___PRELIM_I915_PMU_OTHER(gt, 7)
#define __PRELIM_I915_PMU_COPY_GROUP_BUSY(gt)		___PRELIM_I915_PMU_OTHER(gt, 8)
#define __PRELIM_I915_PMU_MEDIA_GROUP_BUSY(gt)		___PRELIM_I915_PMU_OTHER(gt, 9)
#define __PRELIM_I915_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___PRELIM_I915_PMU_OTHER(gt, 10)


#define __PRELIM_I915_PMU_HW_ERROR_EVENT_ID_OFFSET	(__I915_PMU_OTHER(0) + 1000)

/* GT0 shorthands for the per-GT counters above (tab-aligned like the rest) */
#define PRELIM_I915_PMU_ENGINE_RESET_COUNT	__PRELIM_I915_PMU_ENGINE_RESET_COUNT(0)
#define PRELIM_I915_PMU_EU_ATTENTION_COUNT	__PRELIM_I915_PMU_EU_ATTENTION_COUNT(0)
#define PRELIM_I915_PMU_RENDER_GROUP_BUSY	__PRELIM_I915_PMU_RENDER_GROUP_BUSY(0)
#define PRELIM_I915_PMU_COPY_GROUP_BUSY		__PRELIM_I915_PMU_COPY_GROUP_BUSY(0)
#define PRELIM_I915_PMU_MEDIA_GROUP_BUSY	__PRELIM_I915_PMU_MEDIA_GROUP_BUSY(0)
#define PRELIM_I915_PMU_ANY_ENGINE_GROUP_BUSY	__PRELIM_I915_PMU_ANY_ENGINE_GROUP_BUSY(0)
109 
/*
 * HW error counters.
 *
 * The fixed ids below run from 0 to 38. The FBR/HBM ids that follow are
 * derived by chaining each block off the last id of the previous block
 * (ss selects a sub-block, n an entry within it). With ss <= 1 and
 * n <= 15, PRELIM_I915_PMU_SOC_ERROR_FATAL_HBM(1, 15) evaluates to 161,
 * matching the "161 is the last ID" note further down.
 */
#define PRELIM_I915_PMU_GT_ERROR_CORRECTABLE_L3_SNG		(0)
#define PRELIM_I915_PMU_GT_ERROR_CORRECTABLE_GUC		(1)
#define PRELIM_I915_PMU_GT_ERROR_CORRECTABLE_SAMPLER		(2)
#define PRELIM_I915_PMU_GT_ERROR_CORRECTABLE_SLM		(3)
#define PRELIM_I915_PMU_GT_ERROR_CORRECTABLE_EU_IC		(4)
#define PRELIM_I915_PMU_GT_ERROR_CORRECTABLE_EU_GRF		(5)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_ARR_BIST			(6)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_L3_DOUB			(7)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_L3_ECC_CHK		(8)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_GUC			(9)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_IDI_PAR			(10)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_SQIDI			(11)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_SAMPLER			(12)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_SLM			(13)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_EU_IC			(14)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_EU_GRF			(15)
#define PRELIM_I915_PMU_SGUNIT_ERROR_CORRECTABLE		(16)
#define PRELIM_I915_PMU_SGUNIT_ERROR_NONFATAL			(17)
#define PRELIM_I915_PMU_SGUNIT_ERROR_FATAL			(18)
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_PSF_CSC_0		(19)
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_PSF_CSC_1		(20)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_PSF_CSC_0		(21)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_PSF_CSC_1		(22)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_PSF_CSC_2		(23)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_PSF_CSC_0		(24)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_PSF_CSC_1		(25)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_PSF_CSC_2		(26)
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_PUNIT		(27)
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_MDFI_EAST		(28)
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_MDFI_WEST		(29)
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_MDFI_SOUTH	(30)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_PUNIT		(31)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_MDFI_EAST		(32)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_MDFI_WEST		(33)
#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_MDFI_SOUTH		(34)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_PUNIT			(35)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_MDFI_EAST		(36)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_MDFI_WEST		(37)
#define PRELIM_I915_PMU_SOC_ERROR_FATAL_MDFI_SOUTH		(38)

/* FBR ids start at 39 (= 38 + 1); 4 entries per sub-block */
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_FBR(ss, n) \
	(PRELIM_I915_PMU_SOC_ERROR_FATAL_MDFI_SOUTH + 0x1 + (ss) * 0x4 + (n))

#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_FBR(ss, n) \
	(PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_FBR(1, 5) + (ss) * 0x4 + (n))

#define PRELIM_I915_PMU_SOC_ERROR_FATAL_FBR(ss, n) \
	(PRELIM_I915_PMU_SOC_ERROR_NONFATAL_FBR(1, 5) + (ss) * 0x4 + (n))

/* HBM ids continue after the FBR range; 16 entries per sub-block */
#define PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_HBM(ss, n)\
	(PRELIM_I915_PMU_SOC_ERROR_FATAL_FBR(1, 5) + (ss) * 0x10 + (n))

#define PRELIM_I915_PMU_SOC_ERROR_NONFATAL_HBM(ss, n)\
	(PRELIM_I915_PMU_SOC_ERROR_CORRECTABLE_HBM(1, 16) + (ss) * 0x10 + (n))

#define PRELIM_I915_PMU_SOC_ERROR_FATAL_HBM(ss, n)\
	(PRELIM_I915_PMU_SOC_ERROR_NONFATAL_HBM(1, 16) + (ss) * 0x10 + (n))

/* 161 is the last ID used by SOC errors */
#define PRELIM_I915_PMU_GT_ERROR_FATAL_FPU		(162)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_TLB		(163)
#define PRELIM_I915_PMU_GT_ERROR_FATAL_L3_FABRIC	(164)

/* Build a PMU event: HW error id in the low bits, GT id in the top bits */
#define PRELIM_I915_PMU_HW_ERROR(gt, id) \
	((__PRELIM_I915_PMU_HW_ERROR_EVENT_ID_OFFSET + (id)) | \
	((__u64)(gt) << __PRELIM_I915_PMU_GT_SHIFT))

/* Per GT driver error counters */
#define __PRELIM_I915_PMU_GT_DRIVER_ERROR_EVENT_ID_OFFSET (__I915_PMU_OTHER(0) + 2000)

#define PRELIM_I915_PMU_GT_DRIVER_ERROR_GGTT			(0)
#define PRELIM_I915_PMU_GT_DRIVER_ERROR_ENGINE_OTHER		(1)
#define PRELIM_I915_PMU_GT_DRIVER_ERROR_GUC_COMMUNICATION	(2)
#define PRELIM_I915_PMU_GT_DRIVER_ERROR_RPS			(3)
#define PRELIM_I915_PMU_GT_DRIVER_ERROR_GT_OTHER		(4)
#define PRELIM_I915_PMU_GT_DRIVER_ERROR_INTERRUPT		(5)

/* Build a PMU event: driver error id in the low bits, GT id in the top bits */
#define PRELIM_I915_PMU_GT_DRIVER_ERROR(gt, id) \
	((__PRELIM_I915_PMU_GT_DRIVER_ERROR_EVENT_ID_OFFSET + (id)) | \
	 ((__u64)(gt) << __PRELIM_I915_PMU_GT_SHIFT))

/* Global driver error counters (no GT id component) */
#define __PRELIM_I915_PMU_DRIVER_ERROR_EVENT_ID_OFFSET (__I915_PMU_OTHER(0) + 3000)
#define PRELIM_I915_PMU_DRIVER_ERROR_OBJECT_MIGRATION	(0)
#define PRELIM_I915_PMU_DRIVER_ERROR(id)	(__PRELIM_I915_PMU_DRIVER_ERROR_EVENT_ID_OFFSET + (id))
198 
/* PRELIM ioctl's */

/* PRELIM ioctl numbers go down from 0x5f */
#define PRELIM_DRM_I915_RESERVED_FOR_VERSION	0x5f
/* 0x5e is free, please use if needed */
#define PRELIM_DRM_I915_GEM_VM_BIND		0x5d
#define PRELIM_DRM_I915_GEM_VM_UNBIND		0x5c
#define PRELIM_DRM_I915_GEM_VM_ADVISE		0x5b
#define PRELIM_DRM_I915_GEM_WAIT_USER_FENCE	0x5a
#define PRELIM_DRM_I915_GEM_VM_PREFETCH		0x59
#define PRELIM_DRM_I915_UUID_REGISTER		0x58
#define PRELIM_DRM_I915_UUID_UNREGISTER		0x57
#define PRELIM_DRM_I915_DEBUGGER_OPEN		0x56
#define PRELIM_DRM_I915_GEM_CLOS_RESERVE	0x55
#define PRELIM_DRM_I915_GEM_CLOS_FREE		0x54
#define PRELIM_DRM_I915_GEM_CACHE_RESERVE	0x53
/* VM get/set and object setparam reuse the context get/setparam ioctl numbers */
#define PRELIM_DRM_I915_GEM_VM_GETPARAM		DRM_I915_GEM_CONTEXT_GETPARAM
#define PRELIM_DRM_I915_GEM_VM_SETPARAM		DRM_I915_GEM_CONTEXT_SETPARAM
#define PRELIM_DRM_I915_GEM_OBJECT_SETPARAM	DRM_I915_GEM_CONTEXT_SETPARAM
#define PRELIM_DRM_I915_PXP_OPS			0x52


#define PRELIM_DRM_IOCTL_I915_GEM_CREATE_EXT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct prelim_drm_i915_gem_create_ext)
#define PRELIM_DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_VM_BIND, struct prelim_drm_i915_gem_vm_bind)
#define PRELIM_DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_VM_UNBIND, struct prelim_drm_i915_gem_vm_bind)
#define PRELIM_DRM_IOCTL_I915_GEM_VM_ADVISE		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_VM_ADVISE, struct prelim_drm_i915_gem_vm_advise)
#define PRELIM_DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_WAIT_USER_FENCE, struct prelim_drm_i915_gem_wait_user_fence)
#define PRELIM_DRM_IOCTL_I915_GEM_VM_PREFETCH		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_VM_PREFETCH, struct prelim_drm_i915_gem_vm_prefetch)
#define PRELIM_DRM_IOCTL_I915_UUID_REGISTER		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_UUID_REGISTER, struct prelim_drm_i915_uuid_control)
#define PRELIM_DRM_IOCTL_I915_UUID_UNREGISTER		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_UUID_UNREGISTER, struct prelim_drm_i915_uuid_control)
#define PRELIM_DRM_IOCTL_I915_DEBUGGER_OPEN		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_DEBUGGER_OPEN, struct prelim_drm_i915_debugger_open_param)
#define PRELIM_DRM_IOCTL_I915_GEM_CLOS_RESERVE		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_CLOS_RESERVE, struct prelim_drm_i915_gem_clos_reserve)
#define PRELIM_DRM_IOCTL_I915_GEM_CLOS_FREE		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_CLOS_FREE, struct prelim_drm_i915_gem_clos_free)
#define PRELIM_DRM_IOCTL_I915_GEM_CACHE_RESERVE		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_CACHE_RESERVE, struct prelim_drm_i915_gem_cache_reserve)
#define PRELIM_DRM_IOCTL_I915_GEM_VM_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_VM_GETPARAM, struct prelim_drm_i915_gem_vm_param)
#define PRELIM_DRM_IOCTL_I915_GEM_VM_SETPARAM		DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_VM_SETPARAM, struct prelim_drm_i915_gem_vm_param)
#define PRELIM_DRM_IOCTL_I915_GEM_OBJECT_SETPARAM	DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_GEM_OBJECT_SETPARAM, struct prelim_drm_i915_gem_object_param)
#define PRELIM_DRM_IOCTL_I915_PXP_OPS			DRM_IOWR(DRM_COMMAND_BASE + PRELIM_DRM_I915_PXP_OPS, struct prelim_drm_i915_pxp_ops)

/* End PRELIM ioctl's */

/* getparam */
/* PRELIM getparam ids set bit 16 to stay clear of upstream I915_PARAM_* values */
#define PRELIM_I915_PARAM               (1 << 16)
/*
 * Querying I915_PARAM_EXECBUF2_MAX_ENGINE will return the number of context
 * map engines addressable via the low bits of execbuf2 flags, or in the
 * cases where the parameter is not supported (-EINVAL), legacy maximum of
 * 64 engines should be assumed.
 */
#define PRELIM_I915_PARAM_EXECBUF2_MAX_ENGINE	(PRELIM_I915_PARAM | 1)

/* Total local memory in bytes */
#define PRELIM_I915_PARAM_LMEM_TOTAL_BYTES	(PRELIM_I915_PARAM | 2)

/* Available local memory in bytes */
#define PRELIM_I915_PARAM_LMEM_AVAIL_BYTES	(PRELIM_I915_PARAM | 3)

/* Shared Virtual Memory (SVM) support capability */
#define PRELIM_I915_PARAM_HAS_SVM		(PRELIM_I915_PARAM | 4)

/*
 * Frequency of the timestamps in OA reports. This used to be the same as the CS
 * timestamp frequency, but differs on some platforms.
 */
#define PRELIM_I915_PARAM_OA_TIMESTAMP_FREQUENCY	(PRELIM_I915_PARAM | 5)

/* VM_BIND feature availability */
#define PRELIM_I915_PARAM_HAS_VM_BIND	(PRELIM_I915_PARAM | 6)

/* Recoverable pagefault support */
#define PRELIM_I915_PARAM_HAS_PAGE_FAULT	(PRELIM_I915_PARAM | 7)
/* End getparam */
271 
/*
 * Creation parameters for a GEM object, extensible via a chain of
 * i915_user_extension nodes (ids PRELIM_I915_GEM_CREATE_EXT_* below).
 */
struct prelim_drm_i915_gem_create_ext {

	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	/* Padding to 64-bit align the following field; presumably mbz — confirm */
	__u32 pad;
#define PRELIM_I915_GEM_CREATE_EXT_SETPARAM		(PRELIM_I915_USER_EXT | 1)
#define PRELIM_I915_GEM_CREATE_EXT_PROTECTED_CONTENT	(PRELIM_I915_USER_EXT | 2)
/* All bits outside the two known extension ids above */
#define PRELIM_I915_GEM_CREATE_EXT_FLAGS_UNKNOWN \
	(~(PRELIM_I915_GEM_CREATE_EXT_SETPARAM | PRELIM_I915_GEM_CREATE_EXT_PROTECTED_CONTENT))
	/* Pointer to the first extension in the chain (0 when unused) */
	__u64 extensions;
};
293 
/*
 * A single parameter applied to a GEM object, either directly by handle or
 * at creation time via PRELIM_I915_GEM_CREATE_EXT_SETPARAM.
 */
struct prelim_drm_i915_gem_object_param {
	/* Object handle (0 for I915_GEM_CREATE_EXT_SETPARAM) */
	__u32 handle;

	/* Data pointer size */
	__u32 size;

/*
 * PRELIM_I915_OBJECT_PARAM:
 *
 * Select object namespace for the param.
 */
#define PRELIM_I915_OBJECT_PARAM  (1ull << 48)

/*
 * PRELIM_I915_PARAM_MEMORY_REGIONS:
 *
 * Set the data pointer with the desired set of placements in priority
 * order(each entry must be unique and supported by the device), as an array of
 * prelim_drm_i915_gem_memory_class_instance, or an equivalent layout of class:instance
 * pair encodings. See PRELIM_DRM_I915_QUERY_MEMORY_REGIONS for how to query the
 * supported regions.
 *
 * Note that this requires the PRELIM_I915_OBJECT_PARAM namespace:
 *	.param = PRELIM_I915_OBJECT_PARAM | PRELIM_I915_PARAM_MEMORY_REGIONS
 */
#define PRELIM_I915_PARAM_MEMORY_REGIONS ((1 << 16) | 0x1)
	/* Namespace bit (PRELIM_I915_OBJECT_PARAM) ORed with a param id */
	__u64 param;

	/* Data value or pointer */
	__u64 data;
};
326 
/* Payload for PRELIM_I915_GEM_CREATE_EXT_SETPARAM: one object param applied at creation */
struct prelim_drm_i915_gem_create_ext_setparam {
	struct i915_user_extension base;
	struct prelim_drm_i915_gem_object_param param;
};
331 
/**
 * struct drm_i915_gem_create_ext_protected_content - The
 * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
 *
 * If this extension is provided, buffer contents are expected to be
 * protected by PXP encryption and requires decryption for scan out
 * and processing. This is only possible on platforms that have PXP enabled,
 * on all other scenarios using this extension will cause the ioctl to fail
 * and return -ENODEV. The flags parameter is reserved for future expansion and
 * must currently be set to zero.
 *
 * The buffer contents are considered invalid after a PXP session teardown.
 *
 * The encryption is guaranteed to be processed correctly only if the object
 * is submitted with a context created using the
 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
 * at submission time on the validity of the objects involved, which can lead to
 * the following errors being returned from the execbuf ioctl:
 *
 * -ENODEV: PXP session not currently active
 * -ENOEXEC: buffer has become invalid after a teardown event
 */
struct prelim_drm_i915_gem_create_ext_protected_content {
	struct i915_user_extension base;
	__u32 flags; /* reserved for future expansion; must be zero */
};
358 
/*
 * struct pxp_set_session_status_params - Params to reserve, set or destroy
 * the session from the PXP state machine.
 */
struct prelim_drm_i915_pxp_set_session_status_params {
	__u32 pxp_tag; /* in/out, session identifier tag */
	__u32 session_type; /* in, session type */
	__u32 session_mode; /* in, session mode; one of the PXP_MODE values below */
#define PRELIM_DRM_I915_PXP_MODE_LM 0
#define PRELIM_DRM_I915_PXP_MODE_HM 1
#define PRELIM_DRM_I915_PXP_MODE_SM 2

	__u32 req_session_state; /* in, new session state; one of the values below */
	/* Request KMD to allocate session id and move it to INIT */
#define PRELIM_DRM_I915_PXP_REQ_SESSION_ID_INIT 0
	/* Inform KMD that UMD has completed the initialization */
#define PRELIM_DRM_I915_PXP_REQ_SESSION_IN_PLAY 1
	/* Request KMD to terminate the session */
#define PRELIM_DRM_I915_PXP_REQ_SESSION_TERMINATE 2
} __attribute__((packed));
379 
/*
 * struct pxp_tee_io_message_params - Params to send/receive message to/from TEE.
 */
struct prelim_drm_i915_pxp_tee_io_message_params {
	__u64 msg_in; /* in - pointer to buffer containing input message */
	__u32 msg_in_size; /* in - input message size */
	__u64 msg_out; /* in - pointer to buffer to store the output message */
	__u32 msg_out_buf_size; /* in -  provided output message buffer size */
	__u32 msg_out_ret_size; /* out- output message actual size returned from TEE */
} __attribute__((packed));
390 
/*
 * struct drm_i915_pxp_query_tag - Params to query the PXP tag of specified
 * session id and whether the session is alive from PXP state machine.
 */
struct prelim_drm_i915_pxp_query_tag {
	__u32 session_is_alive; /* out - nonzero if the session is alive */

	/*
	 * in  - Session ID, out pxp tag.
	 * Tag format:
	 * bits   0-6: session id
	 * bit      7: rsvd
	 * bits  8-15: instance id
	 * bit     16: session enabled
	 * bit     17: mode hm
	 * bit     18: rsvd
	 * bit     19: mode sm
	 * bits 20-31: rsvd
	 */
	__u32 pxp_tag;
#define PRELIM_DRM_I915_PXP_TAG_SESSION_ID_MASK		(0x7f)
#define PRELIM_DRM_I915_PXP_TAG_INSTANCE_ID_MASK	(0xff << 8)
#define PRELIM_DRM_I915_PXP_TAG_SESSION_ENABLED		(0x1 << 16)
#define PRELIM_DRM_I915_PXP_TAG_SESSION_HM		(0x1 << 17)
/* bit 18 is reserved (see table above), hence the gap between HM and SM */
#define PRELIM_DRM_I915_PXP_TAG_SESSION_SM		(0x1 << 19)
} __attribute__((packed));
417 
/*
 * DRM_I915_PXP_OPS -
 *
 * PXP is an i915 component, that helps user space to establish the hardware
 * protected session and manage the status of each alive software session,
 * as well as the life cycle of each session.
 *
 * This ioctl is to allow user space driver to create, set, and destroy each
 * session. It also provides the communication channel to TEE (Trusted
 * Execution Environment) for the protected hardware session creation.
 */

struct prelim_drm_i915_pxp_ops {
	__u32 action; /* in - specified action of this operation */
#define PRELIM_DRM_I915_PXP_ACTION_SET_SESSION_STATUS 0
#define PRELIM_DRM_I915_PXP_ACTION_TEE_IO_MESSAGE 1
#define PRELIM_DRM_I915_PXP_ACTION_QUERY_PXP_TAG 2

	__u32 status; /* out - status output for this operation */
#define PRELIM_DRM_I915_PXP_OP_STATUS_SUCCESS 0
#define PRELIM_DRM_I915_PXP_OP_STATUS_RETRY_REQUIRED 1
#define PRELIM_DRM_I915_PXP_OP_STATUS_SESSION_NOT_AVAILABLE 2
#define PRELIM_DRM_I915_PXP_OP_STATUS_ERROR_UNKNOWN 3

	__u64 params; /* in/out - pointer to data matching the action */
} __attribute__((packed));
444 
/* Perf interface revision that introduced the PRELIM additions below */
#define PRELIM_PERF_VERSION	(1000)

/**
 * Returns OA buffer properties to be used with mmap.
 *
 * This ioctl is available in perf revision 1000.
 */
#define PRELIM_I915_PERF_IOCTL_GET_OA_BUFFER_INFO _IOWR('i', 0x80, struct prelim_drm_i915_perf_oa_buffer_info)

/**
 * OA buffer size and offset.
 *
 * OA output buffer
 *   type: 0
 *   flags: mbz
 *
 *   After querying the info, pass (size,offset) to mmap(),
 *
 *   mmap(0, info.size, PROT_READ, MAP_PRIVATE, perf_fd, info.offset).
 *
 *   Note that only a private (not shared between processes, or across fork())
 *   read-only mmapping is allowed.
 *
 *   Userspace must treat the incoming data as tainted, but it conforms to the OA
 *   format as specified by user config. The buffer provides reports that have
 *   OA counters - A, B and C.
 */
struct prelim_drm_i915_perf_oa_buffer_info {
	__u32 type;   /* in */
	__u32 flags;  /* in */
	__u64 size;   /* out */
	__u64 offset; /* out */
	__u64 rsvd;   /* mbz */
};
479 
/*
 * Properties for opening an EU stall data stream. Ids start at 1001;
 * subsequent enumerators auto-increment from there.
 */
enum prelim_drm_i915_eu_stall_property_id {
	/**
	 * This field specifies the Per DSS Memory Buffer Size.
	 * Valid values are 128 KB, 256 KB, and 512 KB.
	 */
	PRELIM_DRM_I915_EU_STALL_PROP_BUF_SZ = 1001,

	/**
	 * This field specifies the sampling rate per tile
	 * in multiples of 251 cycles. Valid values are 1 to 7.
	 * If the value is 1, sampling interval is 251 cycles.
	 * If the value is 7, sampling interval is 7 x 251 cycles.
	 */
	PRELIM_DRM_I915_EU_STALL_PROP_SAMPLE_RATE,

	/**
	 * This field specifies the EU stall data poll period
	 * in nanoseconds. Minimum allowed value is 100 ms
	 * (i.e. 100,000,000 ns — NOTE(review): original text mixed
	 * units; confirm against the driver). A default value is
	 * used by the driver if this field is not specified.
	 */
	PRELIM_DRM_I915_EU_STALL_PROP_POLL_PERIOD,

	PRELIM_DRM_I915_EU_STALL_PROP_ENGINE_CLASS,

	PRELIM_DRM_I915_EU_STALL_PROP_ENGINE_INSTANCE,

	PRELIM_DRM_I915_EU_STALL_PROP_MAX
};
509 
/*
 * Info that the driver adds to each entry in the EU stall counters data.
 */
struct prelim_drm_i915_stall_cntr_info {
	__u16 subslice; /* subslice the stall data entry belongs to */
	__u16 flags; /* PRELIM_I915_EUSTALL_FLAG_* bits */
/* EU stall data line dropped due to memory buffer being full */
#define PRELIM_I915_EUSTALL_FLAG_OVERFLOW_DROP	(1 << 8)
};
519 
/* Namespace container for PRELIM flags used with the perf open ioctl */
struct prelim_drm_i915_perf_open_param {
	/* PRELIM flags */
	/* Open the stream fd for EU stall data sampling */
#define PRELIM_I915_PERF_FLAG_FD_EU_STALL	(1 << 16)
};
524 
/* A (class, instance) pair identifying one memory region on the device */
struct prelim_drm_i915_gem_memory_class_instance {
	__u16 memory_class; /* see enum prelim_drm_i915_gem_memory_class */
	__u16 memory_instance; /* which instance of that class */
};
529 
/*
 * Namespace container for PRELIM query ids used with drm_i915_query_item.
 * PRELIM query ids set bit 16; PRELIM_DRM_I915_QUERY_MASK() recovers the
 * low 16-bit query number.
 */
struct prelim_drm_i915_query_item {
#define PRELIM_DRM_I915_QUERY			(1 << 16)
/* Argument parenthesized so expressions like MASK(a | b) bind correctly */
#define PRELIM_DRM_I915_QUERY_MASK(x)		((x) & 0xffff)
/* Keep lower 16 bits same as previous values */
#define PRELIM_DRM_I915_QUERY_MEMORY_REGIONS	(PRELIM_DRM_I915_QUERY | 4)
#define PRELIM_DRM_I915_QUERY_DISTANCE_INFO	(PRELIM_DRM_I915_QUERY | 5)
	/**
	 * Query HWConfig Table: Copies a device information table to the
	 * query's item.data_ptr directly if the allocated length is big enough
	 * For details about table format and content see intel_hwconfig_types.h
	 */
#define PRELIM_DRM_I915_QUERY_HWCONFIG_TABLE	(PRELIM_DRM_I915_QUERY | 6)
#define PRELIM_DRM_I915_QUERY_GEOMETRY_SLICES	(PRELIM_DRM_I915_QUERY | 7)
#define PRELIM_DRM_I915_QUERY_COMPUTE_SLICES	(PRELIM_DRM_I915_QUERY | 8)
	/**
	 * Query Command Streamer timestamp register.
	 */
#define PRELIM_DRM_I915_QUERY_CS_CYCLES		(PRELIM_DRM_I915_QUERY | 9)

#define PRELIM_DRM_I915_QUERY_FABRIC_INFO	(PRELIM_DRM_I915_QUERY | 11)

#define PRELIM_DRM_I915_QUERY_HW_IP_VERSION	(PRELIM_DRM_I915_QUERY | 12)

#define PRELIM_DRM_I915_QUERY_ENGINE_INFO	(PRELIM_DRM_I915_QUERY | 13)
#define PRELIM_DRM_I915_QUERY_L3_BANK_COUNT	(PRELIM_DRM_I915_QUERY | 14)
};
556 
/*
 * Number of BB in execbuf2 IOCTL - 1, used to submit more than BB in a single
 * execbuf2 IOCTL.
 *
 * Return -EINVAL if more than 1 BB (value 0) is specified if
 * PRELIM_I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT hasn't been called on the gem
 * context first. Also returns -EINVAL if gem context has been setup with
 * I915_PARALLEL_BB_PREEMPT_BOUNDARY and the number BBs not equal to the total
 * number hardware contexts in the gem context.
 */
/* Count occupies bits 48-53; bit 54 is the MSB of the 7-bit count field */
#define PRELIM_I915_EXEC_NUMBER_BB_LSB		(48)
#define PRELIM_I915_EXEC_NUMBER_BB_MASK		(0x3full << PRELIM_I915_EXEC_NUMBER_BB_LSB)
#define PRELIM_I915_EXEC_NUMBER_BB_MSB		(54)
#define PRELIM_I915_EXEC_NUMBER_BB_MASK_MSB	(1ull << PRELIM_I915_EXEC_NUMBER_BB_MSB)

/*
 * In XEHPSDV total number of engines can be more than the maximum supported
 * engines by I915_EXEC_RING_MASK.
 * PRELIM_I915_EXEC_ENGINE_MASK expands the total number of engines from 64 to 256.
 *
 * To use PRELIM_I915_EXEC_ENGINE_MASK, userspace needs to query
 * I915_PARAM_EXECBUF2_MAX_ENGINE. On getting valid value, userspace needs
 * to set PRELIM_I915_EXEC_ENGINE_MASK_SELECT to enable PRELIM_I915_EXEC_ENGINE_MASK.
 *
 * Bitfield associated with legacy I915_EXEC_CONSTANTS_MASK which was
 * restricted previously, will be utilized by PRELIM_I915_EXEC_ENGINE_MASK.
 *
 * PRELIM_I915_EXEC_ENGINE_MASK only applies to contexts with engine map set up.
 */
#define PRELIM_I915_EXEC_ENGINE_MASK    (0xff)
#define PRELIM_I915_EXEC_ENGINE_MASK_SELECT (1ull << 55)

/*
 * NOTE(review): GENMASK_ULL is a kernel-internal helper, not normally
 * available to userspace including this header — confirm consumers
 * provide a definition.
 */
#define __PRELIM_I915_EXEC_UNKNOWN_FLAGS (~(GENMASK_ULL(55, 48) | ~__I915_EXEC_UNKNOWN_FLAGS))

/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1	Low priority
 * 0		Normal priority
 * 1 to 1k	Highest priority
 */
#define   PRELIM_I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 31)
600 
/*
 * PRELIM engine classes set bit 8 so they do not collide with upstream
 * enum drm_i915_gem_engine_class values; PRELIM_I915_ENGINE_CLASS_MASK()
 * recovers the low 8-bit class id.
 */
enum prelim_drm_i915_gem_engine_class {
#define	PRELIM_I915_ENGINE_CLASS		(1 << 8)
/* Argument parenthesized so expressions like MASK(a | b) bind correctly */
#define	PRELIM_I915_ENGINE_CLASS_MASK(x)	((x) & 0xff)

	PRELIM_I915_ENGINE_CLASS_COMPUTE = 4,
};
607 
/*
 * prelim_i915_context_engines_parallel_submit:
 *
 * Setup a gem context to allow multiple BBs to be submitted in a single execbuf
 * IOCTL. Those BBs will then be scheduled to run on the GPU in parallel.
 *
 * All hardware contexts in the engine set are configured for parallel
 * submission (i.e. once this gem context is configured for parallel submission,
 * all the hardware contexts, regardless if a BB is available on each individual
 * context, will be submitted to the GPU in parallel). A user can submit BBs to
 * subset (or superset) of the hardware contexts, in a single execbuf IOCTL, but
 * it is not recommended as it may reserve physical engines with nothing to run
 * on them. Highly recommended to configure the gem context with N hardware
 * contexts then always submit N BBs in a single IOCTL.
 *
 * There are two currently defined ways to control the placement of the
 * hardware contexts on physical engines: default behavior (no flags) and
 * PRELIM_I915_PARALLEL_IMPLICT_BONDS (a flag). More flags may be added in the
 * future as new hardware / use cases arise. Details of how to use this
 * interface are below, above the flags.
 *
 * Returns -EINVAL if hardware context placement configuration invalid or if the
 * placement configuration isn't supported on the platform / submission
 * interface.
 */
struct prelim_i915_context_engines_parallel_submit {
	struct i915_user_extension base;

/*
 * Default placement behavior (currently unsupported):
 *
 * Rather than restricting parallel submission to a single class with a
 * logically contiguous placement (PRELIM_I915_PARALLEL_IMPLICT_BONDS), add a mode that
 * enables parallel submission across multiple engine classes. In this case each
 * context's logical engine mask indicates where that context can be placed
 * compared to the flag, PRELIM_I915_PARALLEL_IMPLICT_BONDS, where only the first
 * context's logical mask controls the placement. It is implied in this mode
 * that all contexts have mutual exclusive placement (e.g. if one context is
 * running VCS0 no other contexts can run on VCS0).
 *
 * Example 1 pseudo code:
 * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 * set_engines(INVALID, INVALID)
 * set_load_balance(engine_index=0, num_siblings=4, engines=VCS0,VCS1,VCS2,VCS3)
 * set_load_balance(engine_index=1, num_siblings=4, engines=RCS0,RCS1,RCS2,RCS3)
 * set_parallel()
 *
 * Results in the following valid placements:
 * VCS0, RCS0
 * VCS0, RCS1
 * VCS0, RCS2
 * VCS0, RCS3
 * VCS1, RCS0
 * VCS1, RCS1
 * VCS1, RCS2
 * VCS1, RCS3
 * VCS2, RCS0
 * VCS2, RCS1
 * VCS2, RCS2
 * VCS2, RCS3
 * VCS3, RCS0
 * VCS3, RCS1
 * VCS3, RCS2
 * VCS3, RCS3
 *
 * Example 2 pseudo code:
 * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 * set_engines(INVALID, INVALID)
 * set_load_balance(engine_index=0, num_siblings=3, engines=VCS0,VCS1,VCS2)
 * set_load_balance(engine_index=1, num_siblings=3, engines=VCS0,VCS1,VCS2)
 * set_parallel()
 *
 * Results in the following valid placements:
 * VCS0, VCS1
 * VCS0, VCS2
 * VCS1, VCS0
 * VCS1, VCS2
 * VCS2, VCS0
 * VCS2, VCS1
 *
 * This enables a use case where all engines are created equally, we don't care
 * where they are scheduled, we just want a certain number of resources, for
 * those resources to be scheduled in parallel, and possibly across multiple
 * engine classes.
 *
 * This mode is not supported with GuC submission gen12 or any prior platforms,
 * but could be supported in execlists mode. Future GuC platforms may support
 * this.
 */

/*
 * PRELIM_I915_PARALLEL_IMPLICT_BONDS - Create implicit bonds between each
 * context. Each context must have the same number of siblings and bonds are
 * implicitly created between the siblings. (Note: the flag name itself keeps
 * the historical "IMPLICT" spelling for ABI compatibility.)
 *
 * All of the below examples are in logical space.
 *
 * Example 1 pseudo code:
 * set_engines(VCS0, VCS1)
 * set_parallel(flags=PRELIM_I915_PARALLEL_IMPLICT_BONDS)
 *
 * Results in the following valid placements:
 * VCS0, VCS1
 *
 * Example 2 pseudo code:
 * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 * set_engines(INVALID, INVALID)
 * set_load_balance(engine_index=0, num_siblings=4, engines=VCS0,VCS2,VCS4,VCS6)
 * set_load_balance(engine_index=1, num_siblings=4, engines=VCS1,VCS3,VCS5,VCS7)
 * set_parallel(flags=PRELIM_I915_PARALLEL_IMPLICT_BONDS)
 *
 * Results in the following valid placements:
 * VCS0, VCS1
 * VCS2, VCS3
 * VCS4, VCS5
 * VCS6, VCS7
 *
 * Example 3 pseudo code:
 * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 * set_engines(INVALID, INVALID, INVALID, INVALID)
 * set_load_balance(engine_index=0, num_siblings=2, engines=VCS0,VCS4)
 * set_load_balance(engine_index=1, num_siblings=2, engines=VCS1,VCS5)
 * set_load_balance(engine_index=2, num_siblings=2, engines=VCS2,VCS6)
 * set_load_balance(engine_index=3, num_siblings=2, engines=VCS3,VCS7)
 * set_parallel(flags=PRELIM_I915_PARALLEL_IMPLICT_BONDS)
 *
 * Results in the following valid placements:
 * VCS0, VCS1, VCS2, VCS3
 * VCS4, VCS5, VCS6, VCS7
 *
 * This enables a use case where all engines are not equal and certain placement
 * rules are required (i.e. split-frame requires all contexts to be placed in a
 * logically contiguous order on the VCS engines on gen11/gen12 platforms). This
 * use case (logically contiguous placement, within a single engine class) is
 * supported when using GuC submission. Execlist mode could support all possible
 * bonding configurations.
 */
#define PRELIM_I915_PARALLEL_IMPLICT_BONDS	(1ull << 63)
/*
 * Do not allow BBs to be preempted mid BB rather insert coordinated preemption
 * points on all hardware contexts between each BB. An example use case of this
 * feature is split-frame on gen11 or gen12 hardware. When using this feature a
 * BB must be submitted on each hardware context in the parallel gem context.
 * The execbuf2 IOCTL enforces the user adheres to policy.
 */
#define PRELIM_I915_PARALLEL_BATCH_PREEMPT_BOUNDARY	(1ull << 62)
/* All bits except the two flag bits above (63 and 62) are unknown/mbz */
#define __PRELIM_I915_PARALLEL_UNKNOWN_FLAGS		(~GENMASK_ULL(63, 62))
	__u64 flags; /* all undefined flags must be zero */
	__u64 mbz64[4]; /* reserved for future use; must be zero */
} __attribute__ ((packed));
758 
759 /**
760  * struct prelim_drm_i915_context_engines_parallel2_submit - Configure engine
761  * for parallel submission.
762  *
763  * Setup a slot in the context engine map to allow multiple BBs to be submitted
764  * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
765  * in parallel. Multiple hardware contexts are created internally in the i915
 * to run these BBs. Once a slot is configured for N BBs only N BBs can be
767  * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user
768  * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
769  * many BBs there are based on the slot's configuration. The N BBs are the last
770  * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
771  *
772  * The default placement behavior is to create implicit bonds between each
773  * context if each context maps to more than 1 physical engine (e.g. context is
774  * a virtual engine). Also we only allow contexts of same engine class and these
775  * contexts must be in logically contiguous order. Examples of the placement
 * behavior described below. Lastly, the default is to not allow BBs to be
 * preempted mid BB; rather, coordinated preemption is inserted on all hardware
778  * contexts between each set of BBs. Flags may be added in the future to change
779  * both of these default behaviors.
780  *
781  * Returns -EINVAL if hardware context placement configuration is invalid or if
782  * the placement configuration isn't supported on the platform / submission
783  * interface.
784  * Returns -ENODEV if extension isn't supported on the platform / submission
 * interface.
786  *
787  * .. code-block::
788  *
789  *	Example 1 pseudo code:
790  *	CS[X] = generic engine of same class, logical instance X
791  *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
792  *	set_engines(INVALID)
793  *	set_parallel(engine_index=0, width=2, num_siblings=1,
794  *		     engines=CS[0],CS[1])
795  *
796  *	Results in the following valid placement:
797  *	CS[0], CS[1]
798  *
799  *	Example 2 pseudo code:
800  *	CS[X] = generic engine of same class, logical instance X
801  *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
802  *	set_engines(INVALID)
803  *	set_parallel(engine_index=0, width=2, num_siblings=2,
804  *		     engines=CS[0],CS[2],CS[1],CS[3])
805  *
806  *	Results in the following valid placements:
807  *	CS[0], CS[1]
808  *	CS[2], CS[3]
809  *
810  *	This can also be thought of as 2 virtual engines described by 2-D array
811  *	in the engines the field with bonds placed between each index of the
812  *	virtual engines. e.g. CS[0] is bonded to CS[1], CS[2] is bonded to
813  *	CS[3].
814  *	VE[0] = CS[0], CS[2]
815  *	VE[1] = CS[1], CS[3]
816  *
817  *	Example 3 pseudo code:
818  *	CS[X] = generic engine of same class, logical instance X
819  *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
820  *	set_engines(INVALID)
821  *	set_parallel(engine_index=0, width=2, num_siblings=2,
822  *		     engines=CS[0],CS[1],CS[1],CS[3])
823  *
824  *	Results in the following valid and invalid placements:
825  *	CS[0], CS[1]
826  *	CS[1], CS[3] - Not logical contiguous, return -EINVAL
827  */
struct prelim_drm_i915_context_engines_parallel2_submit {
	/**
	 * @base: base user extension.
	 */
	struct i915_user_extension base;

	/**
	 * @engine_index: slot for parallel engine
	 */
	__u16 engine_index;

	/**
	 * @width: number of contexts per parallel engine
	 */
	__u16 width;

	/**
	 * @num_siblings: number of siblings per context
	 */
	__u16 num_siblings;

	/**
	 * @mbz16: reserved for future use; must be zero
	 */
	__u16 mbz16;

	/**
	 * @flags: all undefined flags must be zero, currently not defined flags
	 */
	__u64 flags;

	/**
	 * @mbz64: reserved for future use; must be zero
	 */
	__u64 mbz64[3];

	/**
	 * @engines: 2-d array of engine instances to configure parallel engine
	 *
	 * length = width (i) * num_siblings (j)
	 * index = j + i * num_siblings
	 *
	 * e.g. with width=2, num_siblings=2: engines[0..1] are the siblings
	 * of context 0 and engines[2..3] are the siblings of context 1.
	 */
	struct i915_engine_class_instance engines[0];
} __attribute__ ((packed));
872 
/*
 * Extension ids for the context engines setparam chain; offset by
 * PRELIM_I915_USER_EXT, presumably to stay clear of upstream extension
 * ids (keep synchronized with i915_drm.h).
 */
struct prelim_i915_context_param_engines {
#define PRELIM_I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT (PRELIM_I915_USER_EXT | 2) /* see prelim_i915_context_engines_parallel_submit */
#define PRELIM_I915_CONTEXT_ENGINES_EXT_PARALLEL2_SUBMIT (PRELIM_I915_USER_EXT | 3) /* see prelim_i915_context_engines_parallel2_submit */
};
877 
878 /* PRELIM OA formats */
/* PRELIM OA formats */
enum prelim_drm_i915_oa_format {
	/*
	 * PRELIM formats start at 128 — presumably above the upstream OA
	 * format values; confirm against i915_drm.h when adding entries.
	 */
	PRELIM_I915_OA_FORMAT_START = 128,

	/* XEHPSDV */
	PRELIM_I915_OAR_FORMAT_A32u40_A4u32_B8_C8 = PRELIM_I915_OA_FORMAT_START,
	PRELIM_I915_OA_FORMAT_A24u40_A14u32_B8_C8,
	PRELIM_I915_OAM_FORMAT_A2u64_B8_C8,

	/* DG2 */
	PRELIM_I915_OAR_FORMAT_A36u64_B8_C8,
	PRELIM_I915_OAC_FORMAT_A24u64_B8_C8,
	PRELIM_I915_OA_FORMAT_A38u64_R2u64_B8_C8,
	PRELIM_I915_OAM_FORMAT_A2u64_R2u64_B8_C8,

	PRELIM_I915_OA_FORMAT_MAX	/* non-ABI */
};
895 
enum prelim_drm_i915_perf_record_type {
/* Namespace bit keeping PRELIM record types clear of upstream ones */
#define PRELIM_DRM_I915_PERF_RECORD	(1 << 16)
	/*
	 * MMIO trigger queue is full.
	 * This record type is available in perf revision 1003.
	 */
	PRELIM_DRM_I915_PERF_RECORD_OA_MMIO_TRG_Q_FULL = (PRELIM_DRM_I915_PERF_RECORD | 1),
};
904 
905 /*
906  * Access Counter programming
907  *
908  * The programmable access counters enable hardware to detect and report
909  * frequently accessed pages. The report generated by hardware can be used by
910  * software for influencing page migration and data placement decisions.
911  *
912  * Once the count reaches the value set by trigger, HW generates trigger
913  * interrupt. DRM driver then starts the page migration from SMEM to
914  * LMEM so the upcoming access to the same page(s) from GPU will access LMEM
 * to achieve better performance.
916  *
917  * Due to the HW capacity limitation, an access counter can be de-allocated on
918  * the fly. If the counter getting de-allocated has reached at least notify
919  * it is reported to SW via interrupt. The driver interrupt handling is TBD.
920  *
921  * The use case is to let the upper layer SW such as Open CL to make the
922  * decision to program all the configurations and the DRM driver will handle
923  * the interrupts generated by HW.
924  *
925  * NOTE: if ac_notify is set to 0, access counter notification reporting is disabled
926  *       if ac_trigger is set to 0, access counter triggering is disabled.
927  *
928  *	Only allowed in i915_gem_context_create_ioctl extension
929  */
struct prelim_drm_i915_gem_context_param_acc {
		/* Access count at which HW raises the trigger interrupt; 0 disables triggering */
		__u16 trigger;
		/* Notification threshold reported at counter de-allocation; 0 disables reporting */
		__u16 notify;
		/* Tracking granularity, one of the PRELIM_I915_CONTEXT_ACG_* values below */
		__u8  granularity;
#define   PRELIM_I915_CONTEXT_ACG_128K      0
#define   PRELIM_I915_CONTEXT_ACG_2M        1
#define   PRELIM_I915_CONTEXT_ACG_16M       2
#define   PRELIM_I915_CONTEXT_ACG_64M       3
		/* Padding; presumably MBZ — confirm against driver checks */
		__u8 pad1;
		__u16 pad2;
};
941 
struct prelim_drm_i915_gem_context_param {
/*
 * I915_CONTEXT_PARAM_DEBUG_FLAGS
 *
 * Set or clear debug flags associated with this context.
 * The flags work with 32 bit masking to enable/disable individual
 * flags. For example to set debug flag of bit position 0, the
 * value needs to be 0x0000000100000001, and to clear flag of
 * bit position 0, the value needs to be 0x0000000100000000.
 * (Upper 32 bits select which flags to modify; lower 32 bits
 * carry the new flag values.)
 *
 */
#define PRELIM_I915_CONTEXT_PARAM		(1 << 16)
#define PRELIM_I915_CONTEXT_PARAM_DEBUG_FLAGS	(PRELIM_I915_CONTEXT_PARAM | 0xfd)

/*
 * Notify driver that SIP is provided with the pipeline setup.
 * Driver raises exception on hang resolution and waits for pipeline's
 * sip to signal attention before capturing state of user objects
 * associated with the context.
 *
 */
#define PRELIM_I915_CONTEXT_PARAM_DEBUG_FLAG_SIP	(1ull << 0)

/*
 *  PRELIM_I915_CONTEXT_PARAM_ACC:
 *
 *  To be able to change the access counter thresholds and configurations.
 *
 *  By default: access counter feature is disabled.
 */
#define PRELIM_I915_CONTEXT_PARAM_ACC		(PRELIM_I915_CONTEXT_PARAM | 0xd)
};
974 
975 /*
976  * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
977  *
978  * Mark that the context makes use of protected content, which will result
979  * in the context being invalidated when the protected content session is.
980  * This flag can only be set at context creation time and, when set to true,
981  * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
982  * to false. This flag can't be set to true in conjunction with setting the
983  * I915_CONTEXT_PARAM_BANNABLE flag to false.
984  *
985  * Given the numerous restriction on this flag, there are several unique
986  * failure cases:
987  *
988  * -ENODEV: feature not available
989  * -EEXIST: trying to modify an existing context
990  * -EPERM: trying to mark a recoverable or not bannable context as protected
991  * -EACCES: submitting an invalidated context for execution
992  */
993 #define PRELIM_I915_CONTEXT_PARAM_PROTECTED_CONTENT (PRELIM_I915_CONTEXT_PARAM | 0xe)
994 
struct prelim_drm_i915_gem_context_create_ext {
#define PRELIM_I915_CONTEXT_CREATE_FLAGS_ULLS		(1u << 31)
/*
 * Unknown-flag mask: everything except the PRELIM ULLS bit and the
 * flags upstream already accepts (~I915_CONTEXT_CREATE_FLAGS_UNKNOWN).
 */
#define PRELIM_I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
	(~(PRELIM_I915_CONTEXT_CREATE_FLAGS_ULLS | ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN))
};
1000 
1001 /*
1002  *  PRELIM_I915_CONTEXT_PARAM_RUNALONE:
1003  *
1004  *  Enable runalone mode on a context, disabled by default.
1005  */
1006 #define PRELIM_I915_CONTEXT_PARAM_RUNALONE      (PRELIM_I915_CONTEXT_PARAM | 0xf)
1007 
1008 /* Downstream PRELIM properties */
/* Downstream PRELIM properties */
enum prelim_drm_i915_perf_property_id {
	PRELIM_DRM_I915_PERF_PROP = (1 << 16),

	/**
	 * Specify a global OA buffer size to be allocated in bytes. The size
	 * specified must be supported by HW (before XEHPSDV supported sizes are
	 * powers of 2 ranging from 128Kb to 16Mb. With XEHPSDV max supported size
	 * is 128Mb).
	 *
	 * This property is available in perf revision 1001.
	 */
	PRELIM_DRM_I915_PERF_PROP_OA_BUFFER_SIZE = (PRELIM_DRM_I915_PERF_PROP | 1),

	/**
	 * Specify the engine class defined in @enum drm_i915_gem_engine_class.
	 * This defaults to I915_ENGINE_CLASS_RENDER or
	 * I915_ENGINE_CLASS_COMPUTE based on the platform.
	 *
	 * This property is available in perf revision 1002
	 *
	 * Perf revision 1004 supports I915_ENGINE_CLASS_VIDEO and
	 * I915_ENGINE_CLASS_VIDEO_ENHANCE.
	 */
	PRELIM_DRM_I915_PERF_PROP_OA_ENGINE_CLASS = (PRELIM_DRM_I915_PERF_PROP | 2),

	/**
	 * Specify the engine instance. Defaults to 0.
	 *
	 * This property is available in perf revision 1002.
	 */
	PRELIM_DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE = (PRELIM_DRM_I915_PERF_PROP | 3),

	PRELIM_DRM_I915_PERF_PROP_LAST,

	/*
	 * Continues the upstream numbering: upstream property count
	 * (DRM_I915_PERF_PROP_MAX - 1) plus the number of PRELIM properties.
	 */
	PRELIM_DRM_I915_PERF_PROP_MAX = DRM_I915_PERF_PROP_MAX - 1 + \
					(PRELIM_DRM_I915_PERF_PROP_LAST & 0xffff)
};
1046 
/*
 * UUID resource registration parameters.
 * NOTE(review): the ioctl consuming this struct is not visible in this
 * header chunk — confirm against the driver's ioctl table.
 */
struct prelim_drm_i915_uuid_control {
	char  uuid[36]; /* String formatted like
			 *      "%08x-%04x-%04x-%04x-%012x"
			 */

	__u32 uuid_class; /* Predefined UUID class or handle to
			   * the previously registered UUID Class
			   */

	__u32 flags;	/* MBZ */

	__u64 ptr;	/* Pointer to CPU memory payload associated
			 * with the UUID Resource.
			 * For uuid_class I915_UUID_CLASS_STRING
			 * it must point to valid string buffer.
			 * Otherwise must point to page aligned buffer
			 * or be NULL.
			 */

	__u64 size;	/* Length of the payload in bytes */

#define PRELIM_I915_UUID_CLASS_STRING	((__u32)-1)
/*
 * d9900de4-be09-56ab-84a5-dfc280f52ee5 =
 *                          sha1("I915_UUID_CLASS_STRING")[0..35]
 */
#define PRELIM_I915_UUID_CLASS_MAX_RESERVED ((__u32)-1024)

	__u32 handle; /* Output: Registered handle ID */

	__u64 extensions; /* MBZ */
};
1079 
1080 /*
1081  * struct prelim_drm_i915_vm_bind_ext_uuid
1082  *
1083  * Used for registering metadata that will be attached to the vm
1084  */
struct prelim_drm_i915_vm_bind_ext_uuid {
/* Extension id — presumably placed in base.name; confirm against users */
#define PRELIM_I915_VM_BIND_EXT_UUID	(PRELIM_I915_USER_EXT | 1)
	struct i915_user_extension base;
	__u32 uuid_handle; /* Handle to the registered UUID resource. */
};
1090 
1091 /**
1092  * Do a debug event read for a debugger connection.
1093  *
1094  * This ioctl is available in debug version 1.
1095  */
1096 #define PRELIM_I915_DEBUG_IOCTL_READ_EVENT _IO('j', 0x0)
1097 #define PRELIM_I915_DEBUG_IOCTL_READ_UUID  _IOWR('j', 0x1, struct prelim_drm_i915_debug_read_uuid)
1098 #define PRELIM_I915_DEBUG_IOCTL_VM_OPEN  _IOW('j', 0x2, struct prelim_drm_i915_debug_vm_open)
1099 #define PRELIM_I915_DEBUG_IOCTL_EU_CONTROL _IOWR('j', 0x3, struct prelim_drm_i915_debug_eu_control)
1100 #define PRELIM_I915_DEBUG_IOCTL_ACK_EVENT _IOW('j', 0x4, struct prelim_drm_i915_debug_event_ack)
1101 
/* Common header for all debug events read via PRELIM_I915_DEBUG_IOCTL_READ_EVENT */
struct prelim_drm_i915_debug_event {
	/* Event type, one of the PRELIM_DRM_I915_DEBUG_EVENT_* values below */
	__u32 type;
#define PRELIM_DRM_I915_DEBUG_EVENT_NONE     0
#define PRELIM_DRM_I915_DEBUG_EVENT_READ     1
#define PRELIM_DRM_I915_DEBUG_EVENT_CLIENT   2
#define PRELIM_DRM_I915_DEBUG_EVENT_CONTEXT  3
#define PRELIM_DRM_I915_DEBUG_EVENT_UUID     4
#define PRELIM_DRM_I915_DEBUG_EVENT_VM       5
#define PRELIM_DRM_I915_DEBUG_EVENT_VM_BIND  6
#define PRELIM_DRM_I915_DEBUG_EVENT_CONTEXT_PARAM 7
#define PRELIM_DRM_I915_DEBUG_EVENT_EU_ATTENTION 8
#define PRELIM_DRM_I915_DEBUG_EVENT_ENGINES 9
#define PRELIM_DRM_I915_DEBUG_EVENT_MAX_EVENT PRELIM_DRM_I915_DEBUG_EVENT_ENGINES

	/* Lifecycle qualifiers; NEED_ACK requests an explicit ack from the debugger */
	__u32 flags;
#define PRELIM_DRM_I915_DEBUG_EVENT_CREATE	(1 << 31)
#define PRELIM_DRM_I915_DEBUG_EVENT_DESTROY	(1 << 30)
#define PRELIM_DRM_I915_DEBUG_EVENT_STATE_CHANGE (1 << 29)
#define PRELIM_DRM_I915_DEBUG_EVENT_NEED_ACK	(1 << 28)
	/* Event sequence number; used to identify the event when acking */
	__u64 seqno;
	/* Size of the event in bytes — presumably including the type-specific body */
	__u64 size;
} __attribute__((packed));
1124 
/* Debugged-client lifetime event */
struct prelim_drm_i915_debug_event_client {
	struct prelim_drm_i915_debug_event base; /* .flags = CREATE/DESTROY */

	__u64 handle; /* This is unique per debug connection */
} __attribute__((packed));
1130 
/* GEM context lifetime event for a debugged client */
struct prelim_drm_i915_debug_event_context {
	struct prelim_drm_i915_debug_event base;

	__u64 client_handle; /* Owning client (see event_client.handle) */
	__u64 handle;
} __attribute__((packed));
1137 
/* Parameters for opening a debugger connection to a target process */
struct prelim_drm_i915_debugger_open_param {
	__u64 pid; /* input: Target process ID */
	__u32 flags;
#define PRELIM_DRM_I915_DEBUG_FLAG_FD_NONBLOCK	(1u << 31)

	__u32 version; /* NOTE(review): in/out direction not visible here — confirm */
	__u64 events;  /* input: event types to subscribe to */
	__u64 extensions; /* MBZ */
};
1147 
/* UUID resource registration event */
struct prelim_drm_i915_debug_event_uuid {
	struct prelim_drm_i915_debug_event base;
	__u64 client_handle;

	__u64 handle;
	__u64 class_handle; /* Can be filtered based on pre-defined classes */
	__u64 payload_size; /* Size of payload readable via READ_UUID ioctl */
} __attribute__((packed));
1156 
/* Address space (VM) lifetime event */
struct prelim_drm_i915_debug_event_vm {
	struct prelim_drm_i915_debug_event base;
	__u64 client_handle;

	__u64 handle;
} __attribute__((packed));
1163 
/* VM bind/unbind event; trailed by num_uuids UUID handles */
struct prelim_drm_i915_debug_event_vm_bind {
	struct prelim_drm_i915_debug_event base;
	__u64 client_handle;

	__u64 vm_handle;
	__u64 va_start;  /* GPU VA of the mapping */
	__u64 va_length;
	__u32 num_uuids;
	__u32 flags;
	__u64 uuids[0];  /* UUID handles attached to this mapping */
} __attribute__((packed));
1175 
/* EU attention event: reports which EU threads have raised attention */
struct prelim_drm_i915_debug_event_eu_attention {
	struct prelim_drm_i915_debug_event base;
	__u64 client_handle;
	__u64 ctx_handle;
	__u64 lrc_handle;

	__u32 flags;

	struct i915_engine_class_instance ci;

	__u32 bitmask_size;

	/**
	 * Bitmask of thread attentions starting from natural
	 * hardware order of slice=0,subslice=0,eu=0, 8 attention
	 * bits per eu.
	 *
	 * NOTE: For dual subslice GENs, the bitmask is for
	 * lockstepped EUs and not for logical EUs. This makes
	 * the bitmask include only half of logical EU count
	 * provided by topology query as we only control the
	 * 'pair' instead of individual EUs.
	 */

	__u8 bitmask[0];
} __attribute__((packed));
1202 
/* Argument for PRELIM_I915_DEBUG_IOCTL_READ_UUID */
struct prelim_drm_i915_debug_read_uuid {
	__u64 client_handle;
	__u64 handle;
	__u32 flags; /* MBZ */
	char uuid[36]; /* output */
	__u64 payload_ptr;  /* input: userspace buffer for the payload */
	__u64 payload_size;
} __attribute__((packed));
1211 
/* Context parameter change event, carrying the upstream param struct */
struct prelim_drm_i915_debug_event_context_param {
	struct prelim_drm_i915_debug_event base;
	__u64 client_handle;
	__u64 ctx_handle;
	struct drm_i915_gem_context_param param;
} __attribute__((packed));
1218 
/* One engine entry inside a prelim_drm_i915_debug_event_engines event */
struct prelim_drm_i915_debug_engine_info {
	struct i915_engine_class_instance engine;
	__u64 lrc_handle;
} __attribute__((packed));
1223 
/* Engines event for a context; trailed by num_engines engine entries */
struct prelim_drm_i915_debug_event_engines {
	struct prelim_drm_i915_debug_event base;
	__u64 client_handle;
	__u64 ctx_handle;
	__u64 num_engines;
	struct prelim_drm_i915_debug_engine_info engines[0];
} __attribute__((packed));
1231 
/* Argument for PRELIM_I915_DEBUG_IOCTL_VM_OPEN */
struct prelim_drm_i915_debug_vm_open {
	__u64 client_handle;
	__u64 handle; /* input: The target address space (ppGTT) */
	__u64 flags;  /* Access mode, one of the O_* values below */
#define PRELIM_I915_DEBUG_VM_OPEN_READ_ONLY	O_RDONLY
#define PRELIM_I915_DEBUG_VM_OPEN_WRITE_ONLY	O_WRONLY
#define PRELIM_I915_DEBUG_VM_OPEN_READ_WRITE	O_RDWR
};
1240 
/* Argument for PRELIM_I915_DEBUG_IOCTL_EU_CONTROL */
struct prelim_drm_i915_debug_eu_control {
	__u64 client_handle;
	/* Operation to perform, one of the CMD values below */
	__u32 cmd;
#define PRELIM_I915_DEBUG_EU_THREADS_CMD_INTERRUPT_ALL 0
#define PRELIM_I915_DEBUG_EU_THREADS_CMD_STOPPED   1
#define PRELIM_I915_DEBUG_EU_THREADS_CMD_RESUME    2
#define PRELIM_I915_DEBUG_EU_THREADS_CMD_INTERRUPT 3
	__u32 flags;
	__u64 seqno;

	struct i915_engine_class_instance ci;
	__u32 bitmask_size;

	/**
	 * Bitmask of thread attentions starting from natural
	 * hardware order of slice=0,subslice=0,eu=0, 8 attention bits
	 * per eu.
	 *
	 * NOTE: For dual subslice GENs, the bitmask is for
	 * lockstepped EUs and not for logical EUs. This makes
	 * the bitmask include only half of logical EU count
	 * provided by topology query as we only control the
	 * 'pair' instead of individual EUs.
	 */
	__u64 bitmask_ptr; /* userspace pointer to the bitmask buffer */
} __attribute__((packed));
1267 
/* Argument for PRELIM_I915_DEBUG_IOCTL_ACK_EVENT: identifies the event by type + seqno */
struct prelim_drm_i915_debug_event_ack {
	__u32 type;
	__u32 flags; /* MBZ */
	__u64 seqno;
} __attribute__((packed));
1273 
enum prelim_drm_i915_gem_memory_class {
	PRELIM_I915_MEMORY_CLASS_SYSTEM = 0, /* system memory (SMEM) */
	PRELIM_I915_MEMORY_CLASS_DEVICE,     /* device-local memory (LMEM) */
	/* No class; used e.g. to clear a preferred-location hint (see vm_advise) */
	PRELIM_I915_MEMORY_CLASS_NONE = -1
};
1279 
1280 /**
1281  * struct prelim_drm_i915_memory_region_info
1282  *
1283  * Describes one region as known to the driver.
1284  */
struct prelim_drm_i915_memory_region_info {
	/** class:instance pair encoding (class per prelim_drm_i915_gem_memory_class) */
	struct prelim_drm_i915_gem_memory_class_instance region;

	/** MBZ */
	__u32 rsvd0;

	/** MBZ */
	__u64 caps;

	/** MBZ */
	__u64 flags;

	/** Memory probed by the driver (-1 = unknown) */
	__u64 probed_size;

	/** Estimate of memory remaining (-1 = unknown) */
	__u64 unallocated_size;

	/** MBZ */
	__u64 rsvd1[8];
};
1307 
1308 /**
1309  * struct prelim_drm_i915_query_memory_regions
1310  *
1311  * Region info query enumerates all regions known to the driver by filling in
1312  * an array of struct prelim_drm_i915_memory_region_info structures.
1313  */
struct prelim_drm_i915_query_memory_regions {
	/** Number of supported regions */
	__u32 num_regions;

	/** MBZ */
	__u32 rsvd[3];

	/** Info about each supported region; num_regions entries follow */
	struct prelim_drm_i915_memory_region_info regions[];
};
1324 
1325 /**
1326  * struct prelim_drm_i915_query_distance_info
1327  *
1328  * Distance info query returns the distance of given (class, instance)
1329  * engine to the memory region id passed by the user. If the distance
1330  * is -1 it means region is unreachable.
1331  */
struct prelim_drm_i915_query_distance_info {
	/** Engine for which distance is queried */
	struct i915_engine_class_instance engine;

	/** Memory region to be used */
	struct prelim_drm_i915_gem_memory_class_instance region;

	/** Distance to region from engine (-1 = region unreachable) */
	__s32 distance;

	/** Must be zero */
	__u32 rsvd[3];
};
1345 
1346 /**
1347  * struct prelim_drm_i915_query_cs_cycles
1348  *
1349  * The query returns the command streamer cycles and the frequency that can be
1350  * used to calculate the command streamer timestamp. In addition the query
1351  * returns the cpu timestamp that indicates when the command streamer cycle
1352  * count was captured.
1353  */
struct prelim_drm_i915_query_cs_cycles {
	/** Engine for which command streamer cycles is queried. */
	struct i915_engine_class_instance engine;

	/** Must be zero. */
	__u32 flags;

	/**
	 * Command streamer cycles as read from the command streamer
	 * register at 0x358 offset.
	 */
	__u64 cs_cycles;

	/** Frequency of the cs cycles in Hz. */
	__u64 cs_frequency;

	/**
	 * CPU timestamp in nanoseconds, captured when the command streamer
	 * cycle count was read.
	 */
	__u64 cpu_timestamp;

	/**
	 * Reference clock id for CPU timestamp. For definition, see
	 * clock_gettime(2) and perf_event_open(2). Supported clock ids are
	 * CLOCK_MONOTONIC, CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME,
	 * CLOCK_TAI.
	 */
	__s32 clockid;

	/** Must be zero. */
	__u32 rsvd;
};
1384 
1385 /**
 * struct prelim_drm_i915_query_hw_ip_version
1387  *
1388  * Hardware IP version (i.e., architecture generation) associated with a
1389  * specific engine.
1390  */
struct prelim_drm_i915_query_hw_ip_version {
	/** Engine to query HW IP version for */
	struct i915_engine_class_instance engine;

	__u8 flags;	/* MBZ */

	/** Architecture version */
	__u8 arch;

	/** Architecture release id */
	__u8 release;

	/** Stepping (e.g., A0, A1, B0, etc.) */
	__u8 stepping;
};
1406 
1407 /**
1408  * struct prelim_drm_i915_query_fabric_info
1409  *
1410  * With the given fabric id, query fabric info wrt the device.
1411  * Higher bandwidth is better.  0 means no fabric.
1412  * Latency is averaged latency (from all paths)
1413  *
1414  * fabric_id can be obtained from
1415  *    /sys/class/drm/cardx/device/iaf.y/iaf_fabric_id
1416  * Bandwidth is in Gigabits per second (max value of 8 * 4 * 90)
1417  *    8 possible ports
1418  *    4 lanes max per port
1419  *   90 gigabits per lane
1420  * Latency is in tenths of path length. 10 == 1 fabric link between src and dst
1421  *   POR is max 1 link (zero hops).
1422  */
struct prelim_drm_i915_query_fabric_info {
	__u32 fabric_id; /* input: fabric id to query (see comment above) */
	__u16 bandwidth; /* output: Gigabits per second; 0 = no fabric */
	__u16 latency;   /* output: tenths of path length; 10 = one fabric link */
};
1428 
1429 /**
1430  * struct prelim_drm_i915_engine_info
1431  *
1432  * Describes one engine and it's capabilities as known to the driver.
1433  */
struct prelim_drm_i915_engine_info {
	/** Engine class and instance. */
	struct i915_engine_class_instance engine;

	/**
	 * SW defined id that identifies the OA unit associated with this
	 * engine. A value of U32_MAX means engine is not supported by OA. All
	 * other values are valid and can be used to group engines into the
	 * associated OA unit.
	 */
	__u32 oa_unit_id;

	/** Engine flags. */
	__u64 flags;
#define PRELIM_I915_ENGINE_INFO_HAS_KNOWN_CAPABILITIES	(1ull << 63)
#define PRELIM_I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE	(1ull << 62)
#define PRELIM_I915_ENGINE_INFO_HAS_OA_UNIT_ID		(1ull << 61)

	/**
	 * Capabilities of this engine. Note that the PRELIM_* capability bits
	 * are allocated from the top (bit 63 down) per engine class, so the
	 * same bit may mean different things for different classes.
	 */
	__u64 capabilities;
#define PRELIM_I915_RENDER_CLASS_CAPABILITY_3D		(1ull << 63)
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)
#define PRELIM_I915_VIDEO_CLASS_CAPABILITY_VDENC	(1ull << 63)
#define I915_VIDEO_CLASS_CAPABILITY_VDENC		(1 << 2)
#define PRELIM_I915_COPY_CLASS_CAP_BLOCK_COPY		(1ull << 63)
	/*
	 * The following are capabilities of the copy engines. While all copy
	 * engines are functionally the same, engines with cap
	 * PRELIM_I915_COPY_CLASS_CAP_SATURATE_LINK
	 * can saturate pcie and scaleup links faster than engines with
	 * PRELIM_I915_COPY_CLASS_CAP_SATURATE_PCIE. Engines having the capability of
	 * PRELIM_I915_COPY_CLASS_CAP_SATURATE_LMEM can operate at HBM speeds.
	 */
#define PRELIM_I915_COPY_CLASS_CAP_SATURATE_PCIE	(1ull << 62)
#define PRELIM_I915_COPY_CLASS_CAP_SATURATE_LINK	(1ull << 61)
#define PRELIM_I915_COPY_CLASS_CAP_SATURATE_LMEM	(1ull << 60)

	/** All known capabilities for this engine class. */
	__u64 known_capabilities;

	/** Logical engine instance */
	__u16 logical_instance;

	/** Reserved fields. */
	__u16 rsvd1[3];
	__u64 rsvd2[2];
};
1481 
1482 /**
1483  * struct drm_i915_query_engine_info
1484  *
1485  * Engine info query enumerates all engines known to the driver by filling in
1486  * an array of struct drm_i915_engine_info structures.
1487  */
struct prelim_drm_i915_query_engine_info {
	/** Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** MBZ */
	__u32 rsvd[3];

	/** Marker for the trailing array of num_engines engine-info entries. */
	struct prelim_drm_i915_engine_info engines[];
};
1498 
1499 /**
1500  * struct prelim_drm_i915_gem_vm_bind
1501  *
1502  * VA to object/buffer mapping to [un]bind.
1503  *
1504  * NOTE:
1505  * A vm_bind will hold a reference on the BO which is released
1506  * during corresponding vm_unbind or while closing the VM.
1507  * Hence closing the BO alone will not ensure BO is released.
1508  */
struct prelim_drm_i915_gem_vm_bind {
	/** vm to [un]bind **/
	__u32 vm_id;

	/** BO handle or file descriptor. Set 'fd' to -1 for system pages **/
	union {
		__u32 handle;
		__s32 fd;
	};

	/** VA start to [un]bind **/
	__u64 start;

	/** Offset in object to [un]bind **/
	__u64 offset;

	/** VA length to [un]bind **/
	__u64 length;

	/**
	 * Flags. BIND_FD presumably selects the union's 'fd' member over
	 * 'handle' — confirm against the driver; the semantics of the other
	 * flags are defined by the driver and not visible in this header.
	 */
	__u64 flags;
#define PRELIM_I915_GEM_VM_BIND_IMMEDIATE	(1ull << 63)
#define PRELIM_I915_GEM_VM_BIND_READONLY	(1ull << 62)
#define PRELIM_I915_GEM_VM_BIND_CAPTURE		(1ull << 61)
#define PRELIM_I915_GEM_VM_BIND_MAKE_RESIDENT	(1ull << 60)
#define PRELIM_I915_GEM_VM_BIND_FD		(1ull << 59)

	/** Chain of i915_user_extension structs; 0 if none */
	__u64 extensions;
};
1538 
1539 /**
1540  * struct prelim_drm_i915_gem_vm_advise
1541  *
1542  * Set attribute (hint) for an address range or whole buffer object.
1543  *
1544  * To apply attribute to whole buffer object, specify:  handle
1545  * To apply attribute to address range, specify:  vm_id, start, and length.
1546  */
struct prelim_drm_i915_gem_vm_advise {
	/** vm that contains address range (specified with start, length) */
	__u32 vm_id;

	/** BO handle to apply hint */
	__u32 handle;

	/** VA start of address range to apply hint */
	__u64 start;

	/** Length of range to apply attribute */
	__u64 length;

	/**
	 * Attributes to apply to address range or buffer object
	 *
	 * ATOMIC_SYSTEM
	 *      inform that atomic access is enabled for both CPU and GPU.
	 *      For some platforms, this may be required for correctness
	 *      and this hint will influence migration policy.
	 * ATOMIC_DEVICE
	 *      inform that atomic access is enabled for GPU devices. For
	 *      some platforms, this may be required for correctness and
	 *      this hint will influence migration policy.
	 * ATOMIC_NONE
	 *	clears above ATOMIC_SYSTEM / ATOMIC_DEVICE hint.
	 * PREFERRED_LOCATION
	 *	sets the preferred memory class and instance for this object's
	 *	backing store.  This is a hint only and not guaranteed to be
	 *	honored.  It is an error to choose a memory region that was not
	 *	part of the original set of placements for the GEM object.
	 *	If choosing a preferred location that is in conflict with the
	 *	use of ATOMIC_SYSTEM or ATOMIC_DEVICE, the atomic hint will
	 *	always be honored first.
	 *	To clear the current preferred location, specify memory class
	 *	as I915_MEMORY_CLASS_NONE.
	 */
	__u32 attribute;
#define PRELIM_I915_VM_ADVISE				(1 << 16)
#define PRELIM_I915_VM_ADVISE_ATOMIC_NONE		(PRELIM_I915_VM_ADVISE | 0)
#define PRELIM_I915_VM_ADVISE_ATOMIC_SYSTEM		(PRELIM_I915_VM_ADVISE | 1)
#define PRELIM_I915_VM_ADVISE_ATOMIC_DEVICE		(PRELIM_I915_VM_ADVISE | 2)
#define PRELIM_I915_VM_ADVISE_PREFERRED_LOCATION	(PRELIM_I915_VM_ADVISE | 3)

	/** Preferred location (memory region) for object backing */
	struct prelim_drm_i915_gem_memory_class_instance region;

	/* NOTE(review): presumably MBZ — confirm against driver checks */
	__u32 rsvd[2];
};
1596 
1597 /**
1598  * struct prelim_drm_i915_gem_wait_user_fence
1599  *
1600  * Wait on user fence. User fence can be woken up either by,
1601  *    1. GPU context indicated by 'ctx_id', or,
1602  *    2. Kerrnel driver async worker upon PRELIM_I915_UFENCE_WAIT_SOFT.
1603  *       'ctx_id' is ignored when this flag is set.
1604  *
1605  * Wakeup when below condition is true.
1606  * (*addr & MASK) OP (VALUE & MASK)
1607  *
1608  */
1609 struct prelim_drm_i915_gem_wait_user_fence {
1610 	__u64 extensions;
1611 	__u64 addr;
1612 	__u32 ctx_id;
1613 	__u16 op;
1614 #define PRELIM_I915_UFENCE		(1 << 8)
1615 #define PRELIM_I915_UFENCE_WAIT_EQ	(PRELIM_I915_UFENCE | 0)
1616 #define PRELIM_I915_UFENCE_WAIT_NEQ	(PRELIM_I915_UFENCE | 1)
1617 #define PRELIM_I915_UFENCE_WAIT_GT	(PRELIM_I915_UFENCE | 2)
1618 #define PRELIM_I915_UFENCE_WAIT_GTE	(PRELIM_I915_UFENCE | 3)
1619 #define PRELIM_I915_UFENCE_WAIT_LT	(PRELIM_I915_UFENCE | 4)
1620 #define PRELIM_I915_UFENCE_WAIT_LTE	(PRELIM_I915_UFENCE | 5)
1621 #define PRELIM_I915_UFENCE_WAIT_BEFORE	(PRELIM_I915_UFENCE | 6)
1622 #define PRELIM_I915_UFENCE_WAIT_AFTER	(PRELIM_I915_UFENCE | 7)
1623 	__u16 flags;
1624 #define PRELIM_I915_UFENCE_WAIT_SOFT	(1 << 15)
1625 #define PRELIM_I915_UFENCE_WAIT_ABSTIME	(1 << 14)
1626 	__u64 value;
1627 	__u64 mask;
1628 #define PRELIM_I915_UFENCE_WAIT_U8     0xffu
1629 #define PRELIM_I915_UFENCE_WAIT_U16    0xffffu
1630 #define PRELIM_I915_UFENCE_WAIT_U32    0xfffffffful
1631 #define PRELIM_I915_UFENCE_WAIT_U64    0xffffffffffffffffull
1632 	__s64 timeout;
1633 };
1634 
/*
 * VM_BIND extension: sync fence.
 * NOTE(review): presumably the driver writes 'val' to 'addr' once the bind
 * operation completes, so userspace can poll/wait on it — confirm against
 * the driver implementation.
 */
struct prelim_drm_i915_vm_bind_ext_sync_fence {
#define PRELIM_I915_VM_BIND_EXT_SYNC_FENCE     (PRELIM_I915_USER_EXT | 0)
	struct i915_user_extension base;
	/* Address the fence value is written to */
	__u64 addr;
	/* Value written at 'addr' on completion */
	__u64 val;
};
1641 
/*
 * VM_CREATE extension: associate the new VM with a memory region
 * (used to select which GT the VM is created on).
 */
struct prelim_drm_i915_gem_vm_region_ext {
#define PRELIM_I915_GEM_VM_CONTROL_EXT_REGION	(PRELIM_I915_USER_EXT | 0)
	struct i915_user_extension base;
	/* memory region: to find gt to create vm on */
	struct prelim_drm_i915_gem_memory_class_instance region;
	/* Padding for 64-bit alignment; must be zero — TODO confirm */
	__u32 pad;
};
1649 
/*
 * Prelim flag bits for the upstream drm_i915_gem_vm_control ioctl's 'flags'
 * field (bits 16-17; lower bits remain for upstream-defined flags).
 *
 * NOTE(review): GENMASK is a kernel-internal macro (include/linux/bits.h)
 * and is not normally visible to userspace consumers of a uAPI header —
 * confirm PRELIM_I915_VM_CREATE_FLAGS_UNKNOWN is usable from userspace.
 */
struct prelim_drm_i915_gem_vm_control {
#define PRELIM_I915_VM_CREATE_FLAGS_DISABLE_SCRATCH	(1 << 16)
#define PRELIM_I915_VM_CREATE_FLAGS_ENABLE_PAGE_FAULT	(1 << 17)
#define PRELIM_I915_VM_CREATE_FLAGS_UNKNOWN		(~(GENMASK(17, 16)))
};
1655 
1656 struct prelim_drm_i915_vm_bind_ext_set_pat {
1657 #define PRELIM_I915_VM_BIND_EXT_SET_PAT	(PRELIM_I915_USER_EXT | 2)
1658        struct i915_user_extension base;
1659        __u64 pat_index;
1660 };
1661 
1662 /**
1663  * struct prelim_drm_i915_gem_clos_reserve
1664  *
1665  * Allows clients to request reservation of one free CLOS, to use in subsequent
1666  * Cache Reservations.
1667  *
1668  */
1669 struct prelim_drm_i915_gem_clos_reserve {
1670 	__u16 clos_index;
1671 	__u16 pad16;
1672 };
1673 
1674 /**
1675  * struct prelim_drm_i915_gem_clos_free
1676  *
1677  * Free off a previously reserved CLOS set. Any corresponding Cache Reservations
1678  * that are active for the CLOS are automatically dropped and returned to the
1679  * Shared set.
1680  *
1681  * The clos_index indicates the CLOS set which is being released and must
1682  * correspond to a CLOS index previously reserved.
1683  *
1684  */
1685 struct prelim_drm_i915_gem_clos_free {
1686 	__u16 clos_index;
1687 	__u16 pad16;
1688 };
1689 
1690 /**
1691  * struct prelim_drm_i915_gem_cache_reserve
1692  *
1693  * Allows clients to request, or release, reservation of one or more cache ways,
1694  * within a previously reserved CLOS set.
1695  *
1696  * If num_ways = 0, i915 will drop any existing Reservation for the specified
1697  * clos_index and cache_level. The requested clos_index and cache_level Waymasks
1698  * will then track the Shared set once again.
1699  *
1700  * Otherwise, the requested number of Ways will be removed from the Shared set
1701  * for the requested cache level, and assigned to the Cache and CLOS specified
1702  * by cache_level/clos_index.
1703  *
1704  */
1705 struct prelim_drm_i915_gem_cache_reserve {
1706 	__u16 clos_index;
1707 	__u16 cache_level; // e.g. 3 for L3
1708 	__u16 num_ways;
1709 	__u16 pad16;
1710 };
1711 
1712 /**
1713  * struct prelim_drm_i915_gem_vm_prefetch
1714  *
1715  * Prefetch an address range to a memory region.
1716  */
1717 struct prelim_drm_i915_gem_vm_prefetch {
1718 	/** Memory region to prefetch to **/
1719 	__u32 region;
1720 
1721 	/** Reserved **/
1722 	__u32 rsvd;
1723 
1724 	/** VA start to prefetch **/
1725 	__u64 start;
1726 
1727 	/** VA length to prefetch **/
1728 	__u64 length;
1729 };
1730 
/*
 * Get/set a parameter on a VM identified by vm_id.
 */
struct prelim_drm_i915_gem_vm_param {
	/* VM being queried/configured */
	__u32 vm_id;
	/* Reserved; must be zero — TODO confirm */
	__u32 rsvd;

/* Namespace bit distinguishing prelim VM params from upstream ones */
#define PRELIM_I915_VM_PARAM		(1ull << 63)
/* Shared-virtual-memory parameter — exact semantics TODO confirm */
#define PRELIM_I915_GEM_VM_PARAM_SVM	(1 << 16)
	/* Parameter id; composed with PRELIM_I915_VM_PARAM — TODO confirm */
	__u64 param;

	/* Parameter value (in for set, out for get — TODO confirm) */
	__u64 value;
};
1741 
1742 #endif /* __I915_DRM_PRELIM_H__ */