/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Internal/downstream declarations should be added to i915_drm_prelim.h,
 * not here in i915_drm.h.
 */

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the user/kernel boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};
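
/*
 * For example, embedding the extension in a containing uAPI struct might
 * look like the following. This is an illustrative sketch only: the
 * struct foo_create, its extensions member and the FOO_EXT_BAR name are
 * hypothetical, not part of this header.
 *
 * .. code-block:: C
 *
 *	struct foo_ext_bar ext = {
 *		.base = {
 *			.next_extension = 0, // end of the chain
 *			.name = FOO_EXT_BAR, // hypothetical extension name
 *		},
 *	};
 *	struct foo_create create = {
 *		.extensions = (uintptr_t)&ext.base, // head of the chain
 *	};
 */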

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/**
 * enum drm_i915_gem_engine_class - uapi engine type enumeration
 *
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. This enum provides a classification of the role
 * of the engine, which may be used when requesting operations to be performed
 * on a certain subset of engines, or for providing information about that
 * group.
 */
enum drm_i915_gem_engine_class {
	/**
	 * @I915_ENGINE_CLASS_RENDER:
	 *
	 * Render engines support instructions used for 3D, Compute (GPGPU),
	 * and programmable media workloads. These instructions fetch data and
	 * dispatch individual work items to threads that operate in parallel.
	 * The threads run small programs (called "kernels" or "shaders") on
	 * the GPU's execution units (EUs).
	 */
	I915_ENGINE_CLASS_RENDER = 0,

	/**
	 * @I915_ENGINE_CLASS_COPY:
	 *
	 * Copy engines (also referred to as "blitters") support instructions
	 * that move blocks of data from one location in memory to another,
	 * or that fill a specified location of memory with fixed data.
	 * Copy engines can perform pre-defined logical or bitwise operations
	 * on the source, destination, or pattern data.
	 */
	I915_ENGINE_CLASS_COPY = 1,

	/**
	 * @I915_ENGINE_CLASS_VIDEO:
	 *
	 * Video engines (also referred to as "bit stream decode" (BSD) or
	 * "vdbox") support instructions that perform fixed-function media
	 * decode and encode.
	 */
	I915_ENGINE_CLASS_VIDEO = 2,

	/**
	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
	 *
	 * Video enhancement engines (also referred to as "vebox") support
	 * instructions related to image enhancement.
	 */
	I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,

	/**
	 * @I915_ENGINE_CLASS_COMPUTE:
	 *
	 * Compute engines support a subset of the instructions available
	 * on render engines: compute engines support Compute (GPGPU) and
	 * programmable media workloads, but do not support the 3D pipeline.
	 */
	I915_ENGINE_CLASS_COMPUTE = 4,

	/* Values in this enum should be kept compact. */

	/**
	 * @I915_ENGINE_CLASS_INVALID:
	 *
	 * Placeholder value to represent an invalid engine class assignment.
	 */
	I915_ENGINE_CLASS_INVALID = -1
};

/*
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number, and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	__u16 engine_class;	/* see enum drm_i915_gem_engine_class */
	__u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define I915_PMU_ACTUAL_FREQUENCY	__PRELIM_I915_PMU_ACTUAL_FREQUENCY(0)
#define I915_PMU_REQUESTED_FREQUENCY	__PRELIM_I915_PMU_REQUESTED_FREQUENCY(0)
#define I915_PMU_INTERRUPTS		__PRELIM_I915_PMU_INTERRUPTS(0)
#define I915_PMU_RC6_RESIDENCY		__PRELIM_I915_PMU_RC6_RESIDENCY(0)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__PRELIM_I915_PMU_SOFTWARE_GT_AWAKE_TIME(0)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
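
/*
 * For example, sampling the busyness of the first render engine via perf.
 * This is an illustrative sketch only: it assumes the i915 PMU type has
 * been read into "i915_pmu_type" from the sysfs path in the DOC section
 * above, and that <linux/perf_event.h> and <sys/syscall.h> are included.
 *
 * .. code-block:: C
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type, // assumed to be read from sysfs
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	// uncore PMU: pid = -1, cpu = 0
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 */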

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1
#define I915_BOX_FLIP		0x2
#define I915_BOX_WAIT		0x4
#define I915_BOX_TEXTURE_LOAD	0x8
#define I915_BOX_LOST_CONTEXT	0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and should be in [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE		DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR			DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT			DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT		DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT		DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD		DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE		DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN		DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH		DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE		DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY		DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char *buf;		/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on IRQs:
 */
typedef struct drm_i915_irq_emit {
	int *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING      8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD               10
#define I915_PARAM_HAS_BLT               11
#define I915_PARAM_HAS_RELAXED_FENCING   12
#define I915_PARAM_HAS_COHERENT_RINGS    13
#define I915_PARAM_HAS_EXEC_CONSTANTS    14
#define I915_PARAM_HAS_RELAXED_DELTA     15
#define I915_PARAM_HAS_GEN7_SOL_RESET    16
#define I915_PARAM_HAS_LLC               17
#define I915_PARAM_HAS_ALIASING_PPGTT    18
#define I915_PARAM_HAS_WAIT_TIMEOUT      19
#define I915_PARAM_HAS_SEMAPHORES        20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
#define I915_PARAM_HAS_VEBOX             22
#define I915_PARAM_HAS_SECURE_BATCHES    23
#define I915_PARAM_HAS_PINNED_BATCHES    24
#define I915_PARAM_HAS_EXEC_NO_RELOC     25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT                27
#define I915_PARAM_CMD_PARSER_VERSION    28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2              31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL        33
#define I915_PARAM_EU_TOTAL              34
#define I915_PARAM_HAS_GPU_RESET         35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN      37
#define I915_PARAM_HAS_POOLED_EU         38
#define I915_PARAM_MIN_EU_IN_POOL        39
#define I915_PARAM_MMAP_GTT_VERSION      40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask; a nonzero value implies that the
 * scheduler is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)

#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user-specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int *value;
} drm_i915_getparam_t;
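
/*
 * For example, querying the scheduler capabilities. This is an
 * illustrative sketch only; it assumes an open DRM fd and the drmIoctl()
 * helper from libdrm:
 *
 * .. code-block:: C
 *
 *	int caps = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_SCHEDULER,
 *		.value = &caps,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 &&
 *	    (caps & I915_SCHEDULER_CAP_PRIORITY))
 *		; // the scheduler honours context priorities
 */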

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

struct drm_i915_gem_mmap_offset {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;

	/**
	 * Flags for extended behaviour.
	 *
	 * It is mandatory that one of the MMAP_OFFSET types
	 * (GTT, WC, WB, UC, etc) should be included.
	 */
	__u64 flags;
#define I915_MMAP_OFFSET_GTT 0
#define I915_MMAP_OFFSET_WC  1
#define I915_MMAP_OFFSET_WB  2
#define I915_MMAP_OFFSET_UC  3

	/*
	 * Zero-terminated chain of extensions.
	 *
	 * No current extensions defined; MBZ.
	 */
	__u64 extensions;
};
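
/*
 * Example of the two-step mapping flow: first obtain the fake offset,
 * then pass it to mmap() on the DRM fd. An illustrative sketch only;
 * assumes an open DRM fd, a valid object handle/size and the drmIoctl()
 * helper from libdrm:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */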

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation. This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */
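
/*
 * For example, a single relocation entry patching a batch buffer to
 * point at a render target. An illustrative sketch only; target_bo,
 * the byte offset and the assumed-stale presumed_offset are caller
 * choices, not values mandated by this header:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_bo, // per-file handle (or an index,
 *					    // with I915_EXEC_HANDLE_LUT)
 *		.delta = 0,
 *		.offset = 64, // where in the batch the address is written
 *		.presumed_offset = 0, // assume stale, so the kernel patches
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = I915_GEM_DOMAIN_RENDER,
 *	};
 */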

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC		(1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE		(1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};

struct drm_i915_gem_exec_fence {
	/**
	 * User's handle for a drm_syncobj to wait on or signal.
	 */
	__u32 handle;

#define I915_EXEC_FENCE_WAIT            (1<<0)
#define I915_EXEC_FENCE_SIGNAL          (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
	__u32 flags;
};

/*
 * See drm_i915_gem_execbuffer_ext_timeline_fences.
 */
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0

/*
 * This structure describes an array of drm_syncobj and associated points for
 * timeline variants of drm_syncobj. It is invalid to append this structure to
 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
 */
struct drm_i915_gem_execbuffer_ext_timeline_fences {
	struct i915_user_extension base;

	/**
	 * Number of elements in the handles_ptr & values_ptr arrays.
	 */
	__u64 fence_count;

	/**
	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
	 * fence_count.
	 */
	__u64 handles_ptr;

	/**
	 * Pointer to an array of u64 values of length fence_count. Values
	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
	 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
	 */
	__u64 values_ptr;
};
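
/*
 * For example, attaching the extension to a submission (see struct
 * drm_i915_gem_execbuffer2 below). An illustrative sketch only; the
 * exec_fences and points arrays are built by the caller:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences fences = {
 *		.base = {
 *			.next_extension = 0,
 *			.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		},
 *		.fence_count = n,
 *		.handles_ptr = (uintptr_t)exec_fences, // drm_i915_gem_exec_fence[n]
 *		.values_ptr = (uintptr_t)points,       // __u64[n] timeline points
 *	};
 *
 *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 *	execbuf.cliprects_ptr = (uintptr_t)&fences.base;
 */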

struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/**
	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
	 * & I915_EXEC_USE_EXTENSIONS are not set.
	 *
	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
	 * of the array.
	 *
	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
	 * single struct i915_user_extension and num_cliprects is 0.
	 */
	__u64 cliprects_ptr;

#define I915_EXEC_RING_MASK              (0x3f)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses, and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 * the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_files and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of the
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction, and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST		(1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of i915_gem_exec_fence structures which specify a set of
 * dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY   (1<<19)

/*
 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_SUBMIT		(1 << 20)

/*
 * Setting I915_EXEC_USE_EXTENSIONS implies that
 * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
 * list of i915_user_extension. Each i915_user_extension node is the base of a
 * larger structure. The supported structures are listed in the
 * drm_i915_gem_execbuffer_ext enum.
 */
#define I915_EXEC_USE_EXTENSIONS	(1 << 21)

#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
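
/*
 * For example, a submission that returns an out-fence. An illustrative
 * sketch only; objs[] is an array of struct drm_i915_gem_exec_object2
 * prepared by the caller, and drmIoctl() comes from libdrm:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = count,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_FENCE_OUT,
 *	};
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *
 *	// _WR variant required so upper_32_bits(rsvd2) is written back
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0)
 *		out_fence_fd = execbuf.rsvd2 >> 32;
 */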

struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide and only indirectly by reporting its class
	 * (there may be more than one engine in each class). There are race
	 * conditions which prevent the report of which engines are busy from
	 * being always accurate. However, the converse is not true. If the
	 * object is idle, the result of the ioctl, that all engines are idle,
	 * is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engine classes on which the object is being read, and the
	 * engine class on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicates if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
	 * 1 not 0 etc) for the last write is reported.
	 *
	 * The high word (bits 16:31) is a bitmask of which engine classes
	 * are currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine class is the same as specified in the
	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
	 * Some hardware may have parallel execution engines, e.g. multiple
	 * media engines, which are mapped to the same class identifier and
	 * so are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should be only used as a heuristic.
	 */
	__u32 busy;
};
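
/*
 * For example, decoding the busy field per the layout described above.
 * An illustrative sketch only; assumes an open DRM fd, a valid handle
 * and the drmIoctl() helper from libdrm:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		if (busy.busy & 0xffff) // low word: writer class + 1
 *			writer_class = (busy.busy & 0xffff) - 1;
 *		readers_mask = busy.busy >> 16; // bit N: class N is reading
 *	}
 */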

/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with CPU caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with CPU caches and furthermore the data is cached in
 * last-level caches shared between CPU cores and the GPU GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of.
	 */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7).
	 */
	__u32 caching;
};

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
/*
 * Do not add new tiling types here. The I915_TILING_* values are for
 * de-tiling fence registers that no longer exist on modern platforms. Although
 * the hardware may support new types of tiling in general (e.g., Tile4), we
 * do not need to add them to the uapi that is specific to now-defunct ioctls.
 */
#define I915_TILING_LAST	I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
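
/*
 * For example, requesting X-tiling. An illustrative sketch only; since
 * the kernel may demote the request, tiling_mode must be re-checked on
 * return:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_set_tiling st = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride_bytes,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) == 0 &&
 *	    st.tiling_mode == I915_TILING_X)
 *		; // tiling applied as requested
 */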

struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested */
	__u32 crtc_id;

	/** pipe of requested CRTC */
	__u32 pipe;
};

#define I915_MADV_WILLNEED	0
#define I915_MADV_DONTNEED	1
#define __I915_MADV_PURGED	2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice for */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or it won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};
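
/*
 * Example: marking a cached buffer as discardable, then reclaiming it before
 * reuse. A minimal sketch; error handling is elided:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED, // kernel may purge under pressure
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	// Later, before reusing the buffer:
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		; // backing store was purged; reallocate and repopulate
 */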

/* flags */
#define I915_OVERLAY_TYPE_MASK		0xff
#define I915_OVERLAY_YUV_PLANAR		0x01
#define I915_OVERLAY_YUV_PACKED		0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS		(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA		(1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple. Both source and destination
 * color keying are allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent. All other pixels will
 * be displayed on top of the primary plane. For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
						* flags==0 to disable colorkeying.
						*/
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};

struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait; returns the time remaining. */
	__s64 timeout_ns;
};

struct drm_i915_gem_context_create {
	__u32 ctx_id; /* output: id of new context */
	__u32 pad;
};

struct drm_i915_gem_context_create_ext {
	__u32 ctx_id; /* output: id of new context */
	__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
	__u64 extensions;
};
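
/*
 * Example: creating a context whose engines share a single timeline. A
 * sketch, assuming the ioctl is issued through libdrm's drmIoctl():
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
 *		.extensions = 0, // no extension chain attached
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create) == 0)
 *		ctx_id = create.ctx_id; // handle for subsequent execbufs
 *
 * To pass extensions (see struct drm_i915_gem_context_create_ext_setparam
 * below), also set I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS and point
 * @extensions at the first element of the chain.
 */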

struct drm_i915_gem_context_param {
	__u32 ctx_id;
	__u32 size;
	__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
#define I915_CONTEXT_PARAM_BANNABLE	0x5
#define I915_CONTEXT_PARAM_PRIORITY	0x6
#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
#define   I915_CONTEXT_DEFAULT_PRIORITY		0
#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
/*
 * When using the following param, value should be a pointer to
 * drm_i915_gem_context_param_sseu.
 */
#define I915_CONTEXT_PARAM_SSEU		0x7

/*
 * Not all clients may want to attempt automatic recovery of a context after
 * a hang (for example, some clients may only submit very small incremental
 * batches relying on known logical state of previous batches which will never
 * recover correctly and each attempt will hang), and so would prefer that
 * the context is forever banned instead.
 *
 * If set to false (0), after a reset, subsequent (and in flight) rendering
 * from this context is discarded, and the client will need to create a new
 * context to use instead.
 *
 * If set to true (1), the kernel will automatically attempt to recover the
 * context by skipping the hanging batch and executing the next batch starting
 * from the default context state (discarding the incomplete logical context
 * state lost due to the reset).
 *
 * On creation, all new contexts are marked as recoverable.
 */
#define I915_CONTEXT_PARAM_RECOVERABLE	0x8

/*
 * The id of the associated virtual memory address space (ppGTT) of
 * this context. Can be retrieved and passed to another context
 * (on the same fd) for both to use the same ppGTT and so share
 * address layouts, and avoid reloading the page tables on context
 * switches between themselves.
 *
 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
 */
#define I915_CONTEXT_PARAM_VM		0x9

/*
 * I915_CONTEXT_PARAM_ENGINES:
 *
 * Bind this context to operate on this subset of available engines. Henceforth,
 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
 * and upwards. Slots 0...N are filled in using the specified (class, instance).
 * Use
 *	engine_class: I915_ENGINE_CLASS_INVALID,
 *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
 * to specify a gap in the array that can be filled in later, e.g. by a
 * virtual engine used for load balancing.
 *
 * Setting the number of engines bound to the context to 0, by passing a zero
 * sized argument, will revert to the default settings.
 *
 * See struct i915_context_param_engines.
 *
 * Extensions:
 *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
 *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
 *   prelim_i915_context_engines_parallel_submit (PRELIM_I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
 */
#define I915_CONTEXT_PARAM_ENGINES	0xa

/*
 * I915_CONTEXT_PARAM_PERSISTENCE:
 *
 * Allow the context and active rendering to survive the process until
 * completion. Persistence allows fire-and-forget clients to queue up a
 * bunch of work, hand the output over to a display server and then quit.
 * If the context is marked as not persistent, upon closing (either via
 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
 * or process termination), the context and any outstanding requests will be
 * cancelled (and exported fences for cancelled requests marked as -EIO).
 *
 * By default, new contexts allow persistence.
 */
#define I915_CONTEXT_PARAM_PERSISTENCE	0xb

/*
 * I915_CONTEXT_PARAM_RINGSIZE:
 *
 * Sets the size of the CS ringbuffer to use for logical ring contexts. This
 * applies a limit on how many batches can be queued to HW before the caller
 * is blocked due to lack of space for more commands.
 *
 * Only reliably possible to set prior to first use, i.e. during
 * construction. At any later point, the current execution must be flushed as
 * the ring can only be changed while the context is idle. Note, the ringsize
 * can be specified as a constructor property, see
 * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
 *
 * Only applies to the current set of engines and is lost when those engines
 * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
 *
 * Must be between 4 and 512 KiB, in multiples of the page size (4 KiB).
 * Default is 16 KiB.
 */
#define I915_CONTEXT_PARAM_RINGSIZE	0xc
/* Must be kept compact -- no holes and well documented */

	__u64 value;
};
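
/*
 * Example: raising the scheduling priority of a context. A sketch, assuming
 * the caller has the privileges required for positive priorities:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512, // within [-1023, 1023]
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * The same struct is used with DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, in which
 * case the kernel fills in @value (and @size for pointer-backed params).
 */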

/*
 * Context SSEU programming
 *
 * It may be necessary for either functional or performance reasons to
 * configure a context to run with a reduced number of SSEU (where SSEU stands
 * for Slice/Sub-slice/EU).
 *
 * This is done by programming the SSEU configuration, using the below
 * struct drm_i915_gem_context_param_sseu, for every supported engine which
 * userspace intends to use.
 *
 * Not all GPUs or engines support this functionality, in which case an error
 * code -ENODEV will be returned.
 *
 * Also, the flexibility of possible SSEU configuration permutations varies
 * between GPU generations and is subject to software-imposed limitations.
 * Requesting an unsupported combination will return an error code of -EINVAL.
 *
 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
 * favour of a single global setting.
 */
struct drm_i915_gem_context_param_sseu {
	/*
	 * Engine class & instance to be configured or queried.
	 */
	struct i915_engine_class_instance engine;

	/*
	 * Unknown flags must be cleared to zero.
	 */
	__u32 flags;
#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)

	/*
	 * Mask of slices to enable for the context. Valid values are a subset
	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
	 */
	__u64 slice_mask;

	/*
	 * Mask of subslices to enable for the context. Valid values are a
	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
	 */
	__u64 subslice_mask;

	/*
	 * Minimum/Maximum number of EUs to enable per subslice for the
	 * context. min_eus_per_subslice must be less than or equal to
	 * max_eus_per_subslice.
	 */
	__u16 min_eus_per_subslice;
	__u16 max_eus_per_subslice;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 rsvd;
};

/*
 * i915_context_engines_load_balance:
 *
 * Enable load balancing across this set of engines.
 *
 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that, when
 * used, will proxy the execbuffer request onto one of the set of engines
 * in such a way as to distribute the load evenly across the set.
 *
 * The set of engines must be compatible (e.g. the same HW class) as they
 * will share the same logical GPU context and ring.
 *
 * To intermix rendering with the virtual engine and direct rendering onto
 * the backing engines (bypassing the load balancing proxy), the context must
 * be defined to use a single timeline for all engines.
 */
struct i915_context_engines_load_balance {
	struct i915_user_extension base;

	__u16 engine_index;
	__u16 num_siblings;
	__u32 flags; /* all undefined flags must be zero */

	__u64 mbz64; /* reserved for future use; must be zero */

	struct i915_engine_class_instance engines[0];
} __attribute__((packed));

#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
	struct i915_user_extension base; \
	__u16 engine_index; \
	__u16 num_siblings; \
	__u32 flags; \
	__u64 mbz64; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
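
/*
 * Example: declaring a two-sibling balanced virtual engine for slot 0 with
 * the sized helper above. A sketch; the instance numbers are illustrative:
 *
 * .. code-block:: C
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
 *		.engine_index = 0, // fills the virtual slot engines[0]
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 *
 * The extension is then linked into the chain passed alongside
 * I915_CONTEXT_PARAM_ENGINES (see struct i915_context_param_engines below).
 */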

/*
 * i915_context_engines_bond:
 *
 * Constructs bonded pairs for execution within a virtual engine.
 *
 * All engines are equal, but some are more equal than others. Given
 * the distribution of resources in the HW, it may be preferable to run
 * a request on a given subset of engines in parallel to a request on a
 * specific engine. We enable this selection of engines within a virtual
 * engine by specifying bonding pairs; for any given master engine we will
 * only execute on one of the corresponding siblings within the virtual engine.
 *
 * To execute a request in parallel on the master engine and a sibling requires
 * coordination with an I915_EXEC_FENCE_SUBMIT.
 */
struct i915_context_engines_bond {
	struct i915_user_extension base;

	struct i915_engine_class_instance master;

	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
	__u16 num_bonds;

	__u64 flags; /* all undefined flags must be zero */
	__u64 mbz64[4]; /* reserved for future use; must be zero */

	struct i915_engine_class_instance engines[0];
} __attribute__((packed));

#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
	struct i915_user_extension base; \
	struct i915_engine_class_instance master; \
	__u16 virtual_index; \
	__u16 num_bonds; \
	__u64 flags; \
	__u64 mbz64[4]; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__

struct i915_context_param_engines {
	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
	struct i915_engine_class_instance engines[0];
} __attribute__((packed));

#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
	__u64 extensions; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__

struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
	struct i915_user_extension base;
	struct drm_i915_gem_context_param param;
};

struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

/*
 * DRM_I915_GEM_VM_CREATE -
 *
 * Create a new virtual memory address space (ppGTT) for use within a context
 * on the same file. Extensions can be provided to configure exactly how the
 * address space is set up upon creation.
 *
 * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
 * returned in the outparam @vm_id.
 *
 * No flags are defined; all bits are reserved and must be zero.
 *
 * An extension chain may be provided, starting with @extensions, and terminated
 * by the @next_extension being 0. Currently, a mem region extension is defined.
 *
 * DRM_I915_GEM_VM_DESTROY -
 *
 * Destroys a previously created VM id, specified in @vm_id.
 *
 * No extensions or flags are allowed currently, and so must be zero.
 */
struct drm_i915_gem_vm_control {
	__u64 extensions;
	__u32 flags;
	__u32 vm_id;
};
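
/*
 * Example: creating a ppGTT and assigning it to an existing context so two
 * contexts can share one address space. A sketch with illustrative names:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_vm_control vm = {}; // no flags, no extensions
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm) == 0) {
 *		struct drm_i915_gem_context_param p = {
 *			.ctx_id = ctx_id,
 *			.param = I915_CONTEXT_PARAM_VM,
 *			.value = vm.vm_id, // context now uses this ppGTT
 *		};
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	}
 */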

struct drm_i915_reg_read {
	/*
	 * Register offset.
	 * For 64bit wide registers where the upper 32bits don't immediately
	 * follow the lower 32bits, the offset of the lower 32bits must
	 * be specified.
	 */
	__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)

	__u64 val; /* Return value */
};

/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if read using the default
 *   single-instruction 8byte read; to work around that, pass the flag
 *   I915_REG_READ_8B_WA in the offset field.
 */

struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;
	/*
	 * Contexts marked as using protected content are invalidated when the
	 * protected content session dies. Submission of invalidated contexts
	 * is rejected with -EACCES.
	 */
#define I915_CONTEXT_INVALIDATED 0x1

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};

struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};
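
/*
 * Example: wrapping anonymous, page-aligned memory in a GEM object. A sketch;
 * both the pointer and the size are assumed to be page aligned:
 *
 * .. code-block:: C
 *
 *	void *mem;
 *	if (posix_memalign(&mem, 4096, size))
 *		return; // allocation failed
 *
 *	struct drm_i915_gem_userptr up = {
 *		.user_ptr = (__u64)(uintptr_t)mem,
 *		.user_size = size, // page aligned
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up) == 0)
 *		handle = up.handle; // usable like any other GEM handle
 */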

enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};

enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	/**
	 * Specifying this property is only valid when specifying a context to
	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
	 * will hold preemption of the particular context we want to gather
	 * performance data about. The execbuf2 submissions must include a
	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
	 *
	 * This property is available in perf revision 3.
	 */
	DRM_I915_PERF_PROP_HOLD_PREEMPTION,

	/**
	 * Specifying this pins all contexts to the specified SSEU power
	 * configuration for the duration of the recording.
	 *
	 * This parameter's value is a pointer to a struct
	 * drm_i915_gem_context_param_sseu.
	 *
	 * This property is available in perf revision 4.
	 */
	DRM_I915_PERF_PROP_GLOBAL_SSEU,

	/**
	 * This optional parameter specifies the timer interval in nanoseconds
	 * at which the i915 driver will check the OA buffer for available data.
	 * The minimum allowed value is 100 microseconds. A default value is
	 * used by the driver if this parameter is not specified. Note that
	 * larger timer values will reduce CPU consumption during OA perf
	 * captures. However, excessively large values would potentially result
	 * in OA buffer overwrites as captures reach the end of the OA buffer.
	 *
	 * This property is available in perf revision 5.
	 */
	DRM_I915_PERF_PROP_POLL_OA_PERIOD,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
#define I915_PERF_FLAG_DISABLED		(1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};

/*
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/*
 * Disable data capture for a stream.
 *
 * It is an error to try to read a stream that is disabled.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)

/*
 * Change the metrics_set captured by a stream.
 *
 * If the stream is bound to a specific context, the configuration change
 * will be performed inline with that context such that it takes effect before
 * the next execbuf submission.
 *
 * Returns the previously bound metrics set id, or a negative error code.
 *
 * This ioctl is available in perf revision 2.
 */
#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
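
/*
 * Example: opening an OA stream filtered to one context. A sketch; the
 * metrics set id is a placeholder obtained elsewhere (e.g. sysfs):
 *
 * .. code-block:: C
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 13, // ~1.3ms period on HSW
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)props,
 *	};
 *
 *	// On success the ioctl returns a stream fd to read() records from.
 *	int stream_fd = drmIoctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */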

/*
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the workhorse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here:
	 *
	 *     struct {
	 *         struct drm_i915_perf_record_header header;
	 *
	 *         { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 *     };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};

/*
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register address,
	 * value). For example the expected length of the buffer pointed to by
	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};
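
/*
 * Example: draining records from an open perf stream fd. A sketch; each
 * record starts with struct drm_i915_perf_record_header and @size covers the
 * header plus its payload:
 *
 * .. code-block:: C
 *
 *	__u8 buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (ssize_t off = 0; off < len;) {
 *		const struct drm_i915_perf_record_header *h =
 *			(const void *)(buf + off);
 *
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			; // OA report payload follows the header
 *		else if (h->type == DRM_I915_PERF_RECORD_OA_BUFFER_LOST)
 *			; // all pending reports were lost; resynchronise
 *
 *		off += h->size; // advance to the next record
 *	}
 */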

/**
 * struct drm_i915_query_item - An individual query for the kernel to process.
 *
 * The behaviour is determined by the @query_id. Note that exactly what
 * @data_ptr points to also depends on the specific @query_id.
 */
struct drm_i915_query_item {
	/** @query_id: The id for this query */
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO	1
#define DRM_I915_QUERY_ENGINE_INFO	2
#define DRM_I915_QUERY_PERF_CONFIG	3
#define DRM_I915_QUERY_HWCONFIG_TABLE	5
/* Must be kept compact -- no holes and well documented */

	/**
	 * @length:
	 *
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the @data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/**
	 * @flags:
	 *
	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
	 * following:
	 *
	 *	- DRM_I915_QUERY_PERF_CONFIG_LIST
	 *	- DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
	 *	- DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
	 *
	 * For all other query_id values, flags must be 0.
	 */
	__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST		1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID	2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID		3

	/**
	 * @data_ptr:
	 *
	 * Data will be written at the location pointed to by @data_ptr when
	 * the value of @length matches the length of the data to be written
	 * by the kernel.
	 */
	__u64 data_ptr;
};

/**
 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for
 * the kernel to fill out.
 *
 * Note that this is generally a two-step process for each struct
 * drm_i915_query_item in the array:
 *
 * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
 *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
 *    kernel will then fill in the size, in bytes, which tells userspace how
 *    much memory it needs to allocate for the blob (say, for an array of
 *    properties).
 *
 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
 *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note
 *    that the &drm_i915_query_item.length should still be the same as what
 *    the kernel previously set. At this point the kernel can fill in the
 *    blob.
 *
 * Note that for some query items it can make sense for userspace to just pass
 * in a buffer/blob equal to or larger than the required size. In this case
 * only a single ioctl call is needed. For some smaller query items this can
 * work quite well.
 */
struct drm_i915_query {
	/** @num_items: The number of elements in the @items_ptr array */
	__u32 num_items;

	/**
	 * @flags: Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/**
	 * @items_ptr:
	 *
	 * Pointer to an array of struct drm_i915_query_item. The number of
	 * array elements is @num_items.
	 */
	__u64 items_ptr;
};
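
/*
 * Example: the two-step pattern described above, used here to fetch engine
 * info. A sketch; malloc() error handling is elided:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_ENGINE_INFO,
 *		.length = 0, // step 1: ask for the required size
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (__u64)(uintptr_t)&item,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);
 *	if (item.length <= 0)
 *		return; // the kernel reported an error for this item
 *
 *	struct drm_i915_query_engine_info *info = malloc(item.length);
 *	item.data_ptr = (__u64)(uintptr_t)info; // step 2: fill the blob
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);
 */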

/*
 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
 *
 * data: contains the 3 pieces of information:
 *
 * - the slice mask with one bit per slice telling whether a slice is
 *   available. The availability of slice X can be queried with the following
 *   formula:
 *
 *     (data[X / 8] >> (X % 8)) & 1
 *
 * - the subslice mask for each slice with one bit per subslice telling
 *   whether a subslice is available. Gen12 has dual-subslices, which are
 *   similar to two gen11 subslices. For gen12, this array represents dual-
 *   subslices. The availability of subslice Y in slice X can be queried
 *   with the following formula:
 *
 *     (data[subslice_offset +
 *           X * subslice_stride +
 *           Y / 8] >> (Y % 8)) & 1
 *
 * - the EU mask for each subslice in each slice with one bit per EU telling
 *   whether an EU is available. The availability of EU Z in subslice Y in
 *   slice X can be queried with the following formula:
 *
 *     (data[eu_offset +
 *           (X * max_subslices + Y) * eu_stride +
 *           Z / 8] >> (Z % 8)) & 1
 */
struct drm_i915_query_topology_info {
	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	__u16 max_slices;
	__u16 max_subslices;
	__u16 max_eus_per_subslice;

	/*
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/*
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/*
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/*
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	__u8 data[];
};

/**
 * struct drm_i915_engine_info
 *
 * Describes one engine and its capabilities as known to the driver.
 *
 * FIXME: revert to upstream version after UMD switch to PRELIM version
 */
struct drm_i915_engine_info {
	/** @engine: Engine class and instance. */
	struct i915_engine_class_instance engine;

	/** @rsvd0: Reserved field. */
	__u32 rsvd0;

	/** @flags: Engine flags. */
	__u64 flags;

	/** @capabilities: Capabilities of this engine. */
	__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)

	/** @rsvd1: Reserved fields. */
	__u64 rsvd1[4];
};

/**
 * struct drm_i915_query_engine_info
 *
 * Engine info query enumerates all engines known to the driver by filling in
 * an array of struct drm_i915_engine_info structures.
 */
struct drm_i915_query_engine_info {
	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** @rsvd: MBZ */
	__u32 rsvd[3];

	/** @engines: Marker for drm_i915_engine_info structures. */
	struct drm_i915_engine_info engines[];
};

/*
 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
 */
struct drm_i915_query_perf_config {
	union {
		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST,
		 * i915 sets this field to the number of configurations
		 * available.
		 */
		__u64 n_configs;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into @data.
		 */
		__u64 config;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into @data.
		 *
		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
		 */
		char uuid[36];
	};

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
	 * write an array of __u64 configuration identifiers.
	 *
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
	 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
	 * drm_i915_perf_oa_config. If the following fields of
	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
	 * associated pointers the values submitted when the configuration
	 * was created:
	 *
	 * - n_mux_regs
	 * - n_boolean_regs
	 * - n_flex_regs
	 */
	__u8 data[];
};

#include "i915_drm_prelim.h"

/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf

#if defined(__cplusplus)
}
#endif

#endif /* _I915_DRM_H_ */