/* xref: /openbsd/sys/dev/pci/drm/include/uapi/drm/drm.h (revision f005ef32) */
/*
 * Header for the Direct Rendering Manager
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_H_
#define _DRM_H_

#ifndef __user
#define __user
#endif

#if defined(__KERNEL__) && defined(__linux__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#elif defined(__linux__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#else /* One of the BSDs */

#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t   __s8;
typedef uint8_t  __u8;
typedef int16_t  __s16;
typedef uint16_t __u16;
typedef int32_t  __s32;
typedef uint32_t __u32;
typedef int64_t  __s64;
typedef uint64_t __u64;
typedef size_t   __kernel_size_t;
typedef unsigned long drm_handle_t;

#endif

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;

/*
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/*
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/*
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/*
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/*
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	__kernel_size_t name_len;	  /**< Length of name buffer */
	char __user *name;	  /**< Name of driver */
	__kernel_size_t date_len;	  /**< Length of date buffer */
	char __user *date;	  /**< User-space buffer to hold date */
	__kernel_size_t desc_len;	  /**< Length of desc buffer */
	char __user *desc;	  /**< User-space buffer to hold desc */
};
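
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): version
 * strings are usually fetched in two passes -- once with NULL buffers to
 * learn the lengths, then again with caller-allocated buffers.  "fd" is
 * assumed to be an open DRM device node, <stdlib.h>/<sys/ioctl.h> are
 * assumed included, and error handling is elided.  DRM_IOCTL_VERSION is
 * defined later in this header.
 *
 *	struct drm_version v = {0};
 *
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// first pass: lengths only
 *	v.name = malloc(v.name_len + 1);
 *	v.date = malloc(v.date_len + 1);
 *	v.desc = malloc(v.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// second pass: fill buffers
 *	v.name[v.name_len] = '\0';		// strings are not terminated
 */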

/*
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	__kernel_size_t unique_len;	  /**< Length of unique */
	char __user *unique;	  /**< Unique name for driver instantiation */
};

struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/*
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/*
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
};

/*
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};

/*
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/*   Private data */
};

/*
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	    /* Add to the *END* of the list */
};

/*
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/*
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/*
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};

/*
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/*
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/*
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Entries in list */
	struct drm_buf_desc __user *list;
};

/*
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/*
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void __user *address;	       /**< Address of buffer */
};

/*
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#ifdef __cplusplus
	void __user *virt;
#else
	void __user *virtual;		/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};

/*
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	  /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/*
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/*
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/*
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/*
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/*
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};

/*
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	/* bits 1-6 are reserved for high crtcs */
	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/*
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
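
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): waiting
 * for the next vertical blank using the request/reply union above.  "fd" is
 * assumed to be an open DRM device node; error handling is elided.
 * DRM_IOCTL_WAIT_VBLANK is defined later in this header.
 *
 *	union drm_wait_vblank vbl = {0};
 *
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;		// one vblank from now
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *	// vbl.reply.sequence and vbl.reply.tval_* now describe the vblank
 */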

#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/*
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc;
	__u32 cmd;
};

/*
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/*
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/*
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/*
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;	/* physical address */
	unsigned long aperture_size;	/* bytes */
	unsigned long memory_allowed;	/* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};

/*
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/*
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};

/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	__u32 pad;
};

/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;

	/** Returned global name */
	__u32 name;
};

/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;

	/** Returned handle for the object */
	__u32 handle;

	/** Returned size of the object */
	__u64 size;
};

/**
 * DRM_CAP_DUMB_BUFFER
 *
 * If set to 1, the driver supports creating dumb buffers via the
 * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
 */
#define DRM_CAP_DUMB_BUFFER		0x1
/**
 * DRM_CAP_VBLANK_HIGH_CRTC
 *
 * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
 * in the high bits of &drm_wait_vblank_request.type.
 *
 * Starting kernel version 2.6.39, this capability is always set to 1.
 */
#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
/**
 * DRM_CAP_DUMB_PREFERRED_DEPTH
 *
 * The preferred bit depth for dumb buffers.
 *
 * The bit depth is the number of bits used to indicate the color of a single
 * pixel excluding any padding. This is different from the number of bits per
 * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
 * pixel.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
/**
 * DRM_CAP_DUMB_PREFER_SHADOW
 *
 * If set to 1, the driver prefers userspace to render to a shadow buffer
 * instead of directly rendering to a dumb buffer. For best speed, userspace
 * should do streaming ordered memory copies into the dumb buffer and never
 * read from it.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
/**
 * DRM_CAP_PRIME
 *
 * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
 * and &DRM_PRIME_CAP_EXPORT.
 *
 * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
 * &DRM_PRIME_CAP_EXPORT are always advertised.
 *
 * PRIME buffers are exposed as dma-buf file descriptors.
 * See :ref:`prime_buffer_sharing`.
 */
#define DRM_CAP_PRIME			0x5
/**
 * DRM_PRIME_CAP_IMPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
 * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
 *
 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 */
#define  DRM_PRIME_CAP_IMPORT		0x1
/**
 * DRM_PRIME_CAP_EXPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
 * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
 *
 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 */
#define  DRM_PRIME_CAP_EXPORT		0x2
/**
 * DRM_CAP_TIMESTAMP_MONOTONIC
 *
 * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
 * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
 * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
 * clocks.
 *
 * Starting from kernel version 2.6.39, the default value for this capability
 * is 1. Starting kernel version 4.15, this capability is always set to 1.
 */
#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
/**
 * DRM_CAP_ASYNC_PAGE_FLIP
 *
 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
 */
#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
/**
 * DRM_CAP_CURSOR_WIDTH
 *
 * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
 * width x height combination for the hardware cursor. The intention is that a
 * hardware agnostic userspace can query a cursor plane size to use.
 *
 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, eg. i915 returns the
 * maximum plane size.
 */
#define DRM_CAP_CURSOR_WIDTH		0x8
/**
 * DRM_CAP_CURSOR_HEIGHT
 *
 * See &DRM_CAP_CURSOR_WIDTH.
 */
#define DRM_CAP_CURSOR_HEIGHT		0x9
/**
 * DRM_CAP_ADDFB2_MODIFIERS
 *
 * If set to 1, the driver supports supplying modifiers in the
 * &DRM_IOCTL_MODE_ADDFB2 ioctl.
 */
#define DRM_CAP_ADDFB2_MODIFIERS	0x10
/**
 * DRM_CAP_PAGE_FLIP_TARGET
 *
 * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
 * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
 * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
 * ioctl.
 */
#define DRM_CAP_PAGE_FLIP_TARGET	0x11
/**
 * DRM_CAP_CRTC_IN_VBLANK_EVENT
 *
 * If set to 1, the kernel supports reporting the CRTC ID in
 * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
 * &DRM_EVENT_FLIP_COMPLETE events.
 *
 * Starting kernel version 4.12, this capability is always set to 1.
 */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
/**
 * DRM_CAP_SYNCOBJ
 *
 * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
 */
#define DRM_CAP_SYNCOBJ		0x13
/**
 * DRM_CAP_SYNCOBJ_TIMELINE
 *
 * If set to 1, the driver supports timeline operations on sync objects. See
 * :ref:`drm_sync_objects`.
 */
#define DRM_CAP_SYNCOBJ_TIMELINE	0x14

/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;
	__u64 value;
};
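
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): querying
 * one of the DRM_CAP_* values above.  The kernel fills drm_get_cap.value.
 * "fd" is assumed to be an open DRM device node; error handling is elided.
 * DRM_IOCTL_GET_CAP is defined later in this header.
 *
 *	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
 *
 *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value != 0)
 *		; // driver supports DRM_IOCTL_MODE_CREATE_DUMB
 */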

/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 3.13.
 */
#define DRM_CLIENT_CAP_STEREO_3D	1

/**
 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 *
 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 * cursor) to userspace.
 *
 * This capability has been introduced in kernel version 3.15. Starting from
 * kernel version 3.17, this capability is always supported for all drivers.
 */
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES  2

/**
 * DRM_CLIENT_CAP_ATOMIC
 *
 * If set to 1, the DRM core will expose atomic properties to userspace. This
 * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
 * &DRM_CLIENT_CAP_ASPECT_RATIO.
 *
 * If the driver doesn't support atomic mode-setting, enabling this capability
 * will fail with -EOPNOTSUPP.
 *
 * This capability has been introduced in kernel version 4.0. Starting from
 * kernel version 4.2, this capability is always supported for atomic-capable
 * drivers.
 */
#define DRM_CLIENT_CAP_ATOMIC	3

/**
 * DRM_CLIENT_CAP_ASPECT_RATIO
 *
 * If set to 1, the DRM core will provide aspect ratio information in modes.
 * See ``DRM_MODE_FLAG_PIC_AR_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 4.18.
 */
#define DRM_CLIENT_CAP_ASPECT_RATIO    4

/**
 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
 *
 * If set to 1, the DRM core will expose special connectors to be used for
 * writing back to memory the scene setup in the commit. The client must enable
 * &DRM_CLIENT_CAP_ATOMIC first.
 *
 * This capability is always supported for atomic-capable drivers starting from
 * kernel version 4.19.
 */
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5

/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;
	__u64 value;
};
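
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): opting in
 * to atomic mode-setting.  Per the DRM_CLIENT_CAP_ATOMIC description above,
 * the call fails with -EOPNOTSUPP on non-atomic drivers, so the return value
 * doubles as a feature probe.  "fd" is assumed to be an open DRM device node;
 * DRM_IOCTL_SET_CLIENT_CAP is defined later in this header.
 *
 *	struct drm_set_client_cap cc = {
 *		.capability = DRM_CLIENT_CAP_ATOMIC,
 *		.value = 1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cc) != 0)
 *		; // atomic not supported; fall back to the legacy ioctls
 */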

#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
	__u32 handle;

	/** Flags; only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};

struct drm_syncobj_create {
	__u32 handle;
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
	__u32 flags;
};

struct drm_syncobj_destroy {
	__u32 handle;
	__u32 pad;
};

#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
struct drm_syncobj_handle {
	__u32 handle;
	__u32 flags;

	__s32 fd;
	__u32 pad;
};

struct drm_syncobj_transfer {
	__u32 src_handle;
	__u32 dst_handle;
	__u64 src_point;
	__u64 dst_point;
	__u32 flags;
	__u32 pad;
};

#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
	__u64 handles;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
};
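
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): waiting on
 * two sync objects until both are signalled.  "h0", "h1" and "deadline_ns"
 * are assumed caller-provided values (handles from earlier syncobj ioctls and
 * an absolute timeout in nanoseconds); error handling is elided.  The handles
 * array is passed as a pointer squeezed into the __u64 field, and
 * DRM_IOCTL_SYNCOBJ_WAIT is defined later in this header.
 *
 *	__u32 handles[2] = { h0, h1 };
 *	struct drm_syncobj_wait wait = {
 *		.handles = (__u64)(uintptr_t)handles,
 *		.count_handles = 2,
 *		.timeout_nsec = deadline_ns,
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */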

struct drm_syncobj_timeline_wait {
	__u64 handles;
	/* wait on a specific timeline point for every handle */
	__u64 points;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
};

/**
 * struct drm_syncobj_eventfd
 * @handle: syncobj handle.
 * @flags: Zero to wait for the point to be signalled, or
 *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
 *         available for the point.
 * @point: syncobj timeline point (set to zero for binary syncobjs).
 * @fd: Existing eventfd to send events to.
 * @pad: Must be zero.
 *
 * Register an eventfd to be signalled by a syncobj. The eventfd counter will
 * be incremented by one.
 */
struct drm_syncobj_eventfd {
	__u32 handle;
	__u32 flags;
	__u64 point;
	__s32 fd;
	__u32 pad;
};


struct drm_syncobj_array {
	__u64 handles;
	__u32 count_handles;
	__u32 pad;
};

#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
	__u64 handles;
	__u64 points;
	__u32 count_handles;
	__u32 flags;
};


/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
	__u32 crtc_id;		/* requested crtc_id */
	__u32 active;		/* return: crtc output is active */
	__u64 sequence;		/* return: most recent vblank sequence */
	__s64 sequence_ns;	/* return: most recent time of first pixel out */
};

/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display
 */
#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;
	__u32 flags;
	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
	__u64 user_data;	/* user data passed to event */
};

#if defined(__cplusplus)
}
#endif

#include "drm_mode.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
/**
 * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
 *
 * GEM handles are not reference-counted by the kernel. User-space is
 * responsible for managing their lifetime. For example, if user-space imports
 * the same memory object twice on the same DRM file description, the same GEM
 * handle is returned by both imports, and user-space needs to ensure
 * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
 * when a memory object is allocated, then exported and imported again on the
 * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
 * and always returns fresh new GEM handles even if an existing GEM handle
 * already refers to the same memory object before the IOCTL is performed.
 */
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
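
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): releasing
 * a GEM handle once user-space is done with it.  Because handles are not
 * reference-counted by the kernel (see above), a handle obtained more than
 * once must still be closed exactly once.  "fd" and "handle" are assumed
 * caller-provided; error handling is elided.
 *
 *	struct drm_gem_close close_args = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 */
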
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#ifdef __OpenBSD__
#define DRM_IOCTL_GET_PCIINFO		DRM_IOR( 0x15, struct drm_pciinfo)
#else
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#endif
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

/**
 * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
 *
 * User-space sets &drm_prime_handle.handle with the GEM handle to export and
 * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
 * &drm_prime_handle.fd.
 *
 * The export can fail for any driver-specific reason, e.g. because export is
 * not supported for this specific GEM handle (but might be for others).
 *
 * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
 */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
/**
 * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
 *
 * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
 * import, and gets back a GEM handle in &drm_prime_handle.handle.
 * &drm_prime_handle.flags is unused.
 *
 * If an existing GEM handle refers to the memory object backing the DMA-BUF,
 * that GEM handle is returned. Therefore user-space which needs to handle
 * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
 * reference-count duplicated GEM handles. For more information see
 * &DRM_IOCTL_GEM_CLOSE.
 *
 * The import can fail for any driver-specific reason, e.g. because import is
 * only supported for DMA-BUFs allocated on this DRM device.
 *
 * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
 */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
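
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): exporting
 * a GEM handle as a dma-buf file descriptor and importing it back, per the
 * two PRIME ioctls above.  "fd" and "handle" are assumed caller-provided,
 * <fcntl.h> is assumed included for O_CLOEXEC/O_RDWR, and error handling is
 * elided.
 *
 *	struct drm_prime_handle prime = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	// prime.fd is the dma-buf; pass it to another process or device
 *
 *	struct drm_prime_handle import = { .fd = prime.fd };
 *
 *	ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import);
 *	// import.handle refers to the same memory object
 */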

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
 * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
 *
 * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
 * argument is a framebuffer object ID.
 *
 * Warning: removing a framebuffer currently in-use on an enabled plane will
 * disable that plane. The CRTC the plane is linked to may also be disabled
 * (depending on driver capabilities).
 */
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
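
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): creating
 * and mapping a "dumb" buffer for software rendering, assuming the driver
 * advertises DRM_CAP_DUMB_BUFFER.  struct drm_mode_create_dumb and struct
 * drm_mode_map_dumb come from drm_mode.h; <sys/mman.h> is assumed included
 * and error handling is elided.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = {0};
 *	void *fb;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, map.offset);
 */
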
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)

#define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
#define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)

#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)

/**
 * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
 *
 * This queries metadata about a framebuffer. User-space fills
 * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
 * struct as the output.
 *
 * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
 * will be filled with GEM buffer handles. Fresh new GEM handles are always
 * returned, even if another GEM handle referring to the same memory object
 * already exists on the DRM file description. The caller is responsible for
 * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
 * new handle will be returned for multiple planes in case they use the same
 * memory object. Planes are valid until one has a zero handle -- this can be
 * used to compute the number of planes.
 *
 * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
 * until one has a zero &drm_mode_fb_cmd2.pitches.
 *
 * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
 * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
 * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
 *
 * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
 * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
 * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
 * double-close handles which are specified multiple times in the array.
 */
#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
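
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): turning a
 * framebuffer ID into per-plane dma-buf FDs as described above, closing each
 * unique GEM handle exactly once.  struct drm_mode_fb_cmd2 comes from
 * drm_mode.h; "fd" and "fb_id" are assumed caller-provided and error handling
 * is elided.
 *
 *	struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb);
 *	for (int i = 0; i < 4 && fb.handles[i]; i++) {
 *		struct drm_prime_handle prime = { .handle = fb.handles[i] };
 *		struct drm_gem_close gc = { .handle = fb.handles[i] };
 *		int duplicate = 0;
 *
 *		for (int j = 0; j < i; j++)
 *			duplicate |= (fb.handles[j] == fb.handles[i]);
 *		if (duplicate)
 *			continue;	// avoid double export/close
 *		ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *		// prime.fd is the dma-buf for this plane
 *		ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gc);
 *	}
 */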

#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)

/*
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x9f.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END			0xA0

/**
 * struct drm_event - Header for DRM events
 * @type: event type.
 * @length: total number of payload bytes (including header).
 *
 * This struct is a header for events written back to user-space on the DRM FD.
 * A read on the DRM FD will always only return complete events: e.g. if the
 * read buffer is 100 bytes large and there are two 64 byte events pending,
 * only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
 * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
 * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
 */
struct drm_event {
	__u32 type;
	__u32 length;
};

/**
 * DRM_EVENT_VBLANK - vertical blanking event
 *
 * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
 * &_DRM_VBLANK_EVENT flag set.
 *
 * The event payload is a struct drm_event_vblank.
 */
#define DRM_EVENT_VBLANK 0x01
/**
 * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
 *
 * This event is sent in response to an atomic commit or legacy page-flip with
 * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
 *
 * The event payload is a struct drm_event_vblank.
 */
#define DRM_EVENT_FLIP_COMPLETE 0x02
/**
 * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
 *
 * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
 *
 * The event payload is a struct drm_event_crtc_sequence.
 */
#define DRM_EVENT_CRTC_SEQUENCE	0x03

struct drm_event_vblank {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
	__u32 sequence;
	__u32 crtc_id; /* 0 on older kernels that do not support this */
};
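
/*
 * Illustrative usage sketch (editor's note, not part of the UAPI): draining
 * events from the DRM file descriptor.  A read only ever returns whole
 * events, so the buffer can be walked using drm_event.length (which counts
 * the header as well).  <unistd.h> is assumed included; error and short-read
 * handling is elided.
 *
 *	char buf[1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	for (ssize_t off = 0; off < len;) {
 *		struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *		if (e->type == DRM_EVENT_FLIP_COMPLETE) {
 *			struct drm_event_vblank *vb = (void *)e;
 *			// vb->user_data, vb->tv_sec/tv_usec, vb->crtc_id ...
 *		}
 *		off += e->length;
 *	}
 */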

#ifdef __OpenBSD__
struct drm_pciinfo {
	uint16_t	domain;
	uint8_t		bus;
	uint8_t		dev;
	uint8_t		func;
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subvendor_id;
	uint16_t	subdevice_id;
	uint8_t		revision_id;
};
#endif

/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display
 */
struct drm_event_crtc_sequence {
	struct drm_event	base;
	__u64			user_data;
	__s64			time_ns;
	__u64			sequence;
};

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif

#if defined(__cplusplus)
}
#endif

#endif