/*
 * Copyright (c) 2015-2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_
#define _UAPI_NVIDIA_DRM_IOCTL_H_

#include <drm/drm.h>

/*
 * We should do our best to keep these values constant. Any change to these
 * values will be backwards incompatible with client applications that might
 * be using them.
 */
#define DRM_NVIDIA_GET_CRTC_CRC32                   0x00
#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY          0x01
#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY      0x02
#define DRM_NVIDIA_GET_DEV_INFO                     0x03
#define DRM_NVIDIA_FENCE_SUPPORTED                  0x04
#define DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE       0x05
#define DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH           0x06
#define DRM_NVIDIA_GET_CLIENT_CAPABILITY            0x08
#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY          0x09
#define DRM_NVIDIA_GEM_MAP_OFFSET                   0x0a
#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY           0x0b
#define DRM_NVIDIA_GET_CRTC_CRC32_V2                0x0c
#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY         0x0d
#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT              0x0e
#define DRM_NVIDIA_DMABUF_SUPPORTED                 0x0f
#define DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID      0x10
#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID      0x11
#define DRM_NVIDIA_GRANT_PERMISSIONS                0x12
#define DRM_NVIDIA_REVOKE_PERMISSIONS               0x13
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE         0x14
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE             0x15
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT               0x16
#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH             0x17

#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY                           \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY),      \
             struct drm_nvidia_gem_import_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY                       \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY),  \
             struct drm_nvidia_gem_import_userspace_memory_params)

#define DRM_IOCTL_NVIDIA_GET_DEV_INFO                                      \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO),                 \
             struct drm_nvidia_get_dev_info_params)

/*
 * XXX Solaris compiler has issues with DRM_IO. None of this is supported on
 * Solaris anyway, so just skip it.
 *
 * 'warning: suggest parentheses around arithmetic in operand of |'
 */
#if defined(NV_LINUX)
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED                         \
    DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED                        \
    DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_DMABUF_SUPPORTED)
#else
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED 0
#endif

#define DRM_IOCTL_NVIDIA_PRIME_FENCE_CONTEXT_CREATE                     \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE),\
             struct drm_nvidia_prime_fence_context_create_params)

#define DRM_IOCTL_NVIDIA_GEM_PRIME_FENCE_ATTACH                         \
    DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH),     \
            struct drm_nvidia_gem_prime_fence_attach_params)

#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY                          \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY),     \
             struct drm_nvidia_get_client_capability_params)

#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32                                 \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32),            \
             struct drm_nvidia_get_crtc_crc32_params)

#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2                              \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2),         \
             struct drm_nvidia_get_crtc_crc32_v2_params)

#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY                        \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY),   \
             struct drm_nvidia_gem_export_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET                                 \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET),            \
             struct drm_nvidia_gem_map_offset_params)

#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY                         \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY),    \
             struct drm_nvidia_gem_alloc_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY                       \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY),  \
             struct drm_nvidia_gem_export_dmabuf_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT                            \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT),       \
             struct drm_nvidia_gem_identify_object_params)

#define DRM_IOCTL_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID                     \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID),\
             struct drm_nvidia_get_dpy_id_for_connector_id_params)

#define DRM_IOCTL_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID                     \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID),\
             struct drm_nvidia_get_connector_id_for_dpy_id_params)

#define DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS                              \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GRANT_PERMISSIONS),         \
             struct drm_nvidia_grant_permissions_params)

#define DRM_IOCTL_NVIDIA_REVOKE_PERMISSIONS                             \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS),        \
             struct drm_nvidia_revoke_permissions_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE                       \
    DRM_IOWR((DRM_COMMAND_BASE +                                        \
              DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE),                     \
             struct drm_nvidia_semsurf_fence_ctx_create_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE                           \
    DRM_IOWR((DRM_COMMAND_BASE +                                        \
              DRM_NVIDIA_SEMSURF_FENCE_CREATE),                         \
             struct drm_nvidia_semsurf_fence_create_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT                             \
    DRM_IOW((DRM_COMMAND_BASE +                                         \
             DRM_NVIDIA_SEMSURF_FENCE_WAIT),                            \
            struct drm_nvidia_semsurf_fence_wait_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH                           \
    DRM_IOW((DRM_COMMAND_BASE +                                         \
             DRM_NVIDIA_SEMSURF_FENCE_ATTACH),                          \
            struct drm_nvidia_semsurf_fence_attach_params)

struct drm_nvidia_gem_import_nvkms_memory_params {
    uint64_t mem_size;           /* IN */

    uint64_t nvkms_params_ptr;   /* IN */
    uint64_t nvkms_params_size;  /* IN */

    uint32_t handle;             /* OUT */

    uint32_t __pad;
};

struct drm_nvidia_gem_import_userspace_memory_params {
    uint64_t size;               /* IN Size of memory in bytes */
    uint64_t address;            /* IN Virtual address of userspace memory */
    uint32_t handle;             /* OUT Handle to gem object */
};
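
/*
 * Illustrative sketch (not part of the ABI defined by this header): importing
 * an existing userspace allocation as a GEM object.  "fd", "buf" and
 * "buf_size" are placeholder names; drmIoctl() is the libdrm wrapper from
 * <xf86drm.h>, and a plain ioctl() with EINTR retry works the same way.
 *
 *     struct drm_nvidia_gem_import_userspace_memory_params p = { 0 };
 *
 *     p.size    = buf_size;        // size of the allocation in bytes
 *     p.address = (uintptr_t)buf;  // virtual address of the allocation
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY, &p) == 0) {
 *         // p.handle now names the imported memory as a GEM object on fd
 *     }
 */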

struct drm_nvidia_get_dev_info_params {
    uint32_t gpu_id;             /* OUT */
    uint32_t primary_index;      /* OUT; the "card%d" value */

    uint32_t supports_alloc;     /* OUT */
    /* The generic_page_kind, page_kind_generation, and sector_layout
     * fields are only valid if supports_alloc is true.
     * See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these. */
    uint32_t generic_page_kind;    /* OUT */
    uint32_t page_kind_generation; /* OUT */
    uint32_t sector_layout;        /* OUT */
    uint32_t supports_sync_fd;     /* OUT */
    uint32_t supports_semsurf;     /* OUT */
};
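
/*
 * Illustrative sketch (not part of the ABI defined by this header): querying
 * device information.  "fd" is assumed to be an open nvidia-drm device file
 * descriptor.
 *
 *     struct drm_nvidia_get_dev_info_params info = { 0 };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_DEV_INFO, &info) == 0) {
 *         // info.gpu_id, info.primary_index, etc. are now populated; the
 *         // block-linear fields are only meaningful when info.supports_alloc
 *         // is nonzero.
 *     }
 */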

struct drm_nvidia_prime_fence_context_create_params {
    uint32_t handle;            /* OUT GEM handle to fence context */

    uint32_t index;             /* IN Index of semaphore to use for fencing */
    uint64_t size;              /* IN Size of semaphore surface in bytes */

    /* Params for importing userspace semaphore surface */
    uint64_t import_mem_nvkms_params_ptr;  /* IN */
    uint64_t import_mem_nvkms_params_size; /* IN */

    /* Params for creating software signaling event */
    uint64_t event_nvkms_params_ptr;  /* IN */
    uint64_t event_nvkms_params_size; /* IN */
};

struct drm_nvidia_gem_prime_fence_attach_params {
    uint32_t handle;                /* IN GEM handle to attach fence to */
    uint32_t fence_context_handle;  /* IN GEM handle of the fence context on which the fence runs */
    uint32_t sem_thresh;            /* IN Semaphore value to reach before signal */
    uint32_t __pad;
};

struct drm_nvidia_get_client_capability_params {
    uint64_t capability;    /* IN Client capability enum */
    uint64_t value;         /* OUT Client capability value */
};

/* Struct that stores a CRC32 value and whether it is supported by the hardware */
struct drm_nvidia_crtc_crc32 {
    uint32_t value; /* Read value, undefined if supported is false */
    uint8_t supported; /* Supported boolean, true if readable by hardware */
    uint8_t __pad0;
    uint16_t __pad1;
};

struct drm_nvidia_crtc_crc32_v2_out {
    struct drm_nvidia_crtc_crc32  compositorCrc32;        /* OUT compositor hardware CRC32 value */
    struct drm_nvidia_crtc_crc32  rasterGeneratorCrc32;   /* OUT raster generator CRC32 value */
    struct drm_nvidia_crtc_crc32  outputCrc32;            /* OUT SF/SOR CRC32 value */
};

struct drm_nvidia_get_crtc_crc32_v2_params {
    uint32_t crtc_id;                             /* IN CRTC identifier */
    struct drm_nvidia_crtc_crc32_v2_out crc32;    /* OUT Crc32 output structure */
};

struct drm_nvidia_get_crtc_crc32_params {
    uint32_t crtc_id;      /* IN CRTC identifier */
    uint32_t crc32;        /* OUT CRC32 value */
};
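
/*
 * Illustrative sketch (not part of the ABI defined by this header): reading
 * the CRC32 for a CRTC.  "fd" and "crtc_id" are placeholders; crtc_id would
 * typically come from drmModeGetResources().
 *
 *     struct drm_nvidia_get_crtc_crc32_params crc = { .crtc_id = crtc_id };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_CRTC_CRC32, &crc) == 0) {
 *         // crc.crc32 holds the CRC32 value for the CRTC
 *     }
 */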

struct drm_nvidia_gem_export_nvkms_memory_params {
    uint32_t handle;             /* IN */
    uint32_t __pad;

    uint64_t nvkms_params_ptr;   /* IN */
    uint64_t nvkms_params_size;  /* IN */
};

struct drm_nvidia_gem_map_offset_params {
    uint32_t handle;             /* IN Handle to gem object */
    uint32_t __pad;

    uint64_t offset;             /* OUT Fake offset */
};
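
/*
 * Illustrative sketch (not part of the ABI defined by this header): the usual
 * DRM pattern of translating a GEM handle into a fake offset and then
 * mmap()ing through the DRM file descriptor.  "fd", "gem_handle" and "size"
 * are placeholder names.
 *
 *     struct drm_nvidia_gem_map_offset_params p = { .handle = gem_handle };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET, &p) == 0) {
 *         void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                          MAP_SHARED, fd, p.offset);
 *     }
 */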

#define NV_GEM_ALLOC_NO_SCANOUT                     (1 << 0)

struct drm_nvidia_gem_alloc_nvkms_memory_params {
    uint32_t handle;              /* OUT */
    uint8_t  block_linear;        /* IN */
    uint8_t  compressible;        /* IN/OUT */
    uint16_t __pad0;

    uint64_t memory_size;         /* IN */
    uint32_t flags;               /* IN */
    uint32_t __pad1;
};
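
/*
 * Illustrative sketch (not part of the ABI defined by this header): one
 * plausible way to allocate NVKMS-backed memory through this interface.  The
 * parameter choices below (pitch-linear, no scanout) are assumptions for the
 * example, not a recommendation; "fd" and "size" are placeholders.
 *
 *     struct drm_nvidia_gem_alloc_nvkms_memory_params p = { 0 };
 *
 *     p.memory_size  = size;
 *     p.block_linear = 0;                       // pitch-linear layout
 *     p.flags        = NV_GEM_ALLOC_NO_SCANOUT; // not intended for scanout
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY, &p) == 0) {
 *         // p.handle names the allocation; p.compressible is IN/OUT and is
 *         // updated on return.
 *     }
 */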

struct drm_nvidia_gem_export_dmabuf_memory_params {
    uint32_t handle;   /* IN GEM handle */
    uint32_t __pad;

    uint64_t nvkms_params_ptr;   /* IN */
    uint64_t nvkms_params_size;  /* IN */
};

typedef enum {
    NV_GEM_OBJECT_NVKMS,
    NV_GEM_OBJECT_DMABUF,
    NV_GEM_OBJECT_USERMEMORY,

    NV_GEM_OBJECT_UNKNOWN = 0x7fffffff  /* Force the enum to be 32 bits wide. */
} drm_nvidia_gem_object_type;

struct drm_nvidia_gem_identify_object_params {
    uint32_t                    handle;         /* IN GEM handle */
    drm_nvidia_gem_object_type  object_type;    /* OUT GEM object type */
};

struct drm_nvidia_get_dpy_id_for_connector_id_params {
    uint32_t connectorId; /* IN */
    uint32_t dpyId;       /* OUT */
};

struct drm_nvidia_get_connector_id_for_dpy_id_params {
    uint32_t dpyId;       /* IN */
    uint32_t connectorId; /* OUT */
};

enum drm_nvidia_permissions_type {
    NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
    NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
};

struct drm_nvidia_grant_permissions_params {
    int32_t fd;           /* IN */
    uint32_t dpyId;       /* IN */
    uint32_t type;        /* IN */
};

struct drm_nvidia_revoke_permissions_params {
    uint32_t dpyId;       /* IN */
    uint32_t type;        /* IN */
};

struct drm_nvidia_semsurf_fence_ctx_create_params {
    uint64_t index;             /* IN Index of the desired semaphore in the
                                 * fence context's semaphore surface */

    /* Params for importing userspace semaphore surface */
    uint64_t nvkms_params_ptr;  /* IN */
    uint64_t nvkms_params_size; /* IN */

    uint32_t handle;            /* OUT GEM handle to fence context */
    uint32_t __pad;
};

struct drm_nvidia_semsurf_fence_create_params {
    uint32_t fence_context_handle;  /* IN GEM handle of the fence context on
                                     * which the fence runs */

    uint32_t timeout_value_ms;      /* IN Timeout value in ms for the fence,
                                     * after which the fence will be signaled
                                     * with its error status set to -ETIMEDOUT.
                                     * The default timeout value is 5000ms */

    uint64_t wait_value;            /* IN Semaphore value to reach before signal */

    int32_t  fd;                    /* OUT sync FD object representing the
                                     * semaphore at the specified index reaching
                                     * a value >= wait_value */
    uint32_t __pad;
};
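
/*
 * Illustrative sketch (not part of the ABI defined by this header): creating
 * a sync FD that signals once the fence context's semaphore reaches a given
 * value.  "fd", "ctx_handle" and "value" are placeholder names.
 *
 *     struct drm_nvidia_semsurf_fence_create_params p = { 0 };
 *
 *     p.fence_context_handle = ctx_handle;
 *     p.timeout_value_ms     = 5000;       // the documented default
 *     p.wait_value           = value;
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE, &p) == 0) {
 *         // p.fd is a sync FD; it can be polled or handed to any API that
 *         // accepts sync files, and should be closed when no longer needed.
 *     }
 */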

/*
 * Note there is no provision for timeouts in this ioctl. The kernel
 * documentation asserts timeouts should be handled by fence producers, and
 * that waiters should not second-guess their logic, as it is producers rather
 * than consumers that have better information when it comes to determining a
 * reasonable timeout for a given workload.
 */
struct drm_nvidia_semsurf_fence_wait_params {
    uint32_t fence_context_handle;  /* IN GEM handle to fence context which will
                                     * be used to wait on the sync FD.  Need not
                                     * be the fence context used to create the
                                     * sync FD. */

    int32_t  fd;                    /* IN sync FD object to wait on */

    uint64_t pre_wait_value;        /* IN Wait for the semaphore represented by
                                     * fence_context to reach this value before
                                     * waiting for the sync file. */

    uint64_t post_wait_value;       /* IN Signal the semaphore represented by
                                     * fence_context to this value after waiting
                                     * for the sync file */
};
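
/*
 * Illustrative sketch (not part of the ABI defined by this header): asking
 * the kernel to wait on a sync FD on behalf of a fence context, bracketing
 * the wait with pre/post semaphore values.  All names below are placeholders.
 *
 *     struct drm_nvidia_semsurf_fence_wait_params p = { 0 };
 *
 *     p.fence_context_handle = ctx_handle;
 *     p.fd                   = sync_fd;
 *     p.pre_wait_value       = pre_value;
 *     p.post_wait_value      = post_value;
 *
 *     (void)drmIoctl(fd, DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT, &p);
 */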

struct drm_nvidia_semsurf_fence_attach_params {
    uint32_t handle;                /* IN GEM handle of buffer */

    uint32_t fence_context_handle;  /* IN GEM handle of fence context */

    uint32_t timeout_value_ms;      /* IN Timeout value in ms for the fence,
                                     * after which the fence will be signaled
                                     * with its error status set to -ETIMEDOUT.
                                     * The default timeout value is 5000ms */

    uint32_t shared;                /* IN If true, fence will reserve shared
                                     * access to the buffer, otherwise it will
                                     * reserve exclusive access */

    uint64_t wait_value;            /* IN Semaphore value to reach before signal */
};

#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */