xref: /openbsd/sys/dev/pci/drm/drm_drv.c (revision 998de4a5)
1 /* $OpenBSD: drm_drv.c,v 1.148 2016/08/24 09:31:56 dlg Exp $ */
2 /*-
3  * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
4  * Copyright © 2008 Intel Corporation
5  * Copyright 2003 Eric Anholt
6  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8  * All Rights Reserved.
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a
11  * copy of this software and associated documentation files (the "Software"),
12  * to deal in the Software without restriction, including without limitation
13  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14  * and/or sell copies of the Software, and to permit persons to whom the
15  * Software is furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the next
18  * paragraph) shall be included in all copies or substantial portions of the
19  * Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
24  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
25  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27  * OTHER DEALINGS IN THE SOFTWARE.
28  *
29  * Authors:
30  *    Rickard E. (Rik) Faith <faith@valinux.com>
31  *    Daryll Strauss <daryll@valinux.com>
32  *    Gareth Hughes <gareth@valinux.com>
33  *    Eric Anholt <eric@anholt.net>
34  *    Owain Ainsworth <oga@openbsd.org>
35  *
36  */
37 
38 /** @file drm_drv.c
39  * The catch-all file for DRM device support, including module setup/teardown,
40  * open/close, and ioctl dispatch.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/fcntl.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/pledge.h>
48 #include <sys/poll.h>
49 #include <sys/specdev.h>
50 #include <sys/systm.h>
51 #include <sys/ttycom.h> /* for TIOCSGRP */
52 #include <sys/vnode.h>
53 
54 #include <uvm/uvm.h>
55 #include <uvm/uvm_device.h>
56 
57 #include "drmP.h"
58 #include "drm.h"
59 #include "drm_sarea.h"
60 
61 #ifdef DRMDEBUG
62 int drm_debug_flag = 1;
63 #endif
64 
65 int	 drm_firstopen(struct drm_device *);
66 int	 drm_lastclose(struct drm_device *);
67 void	 drm_attach(struct device *, struct device *, void *);
68 int	 drm_probe(struct device *, void *, void *);
69 int	 drm_detach(struct device *, int);
70 void	 drm_quiesce(struct drm_device *);
71 void	 drm_wakeup(struct drm_device *);
72 int	 drm_activate(struct device *, int);
73 int	 drmprint(void *, const char *);
74 int	 drmsubmatch(struct device *, void *, void *);
75 int	 drm_do_ioctl(struct drm_device *, int, u_long, caddr_t);
76 int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
77 	     struct drm_pending_event **);
78 
79 int	 drm_getunique(struct drm_device *, void *, struct drm_file *);
80 int	 drm_version(struct drm_device *, void *, struct drm_file *);
81 int	 drm_setversion(struct drm_device *, void *, struct drm_file *);
82 int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
83 int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
84 int	 drm_file_cmp(struct drm_file *, struct drm_file *);
85 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
86 
87 int	 drm_setunique(struct drm_device *, void *, struct drm_file *);
88 int	 drm_noop(struct drm_device *, void *, struct drm_file *);
89 
90 int	 drm_getcap(struct drm_device *, void *, struct drm_file *);
91 int	 drm_setclientcap(struct drm_device *, void *, struct drm_file *);
92 
/*
 * Build one table slot, indexed by ioctl number: records the full ioctl
 * command word, the handler, and its permission flags.  cmd_drv is only
 * meaningful for driver-private ioctls and is left 0 here.
 */
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}

/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
#ifdef __linux__
	/* Legacy entries compiled out on OpenBSD; kept to mirror Linux. */
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
#endif
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
#else
	/* SAREA contexts are unsupported here; accept and ignore. */
	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_noop, DRM_AUTH),
#endif

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

#ifdef __linux__
	/* Legacy DMA buffer management, Linux-only. */
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_noop, DRM_AUTH),
#endif
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#if __OS_HAS_AGP
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	/* GEM object management. */
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	/* Kernel modesetting (KMS). */
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),

#ifdef notyet
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

/* Number of slots in the core table (drm_ioctls is a real array here). */
#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
234 
235 int
236 pledge_ioctl_drm(struct proc *p, long com, dev_t device)
237 {
238 	struct drm_device *dev = drm_get_device_from_kdev(device);
239 	unsigned int nr = DRM_IOCTL_NR(com);
240 	const struct drm_ioctl_desc *ioctl;
241 
242 	if (dev == NULL)
243 		return EPERM;
244 
245 	if (nr < DRM_CORE_IOCTL_COUNT &&
246 	    ((nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)))
247 		ioctl = &drm_ioctls[nr];
248 	else if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END &&
249 	    nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)
250 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
251 	else
252 		return EPERM;
253 
254 	if (ioctl->flags & DRM_RENDER_ALLOW)
255 		return 0;
256 
257 	/*
258 	 * These are dangerous, but we have to allow them until we
259 	 * have prime/dma-buf support.
260 	 */
261 	switch (com) {
262 	case DRM_IOCTL_GET_MAGIC:
263 	case DRM_IOCTL_GEM_OPEN:
264 		return 0;
265 	}
266 
267 	return EPERM;
268 }
269 
270 int
271 drm_setunique(struct drm_device *dev, void *data,
272     struct drm_file *file_priv)
273 {
274 	/*
275 	 * Deprecated in DRM version 1.1, and will return EBUSY
276 	 * when setversion has
277 	 * requested version 1.1 or greater.
278 	 */
279 	return (-EBUSY);
280 }
281 
/** No-op ioctl: accept the request and do nothing, reporting success. */
int
drm_noop(struct drm_device *dev, void *data,
	     struct drm_file *file_priv)
{
	return (0);
}
288 
289 /*
290  * attach drm to a pci-based driver.
291  *
292  * This function does all the pci-specific calculations for the
293  * drm_attach_args.
294  */
295 struct device *
296 drm_attach_pci(struct drm_driver_info *driver, struct pci_attach_args *pa,
297     int is_agp, int console, struct device *dev)
298 {
299 	struct drm_attach_args arg;
300 	pcireg_t subsys;
301 
302 	arg.driver = driver;
303 	arg.dmat = pa->pa_dmat;
304 	arg.bst = pa->pa_memt;
305 	arg.is_agp = is_agp;
306 	arg.console = console;
307 
308 	arg.pci_vendor = PCI_VENDOR(pa->pa_id);
309 	arg.pci_device = PCI_PRODUCT(pa->pa_id);
310 
311 	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
312 	arg.pci_subvendor = PCI_VENDOR(subsys);
313 	arg.pci_subdevice = PCI_PRODUCT(subsys);
314 
315 	arg.pc = pa->pa_pc;
316 	arg.tag = pa->pa_tag;
317 	arg.bridgetag = pa->pa_bridgetag;
318 
319 	arg.busid_len = 20;
320 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
321 	if (arg.busid == NULL) {
322 		printf("%s: no memory for drm\n", dev->dv_xname);
323 		return (NULL);
324 	}
325 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
326 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
327 
328 	return (config_found_sm(dev, &arg, drmprint, drmsubmatch));
329 }
330 
331 int
332 drmprint(void *aux, const char *pnp)
333 {
334 	if (pnp != NULL)
335 		printf("drm at %s", pnp);
336 	return (UNCONF);
337 }
338 
339 int
340 drmsubmatch(struct device *parent, void *match, void *aux)
341 {
342 	extern struct cfdriver drm_cd;
343 	struct cfdata *cf = match;
344 
345 	/* only allow drm to attach */
346 	if (cf->cf_driver == &drm_cd)
347 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
348 	return (0);
349 }
350 
351 int
352 drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
353 {
354 	const struct drm_pcidev *id_entry;
355 
356 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
357 	    PCI_PRODUCT(pa->pa_id), idlist);
358 	if (id_entry != NULL)
359 		return 1;
360 
361 	return 0;
362 }
363 
364 int
365 drm_probe(struct device *parent, void *match, void *aux)
366 {
367 	struct cfdata *cf = match;
368 	struct drm_attach_args *da = aux;
369 
370 	if (cf->drmdevcf_console != DRMDEVCF_CONSOLE_UNK) {
371 		/*
372 		 * If console-ness of device specified, either match
373 		 * exactly (at high priority), or fail.
374 		 */
375 		if (cf->drmdevcf_console != 0 && da->console != 0)
376 			return (10);
377 		else
378 			return (0);
379 	}
380 
381 	/* If console-ness unspecified, it wins. */
382 	return (1);
383 }
384 
/*
 * Autoconf attach: populate the drm_device softc from the attach args
 * built in drm_attach_pci(), set up locks and per-device lists, and
 * initialize AGP and GEM support when the driver asks for them.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_device *dev = (struct drm_device *)self;
	struct drm_attach_args *da = aux;
	int bus, slot, func;
	int ret;

	dev->dev_private = parent;
	dev->driver = da->driver;

	/* DMA/bus-space handles and the "pci:..." unique string. */
	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;
	/* Fill in the embedded Linux-style pci_dev shim. */
	dev->pdev = &dev->_pdev;
	dev->pci_vendor = dev->pdev->vendor = da->pci_vendor;
	dev->pci_device = dev->pdev->device = da->pci_device;
	dev->pdev->subsystem_vendor = da->pci_subvendor;
	dev->pdev->subsystem_device = da->pci_subdevice;

	pci_decompose_tag(da->pc, da->tag, &bus, &slot, &func);
	dev->pdev->bus = &dev->pdev->_bus;
	dev->pdev->bus->number = bus;
	dev->pdev->devfn = PCI_DEVFN(slot, func);

	dev->pc = da->pc;
	dev->pdev->pc = da->pc;
	dev->bridgetag = da->bridgetag;
	dev->pdev->tag = da->tag;
	dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

	/* event_lock is taken from interrupt context, hence IPL_TTY. */
	rw_init(&dev->struct_mutex, "drmdevlk");
	mtx_init(&dev->event_lock, IPL_TTY);
	mtx_init(&dev->quiesce_mtx, IPL_NONE);

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if __OS_HAS_AGP
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Make the AGP aperture write-combining if we can. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, 0, 0,
		    "drmobjpl", NULL);
		pool_setipl(&dev->objpl, IPL_NONE);
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	printf("\n");
	return;

error:
	/* Undo what we can; a NULL dev_private marks the attach failed. */
	drm_lastclose(dev);
	dev->dev_private = NULL;
}
459 
460 int
461 drm_detach(struct device *self, int flags)
462 {
463 	struct drm_device *dev = (struct drm_device *)self;
464 
465 	drm_lastclose(dev);
466 
467 	if (dev->driver->driver_features & DRIVER_GEM)
468 		drm_gem_destroy(dev);
469 
470 	if (dev->driver->driver_features & DRIVER_GEM)
471 		pool_destroy(&dev->objpl);
472 
473 	drm_vblank_cleanup(dev);
474 
475 	if (dev->agp && dev->agp->mtrr) {
476 		int retcode;
477 
478 		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
479 		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
480 		DRM_DEBUG("mtrr_del = %d", retcode);
481 	}
482 
483 
484 	if (dev->agp != NULL) {
485 		drm_free(dev->agp);
486 		dev->agp = NULL;
487 	}
488 
489 	return 0;
490 }
491 
/*
 * Suspend path: stop new ioctls from starting and wait until every
 * ioctl currently in flight (counted by quiesce_count in drmioctl())
 * has drained.  drm_wakeup() undoes this on resume.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		/* drmioctl() wakes this channel as the last ioctl exits. */
		msleep(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", 0);
	}
	mtx_leave(&dev->quiesce_mtx);
}
503 
/*
 * Resume path: clear the quiesce flag and wake any ioctls that
 * blocked on it in drmioctl().
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
512 
513 int
514 drm_activate(struct device *self, int act)
515 {
516 	struct drm_device *dev = (struct drm_device *)self;
517 
518 	switch (act) {
519 	case DVACT_QUIESCE:
520 		drm_quiesce(dev);
521 		break;
522 	case DVACT_WAKEUP:
523 		drm_wakeup(dev);
524 		break;
525 	}
526 
527 	return (0);
528 }
529 
/* Autoconf glue: softc size plus the probe/attach/detach/activate hooks. */
struct cfattach drm_ca = {
	sizeof(struct drm_device), drm_probe, drm_attach,
	drm_detach, drm_activate
};

struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
538 
539 const struct drm_pcidev *
540 drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
541 {
542 	int i = 0;
543 
544 	for (i = 0; idlist[i].vendor != 0; i++) {
545 		if ((idlist[i].vendor == vendor) &&
546 		    (idlist[i].device == device))
547 			return &idlist[i];
548 	}
549 	return NULL;
550 }
551 
552 int
553 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
554 {
555 	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
556 }
557 
/* Instantiate the splay-tree operations for the per-device file tree. */
SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
559 
560 struct drm_file *
561 drm_find_file_by_minor(struct drm_device *dev, int minor)
562 {
563 	struct drm_file	key;
564 
565 	key.minor = minor;
566 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
567 }
568 
569 struct drm_device *
570 drm_get_device_from_kdev(dev_t kdev)
571 {
572 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
573 
574 	if (unit < drm_cd.cd_ndevs)
575 		return drm_cd.cd_devs[unit];
576 
577 	return NULL;
578 }
579 
/*
 * Reset per-session state when the first handle is opened on the
 * device; the driver's firstopen() hook runs before the reset.
 */
int
drm_firstopen(struct drm_device *dev)
{
	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	/* Restart auth magic ids for the new session. */
	dev->magicid = 1;

	/* KMS drivers manage their own irq across sessions. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->if_version = 0;

	dev->buf_pgid = 0;

	DRM_DEBUG("\n");

	return 0;
}
598 
/*
 * Tear down session state when the last handle on the device is
 * closed: driver hook first, then legacy (non-KMS) irq and AGP
 * teardown.
 */
int
drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	/* KMS drivers keep the irq installed for the device lifetime. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_agp_takedown(dev);
#endif

	return 0;
}
617 
/*
 * Open entry point for /dev/drm*: allocate and initialize the
 * per-open drm_file, run drm_firstopen() on the first open, and make
 * the first opener master (root only).
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	mutex_lock(&dev->struct_mutex);
	if (dev->open_count++ == 0) {
		/* First opener resets session state, outside the mutex. */
		mutex_unlock(&dev->struct_mutex);
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	/* always allocate at least enough space for our data */
	file_priv = drm_calloc(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)));
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	/*
	 * NOTE(review): this stores the address of the on-stack local
	 * `file_priv`, which is dead once drmopen() returns — only safe
	 * if filp is treated as an opaque token and never dereferenced;
	 * verify against its users.
	 */
	file_priv->filp = (void *)&file_priv;
	file_priv->minor = minor(kdev);
	INIT_LIST_HEAD(&file_priv->fbs);
	INIT_LIST_HEAD(&file_priv->event_list);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_open(dev, file_priv);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto free_priv;
		}
	}

	mutex_lock(&dev->struct_mutex);
	/* first opener automatically becomes master if root */
	if (SPLAY_EMPTY(&dev->files) && !DRM_SUSER(p)) {
		mutex_unlock(&dev->struct_mutex);
		ret = EPERM;
		goto free_priv;
	}

	file_priv->is_master = SPLAY_EMPTY(&dev->files);

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->struct_mutex);

	return (0);

free_priv:
	drm_free(file_priv);
err:
	/* Roll back the open count taken above. */
	mutex_lock(&dev->struct_mutex);
	--dev->open_count;
	mutex_unlock(&dev->struct_mutex);
	return (ret);
}
694 
/*
 * Close entry point: run driver close hooks, flush this file's
 * pending vblank and other events, release fb/GEM state, free the
 * drm_file, and run drm_lastclose() when the last handle goes away.
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event *e, *et;
	struct drm_pending_vblank_event	*v, *vt;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		/* struct_mutex intentionally stays held; "done" drops it. */
		goto done;
	}
	mutex_unlock(&dev->struct_mutex);

	if (dev->driver->close != NULL)
		dev->driver->close(dev, file_priv);
	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->device, dev->open_count);

	mtx_enter(&dev->event_lock);

	/* Remove pending flips */
	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
		if (v->base.file_priv == file_priv) {
			list_del(&v->base.link);
			drm_vblank_put(dev, v->pipe);
			v->base.destroy(&v->base);
		}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		e->destroy(e);
	}

	mtx_leave(&dev->event_lock);

	if (dev->driver->driver_features & DRIVER_MODESET)
		drm_fb_release(dev, file_priv);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	mutex_lock(&dev->struct_mutex);

	dev->buf_pgid = 0;

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	drm_free(file_priv);

done:
	/* Reached with struct_mutex held on both paths. */
	if (--dev->open_count == 0) {
		mutex_unlock(&dev->struct_mutex);
		retcode = drm_lastclose(dev);
	} else
		mutex_unlock(&dev->struct_mutex);

	return (retcode);
}
769 
/*
 * Validate and dispatch one ioctl on behalf of drmioctl(): look up
 * the caller's drm_file, handle generic file/tty ioctls inline, pick
 * the descriptor from the core or driver table, enforce its
 * permission flags, and call the handler.  Returns a negative errno
 * (Linux convention); drmioctl() negates it for the kernel.
 */
int
drm_do_ioctl(struct drm_device *dev, int minor, u_long cmd, caddr_t data)
{
	struct drm_file *file_priv;
	const struct drm_ioctl_desc *ioctl;
	drm_ioctl_t *func;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	int retcode = -EINVAL;
	unsigned int usize, asize;

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor);
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, (u_int)DRM_IOCTL_NR(cmd), (long)&dev->device,
	    file_priv->authenticated);

	/* Generic ioctls every character device is expected to accept. */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case TIOCSPGRP:
		dev->buf_pgid = *(int *)data;
		return 0;

	case TIOCGPGRP:
		*(int *)data = dev->buf_pgid;
		return 0;
	}

	/* Reject numbers outside both the core and driver ranges. */
	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
		return (-EINVAL);
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		uint32_t drv_size;
		/* Driver-private ioctl. */
		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
		drv_size = IOCPARM_LEN(ioctl->cmd_drv);
		/* NOTE(review): usize/asize are computed but never used. */
		usize = asize = IOCPARM_LEN(cmd);
		if (drv_size > asize)
			asize = drv_size;
	} else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
		uint32_t drv_size;
		/* Core ioctl from drm_ioctls[]. */
		ioctl = &drm_ioctls[nr];

		drv_size = IOCPARM_LEN(ioctl->cmd_drv);
		usize = asize = IOCPARM_LEN(cmd);
		if (drv_size > asize)
			asize = drv_size;
		cmd = ioctl->cmd;
	} else
		return (-EINVAL);

	func = ioctl->func;
	if (!func) {
		DRM_DEBUG("no function\n");
		return (-EINVAL);
	}

	/* Enforce the descriptor's permission flags. */
	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(curproc)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->is_master))
		return (-EACCES);

	if (ioctl->flags & DRM_UNLOCKED)
		retcode = func(dev, data, file_priv);
	else {
		/* XXX lock */
		retcode = func(dev, data, file_priv);
		/* XXX unlock */
	}

	return (retcode);
}
850 
/* drmioctl is called whenever a process performs an ioctl on /dev/drm.
 * It brackets the real work with the suspend/resume quiesce protocol.
 */
int
drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int error;

	if (dev == NULL)
		return ENODEV;

	/* Wait out a suspend-time quiesce, then count ourselves in-flight. */
	mtx_enter(&dev->quiesce_mtx);
	while (dev->quiesce)
		msleep(&dev->quiesce, &dev->quiesce_mtx, PZERO, "drmioc", 0);
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* drm_do_ioctl() returns a negative errno; flip it for the kernel. */
	error = -drm_do_ioctl(dev, minor(kdev), cmd, data);
	if (error < 0 && error != ERESTART && error != EJUSTRETURN)
		printf("%s: cmd 0x%lx errno %d\n", __func__, cmd, error);

	/* Drop our in-flight count; drm_quiesce() may be waiting on it. */
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (error);
}
880 
/*
 * Read queued drm events for this open file.  Blocks (unless
 * IO_NDELAY) until at least one event is queued, then copies out as
 * many whole events as fit in the caller's buffer.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep(&file_priv->event_list, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", 0);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning. */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		ev->destroy(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
931 
/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* Only hand out the event if it fits the reader's buffer whole. */
	if (e->event->length > resid)
		goto out;

	/* Give the event's buffer quota back before unhooking it. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* Always return with event_lock dropped so the caller may sleep. */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
968 
969 /* XXX kqfilter ... */
970 int
971 drmpoll(dev_t kdev, int events, struct proc *p)
972 {
973 	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
974 	struct drm_file		*file_priv;
975 	int		 	 revents = 0;
976 
977 	if (dev == NULL)
978 		return (POLLERR);
979 
980 	mutex_lock(&dev->struct_mutex);
981 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
982 	mutex_unlock(&dev->struct_mutex);
983 	if (file_priv == NULL)
984 		return (POLLERR);
985 
986 	mtx_enter(&dev->event_lock);
987 	if (events & (POLLIN | POLLRDNORM)) {
988 		if (!list_empty(&file_priv->event_list))
989 			revents |=  events & (POLLIN | POLLRDNORM);
990 		else
991 			selrecord(p, &file_priv->rsel);
992 	}
993 	mtx_leave(&dev->event_lock);
994 
995 	return (revents);
996 }
997 
998 paddr_t
999 drmmmap(dev_t kdev, off_t offset, int prot)
1000 {
1001 	return -1;
1002 }
1003 
1004 /*
1005  * Beginning in revision 1.1 of the DRM interface, getunique will return
1006  * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
1007  * before setunique has been called.  The format for the bus-specific part of
1008  * the unique is not defined for any other bus.
1009  */
1010 int
1011 drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
1012 {
1013 	struct drm_unique	 *u = data;
1014 
1015 	if (u->unique_len >= dev->unique_len) {
1016 		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
1017 			return -EFAULT;
1018 	}
1019 	u->unique_len = dev->unique_len;
1020 
1021 	return 0;
1022 }
1023 
1024 int
1025 drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1026 {
1027 	struct drm_get_cap *req = data;
1028 
1029 	req->value = 0;
1030 	switch (req->capability) {
1031 	case DRM_CAP_DUMB_BUFFER:
1032 		if (dev->driver->dumb_create)
1033 			req->value = 1;
1034 		break;
1035 	case DRM_CAP_VBLANK_HIGH_CRTC:
1036 		req->value = 1;
1037 		break;
1038 	case DRM_CAP_DUMB_PREFERRED_DEPTH:
1039 		req->value = dev->mode_config.preferred_depth;
1040 		break;
1041 	case DRM_CAP_DUMB_PREFER_SHADOW:
1042 		req->value = dev->mode_config.prefer_shadow;
1043 		break;
1044 #ifdef notyet
1045 	case DRM_CAP_PRIME:
1046 		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
1047 		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
1048 		break;
1049 #endif
1050 	case DRM_CAP_TIMESTAMP_MONOTONIC:
1051 		req->value = drm_timestamp_monotonic;
1052 		break;
1053 	case DRM_CAP_ASYNC_PAGE_FLIP:
1054 		req->value = dev->mode_config.async_page_flip;
1055 		break;
1056 	case DRM_CAP_CURSOR_WIDTH:
1057 		if (dev->mode_config.cursor_width)
1058 			req->value = dev->mode_config.cursor_width;
1059 		else
1060 			req->value = 64;
1061 		break;
1062 	case DRM_CAP_CURSOR_HEIGHT:
1063 		if (dev->mode_config.cursor_height)
1064 			req->value = dev->mode_config.cursor_height;
1065 		else
1066 			req->value = 64;
1067 		break;
1068 	default:
1069 		return -EINVAL;
1070 	}
1071 	return 0;
1072 }
1073 
1074 /**
1075  * Set device/driver capabilities
1076  */
1077 int
1078 drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1079 {
1080 	struct drm_set_client_cap *req = data;
1081 
1082 	switch (req->capability) {
1083 	case DRM_CLIENT_CAP_STEREO_3D:
1084 		if (req->value > 1)
1085 			return -EINVAL;
1086 		file_priv->stereo_allowed = req->value;
1087 		break;
1088 	default:
1089 		return -EINVAL;
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 #define DRM_IF_MAJOR	1
1096 #define DRM_IF_MINOR	4
1097 
/*
 * DRM_IOCTL_VERSION: report the driver's version numbers and its
 * name/date/description strings to userland.
 */
int
drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version	*version = data;
	int			 len;

/*
 * Copy the driver string "value" out to the userland buffer "name",
 * bounded by the caller-supplied name_len.  name_len is always
 * rewritten to the full string length so the caller can detect
 * truncation and retry with a bigger buffer.
 * NOTE: macro evaluates "value" several times and relies on the
 * token-pasted name##_len field existing on struct drm_version.
 */
#define DRM_COPY(name, value)						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return -EFAULT;				\
	}

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}
1123 
1124 int
1125 drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
1126 {
1127 	struct drm_set_version	ver, *sv = data;
1128 	int			if_version;
1129 
1130 	/* Save the incoming data, and set the response before continuing
1131 	 * any further.
1132 	 */
1133 	ver = *sv;
1134 	sv->drm_di_major = DRM_IF_MAJOR;
1135 	sv->drm_di_minor = DRM_IF_MINOR;
1136 	sv->drm_dd_major = dev->driver->major;
1137 	sv->drm_dd_minor = dev->driver->minor;
1138 
1139 	/*
1140 	 * We no longer support interface versions less than 1.1, so error
1141 	 * out if the xserver is too old. 1.1 always ties the drm to a
1142 	 * certain busid, this was done on attach
1143 	 */
1144 	if (ver.drm_di_major != -1) {
1145 		if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 1 ||
1146 		    ver.drm_di_minor > DRM_IF_MINOR) {
1147 			return -EINVAL;
1148 		}
1149 		if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_dd_minor);
1150 		dev->if_version = imax(if_version, dev->if_version);
1151 	}
1152 
1153 	if (ver.drm_dd_major != -1) {
1154 		if (ver.drm_dd_major != dev->driver->major ||
1155 		    ver.drm_dd_minor < 0 ||
1156 		    ver.drm_dd_minor > dev->driver->minor)
1157 			return -EINVAL;
1158 	}
1159 
1160 	return 0;
1161 }
1162 
/*
 * Allocate, map and load a DMA-safe chunk of memory of the given size,
 * returning a drm_dmamem handle or NULL on failure.  The four bus_dma
 * steps (create map, alloc segs, map kva, load) are unwound in exact
 * reverse order on any failure.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	/* Step 1: create the DMA map the memory will be loaded into. */
	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	/* Step 2: allocate the zeroed physical segments. */
	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	/* Step 3: map the segments into kernel virtual address space. */
	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	/* Step 4: load the kva into the map for device access. */
	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
1209 
/*
 * Release memory obtained from drm_dmamem_alloc(): unload, unmap, free
 * and destroy in the reverse of the allocation order.  NULL is a no-op.
 */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
1222 
1223 /**
1224  * Called by the client, this returns a unique magic number to be authorized
1225  * by the master.
1226  *
1227  * The master may use its own knowledge of the client (such as the X
1228  * connection that the magic is passed over) to determine if the magic number
1229  * should be authenticated.
1230  */
1231 int
1232 drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
1233 {
1234 	struct drm_auth		*auth = data;
1235 
1236 	if (dev->magicid == 0)
1237 		dev->magicid = 1;
1238 
1239 	/* Find unique magic */
1240 	if (file_priv->magic) {
1241 		auth->magic = file_priv->magic;
1242 	} else {
1243 		mutex_lock(&dev->struct_mutex);
1244 		file_priv->magic = auth->magic = dev->magicid++;
1245 		mutex_unlock(&dev->struct_mutex);
1246 		DRM_DEBUG("%d\n", auth->magic);
1247 	}
1248 
1249 	DRM_DEBUG("%u\n", auth->magic);
1250 	return 0;
1251 }
1252 
1253 /**
1254  * Marks the client associated with the given magic number as authenticated.
1255  */
1256 int
1257 drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
1258 {
1259 	struct drm_file	*p;
1260 	struct drm_auth	*auth = data;
1261 	int		 ret = -EINVAL;
1262 
1263 	DRM_DEBUG("%u\n", auth->magic);
1264 
1265 	if (auth->magic == 0)
1266 		return ret;
1267 
1268 	mutex_lock(&dev->struct_mutex);
1269 	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
1270 		if (p->magic == auth->magic) {
1271 			p->authenticated = 1;
1272 			p->magic = 0;
1273 			ret = 0;
1274 			break;
1275 		}
1276 	}
1277 	mutex_unlock(&dev->struct_mutex);
1278 
1279 	return ret;
1280 }
1281 
1282 /*
1283  * Compute order.  Can be made faster.
1284  */
1285 int
1286 drm_order(unsigned long size)
1287 {
1288 	int order;
1289 	unsigned long tmp;
1290 
1291 	for (order = 0, tmp = size; tmp >>= 1; ++order)
1292 		;
1293 
1294 	if (size & ~(1 << order))
1295 		++order;
1296 
1297 	return order;
1298 }
1299 
/*
 * Probe the upstream PCIe bridge's link capability registers and build
 * a mask of supported link speeds (DRM_PCIE_SPEED_*).  Returns 0 on
 * success or -EINVAL when there is no bridge, no PCIe capability, or a
 * blacklisted bridge vendor.
 */
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	pci_chipset_tag_t	pc = dev->pc;
	pcitag_t		tag;
	int			pos ;
	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t		id;

	*mask = 0;

	/* We need the upstream bridge to read link capabilities from. */
	if (dev->bridgetag == NULL)
		return -EINVAL;
	tag = *dev->bridgetag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return -EINVAL;

	id = pci_conf_read(pc, tag, PCI_ID_REG);

	/* We've been informed VIA and ServerWorks bridges don't make the cut. */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return -EINVAL;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	/* Link Capabilities 2 only exists with PCIe capability version >= 2. */
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	/* Keep the max-link-speed / supported-link-speeds bits only. */
	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 2)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & 4)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & 8)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		/* Pre-gen3: infer speeds from the max link speed field. */
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device 0x%04x:0x%04x = %x/%x\n",
	    PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap, lnkcap2);
	return 0;
}
1351