/* $OpenBSD: drm_drv.c,v 1.135 2015/04/17 00:54:41 jsg Exp $ */
/*-
 * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
 * Copyright © 2008 Intel Corporation
 * Copyright 2003 Eric Anholt
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Daryll Strauss <daryll@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Eric Anholt <eric@anholt.net>
 *    Owain Ainsworth <oga@openbsd.org>
 *
 */

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/limits.h>
#include <sys/poll.h>
#include <sys/specdev.h>
#include <sys/systm.h>
#include <sys/ttycom.h> /* for TIOCSPGRP */
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#ifdef DRMDEBUG
int drm_debug_flag = 1;
#endif

struct drm_device *drm_get_device_from_kdev(dev_t);
int	 drm_firstopen(struct drm_device *);
int	 drm_lastclose(struct drm_device *);
void	 drm_attach(struct device *, struct device *, void *);
int	 drm_probe(struct device *, void *, void *);
int	 drm_detach(struct device *, int);
void	 drm_quiesce(struct drm_device *);
void	 drm_wakeup(struct drm_device *);
int	 drm_activate(struct device *, int);
int	 drmprint(void *, const char *);
int	 drmsubmatch(struct device *, void *, void *);
int	 drm_do_ioctl(struct drm_device *, int, u_long, caddr_t);
int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
	     struct drm_pending_event **);

int	 drm_getunique(struct drm_device *, void *, struct drm_file *);
int	 drm_version(struct drm_device *, void *, struct drm_file *);
int	 drm_setversion(struct drm_device *, void *, struct drm_file *);
int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
int	 drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

/* functions used by the per-open handle code to grab references to objects */
void	 drm_gem_object_handle_reference(struct drm_gem_object *);
void	 drm_gem_object_handle_unreference(struct drm_gem_object *);
void	 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *);

int	 drm_handle_cmp(struct drm_handle *, struct drm_handle *);
int	 drm_name_cmp(struct drm_gem_object *, struct drm_gem_object *);
int	 drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	     vm_fault_t, vm_prot_t, int);
boolean_t	 drm_flush(struct uvm_object *, voff_t, voff_t, int);
int	 drm_setunique(struct drm_device *, void *, struct drm_file *);
int	 drm_noop(struct drm_device *, void *, struct drm_file *);

SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
SPLAY_PROTOTYPE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);

int	 drm_getcap(struct drm_device *, void *, struct drm_file *);

#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}

/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
#endif
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_noop, DRM_AUTH),
#endif

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_noop, DRM_AUTH),
#endif
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#if defined(__linux__) && defined(__OS_HAS_AGP)
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),

#ifdef notyet
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )

int
drm_setunique(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	/*
	 * Deprecated in DRM version 1.1, and will return EBUSY when
	 * setversion has requested version 1.1 or greater.
	 */
	return (-EBUSY);
}

/** No-op ioctl. */
int
drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}

/*
 * Attach drm to a pci-based driver.
 *
 * This function does all the pci-specific calculations for the
 * drm_attach_args.
 */
struct device *
drm_attach_pci(struct drm_driver_info *driver, struct pci_attach_args *pa,
    int is_agp, int console, struct device *dev)
{
	struct drm_attach_args arg;
	pcireg_t subsys;

	arg.driver = driver;
	arg.dmat = pa->pa_dmat;
	arg.bst = pa->pa_memt;
	arg.irq = pa->pa_intrline;
	arg.is_agp = is_agp;
	arg.console = console;

	arg.pci_vendor = PCI_VENDOR(pa->pa_id);
	arg.pci_device = PCI_PRODUCT(pa->pa_id);

	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	arg.pci_subvendor = PCI_VENDOR(subsys);
	arg.pci_subdevice = PCI_PRODUCT(subsys);

	arg.pc = pa->pa_pc;
	arg.bridgetag = pa->pa_bridgetag;

	arg.busid_len = 20;
	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
	if (arg.busid == NULL) {
		printf("%s: no memory for drm\n", dev->dv_xname);
		return (NULL);
	}
	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);

	return (config_found_sm(dev, &arg, drmprint, drmsubmatch));
}

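/*
 * Autoconf print function: announces "drm at <parent>" when the device
 * is found via config_found_sm() above.
 */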
int
drmprint(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("drm at %s", pnp);
	return (UNCONF);
}

int
drmsubmatch(struct device *parent, void *match, void *aux)
{
	extern struct cfdriver drm_cd;
	struct cfdata *cf = match;

	/* only allow drm to attach */
	if (cf->cf_driver == &drm_cd)
		return ((*cf->cf_attach->ca_match)(parent, match, aux));
	return (0);
}

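/*
 * Check a PCI device against the driver's ID list; returns 1 on a
 * match, 0 otherwise.
 */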
int
drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
{
	const struct drm_pcidev *id_entry;

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), idlist);
	if (id_entry != NULL)
		return 1;

	return 0;
}

int
drm_probe(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct drm_attach_args *da = aux;

	if (cf->drmdevcf_console != DRMDEVCF_CONSOLE_UNK) {
		/*
		 * If console-ness of device specified, either match
		 * exactly (at high priority), or fail.
		 */
		if (cf->drmdevcf_console != 0 && da->console != 0)
			return (10);
		else
			return (0);
	}

	/* If console-ness unspecified, it wins. */
	return (1);
}

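/*
 * Generic attach glue: copy the bus details out of the drm_attach_args,
 * initialise the device's locks, lists and handle extent, and set up
 * AGP and GEM state if the driver asked for them.
 */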
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_device	*dev = (struct drm_device *)self;
	struct drm_attach_args	*da = aux;

	dev->dev_private = parent;
	dev->driver = da->driver;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->irq = da->irq;
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;
	dev->pdev = &dev->drm_pci;
	dev->pci_vendor = dev->pdev->vendor = da->pci_vendor;
	dev->pci_device = dev->pdev->device = da->pci_device;
	dev->pdev->subsystem_vendor = da->pci_subvendor;
	dev->pdev->subsystem_device = da->pci_subdevice;

	dev->pc = da->pc;
	dev->bridgetag = da->bridgetag;

	rw_init(&dev->struct_mutex, "drmdevlk");
	mtx_init(&dev->event_lock, IPL_TTY);
	mtx_init(&dev->quiesce_mtx, IPL_NONE);

	TAILQ_INIT(&dev->maplist);
	SPLAY_INIT(&dev->files);
	TAILQ_INIT(&dev->vbl_events);

	/*
	 * the dma buffers api is just weird. offset 1Gb to ensure we don't
	 * conflict with it.
	 */
	dev->handle_ext = extent_create("drmext", 1024*1024*1024, LONG_MAX,
	    M_DRM, NULL, 0, EX_NOWAIT | EX_NOCOALESCE);
	if (dev->handle_ext == NULL) {
		DRM_ERROR("Failed to initialise handle extent\n");
		goto error;
	}

	if (dev->driver->flags & DRIVER_AGP) {
#if __OS_HAS_AGP
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		if (dev->driver->flags & DRIVER_AGP_REQUIRE &&
		    dev->agp == NULL) {
			printf(": couldn't find agp\n");
			goto error;
		}
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	if (dev->driver->flags & DRIVER_GEM) {
		mtx_init(&dev->obj_name_lock, IPL_NONE);
		SPLAY_INIT(&dev->name_tree);
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, 0, 0,
		    "drmobjpl", &pool_allocator_nointr);
	}

	printf("\n");
	return;

error:
	drm_lastclose(dev);
	dev->dev_private = NULL;
}

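/* Tear down the state set up in drm_attach(). */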
int
drm_detach(struct device *self, int flags)
{
	struct drm_device *dev = (struct drm_device *)self;

	drm_lastclose(dev);

	if (dev->driver->flags & DRIVER_GEM)
		pool_destroy(&dev->objpl);

	extent_destroy(dev->handle_ext);

	drm_vblank_cleanup(dev);

	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	if (dev->agp != NULL) {
		drm_free(dev->agp);
		dev->agp = NULL;
	}

	return 0;
}

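/*
 * Stop the device: set the quiesce flag and wait for all in-flight
 * ioctls and faults to drain (quiesce_count reaching zero); used on
 * suspend via drm_activate() below.
 */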
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", 0);
	}
	mtx_leave(&dev->quiesce_mtx);
}

void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}

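/* Autoconf activation hook: quiesce on suspend, unblock on wakeup. */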
int
drm_activate(struct device *self, int act)
{
	struct drm_device *dev = (struct drm_device *)self;

	switch (act) {
	case DVACT_QUIESCE:
		drm_quiesce(dev);
		break;
	case DVACT_WAKEUP:
		drm_wakeup(dev);
		break;
	}

	return (0);
}

struct cfattach drm_ca = {
	sizeof(struct drm_device), drm_probe, drm_attach,
	drm_detach, drm_activate
};

struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};

const struct drm_pcidev *
drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    (idlist[i].device == device))
			return &idlist[i];
	}
	return NULL;
}

int
drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
{
	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
}

SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);

struct drm_file *
drm_find_file_by_minor(struct drm_device *dev, int minor)
{
	struct drm_file	key;

	key.minor = minor;
	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
}

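/*
 * Map a device number back to its drm softc; the bits of the minor
 * below CLONE_SHIFT select the unit.
 */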
struct drm_device *
drm_get_device_from_kdev(dev_t kdev)
{
	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);

	if (unit < drm_cd.cd_ndevs)
		return drm_cd.cd_devs[unit];

	return NULL;
}

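/* Per-device setup performed when the first file handle is opened. */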
int
drm_firstopen(struct drm_device *dev)
{
	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	dev->magicid = 1;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->if_version = 0;

	dev->buf_pgid = 0;

	DRM_DEBUG("\n");

	return 0;
}

int
drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_agp_takedown(dev);
#endif

	return 0;
}

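/*
 * Open entry point: allocate and initialise the per-open drm_file,
 * run drm_firstopen() for the first opener, and make the first opener
 * master (which requires root).
 */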
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	mutex_lock(&dev->struct_mutex);
	if (dev->open_count++ == 0) {
		mutex_unlock(&dev->struct_mutex);
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	/* always allocate at least enough space for our data */
	file_priv = drm_calloc(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)));
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	file_priv->kdev = kdev;
	file_priv->flags = flags;
	file_priv->minor = minor(kdev);
	INIT_LIST_HEAD(&file_priv->fbs);
	TAILQ_INIT(&file_priv->evlist);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	if (dev->driver->flags & DRIVER_GEM) {
		SPLAY_INIT(&file_priv->obj_tree);
		mtx_init(&file_priv->table_lock, IPL_NONE);
	}

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto free_priv;
		}
	}

	mutex_lock(&dev->struct_mutex);
	/* first opener automatically becomes master if root */
	if (SPLAY_EMPTY(&dev->files) && !DRM_SUSER(p)) {
		mutex_unlock(&dev->struct_mutex);
		ret = EPERM;
		goto free_priv;
	}

	file_priv->master = SPLAY_EMPTY(&dev->files);

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->struct_mutex);

	return (0);

free_priv:
	drm_free(file_priv);
err:
	mutex_lock(&dev->struct_mutex);
	--dev->open_count;
	mutex_unlock(&dev->struct_mutex);
	return (ret);
}

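/*
 * Close entry point: destroy the per-open drm_file, flush any pending
 * events and GEM handles it owned, and run drm_lastclose() when the
 * last open handle goes away.
 */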
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev, *evtmp;
	struct drm_pending_vblank_event	*vev;
	struct drmevlist		*list;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		goto done;
	}
	mutex_unlock(&dev->struct_mutex);

	if (dev->driver->close != NULL)
		dev->driver->close(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->device, dev->open_count);

	mtx_enter(&dev->event_lock);
	list = &dev->vbl_events;
	for (ev = TAILQ_FIRST(list); ev != NULL; ev = evtmp) {
		evtmp = TAILQ_NEXT(ev, link);
		vev = (struct drm_pending_vblank_event *)ev;
		if (ev->file_priv == file_priv) {
			TAILQ_REMOVE(list, ev, link);
			drm_vblank_put(dev, vev->pipe);
			ev->destroy(ev);
		}
	}
	while ((ev = TAILQ_FIRST(&file_priv->evlist)) != NULL) {
		TAILQ_REMOVE(&file_priv->evlist, ev, link);
		ev->destroy(ev);
	}
	mtx_leave(&dev->event_lock);

	if (dev->driver->flags & DRIVER_MODESET)
		drm_fb_release(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->flags & DRIVER_GEM) {
		struct drm_handle	*han;
		mtx_enter(&file_priv->table_lock);
		while ((han = SPLAY_ROOT(&file_priv->obj_tree)) != NULL) {
			SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
			mtx_leave(&file_priv->table_lock);
			drm_gem_object_handle_unreference(han->obj);
			drm_free(han);
			mtx_enter(&file_priv->table_lock);
		}
		mtx_leave(&file_priv->table_lock);
	}

	dev->buf_pgid = 0;

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	drm_free(file_priv);

done:
	if (--dev->open_count == 0) {
		mutex_unlock(&dev->struct_mutex);
		retcode = drm_lastclose(dev);
	} else
		mutex_unlock(&dev->struct_mutex);

	return (retcode);
}

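/*
 * Central ioctl dispatcher: handle the generic file/tty ioctls inline,
 * then look the command up in the driver table or the core drm_ioctls
 * table and check DRM_ROOT_ONLY/DRM_AUTH/DRM_MASTER before calling the
 * handler.
 */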
int
drm_do_ioctl(struct drm_device *dev, int minor, u_long cmd, caddr_t data)
{
	struct drm_file *file_priv;
	struct drm_ioctl_desc *ioctl;
	drm_ioctl_t *func;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	int retcode = -EINVAL;
	unsigned int usize, asize;

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor);
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return -EINVAL;
	}

	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, (u_int)DRM_IOCTL_NR(cmd), (long)&dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case TIOCSPGRP:
		dev->buf_pgid = *(int *)data;
		return 0;

	case TIOCGPGRP:
		*(int *)data = dev->buf_pgid;
		return 0;
	}

	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
		return (-EINVAL);
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		uint32_t drv_size;
		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
		drv_size = IOCPARM_LEN(ioctl->cmd_drv);
		usize = asize = IOCPARM_LEN(cmd);
		if (drv_size > asize)
			asize = drv_size;
	} else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
		uint32_t drv_size;
		ioctl = &drm_ioctls[nr];

		drv_size = IOCPARM_LEN(ioctl->cmd_drv);
		usize = asize = IOCPARM_LEN(cmd);
		if (drv_size > asize)
			asize = drv_size;
		cmd = ioctl->cmd;
	} else
		return (-EINVAL);

	func = ioctl->func;
	if (!func) {
		DRM_DEBUG("no function\n");
		return (-EINVAL);
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(curproc)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return (-EACCES);

	if (ioctl->flags & DRM_UNLOCKED)
		retcode = func(dev, data, file_priv);
	else {
		/* XXX lock */
		retcode = func(dev, data, file_priv);
		/* XXX unlock */
	}

	return (retcode);
}

/* drmioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int
drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int error;

	if (dev == NULL)
		return ENODEV;

	mtx_enter(&dev->quiesce_mtx);
	while (dev->quiesce)
		msleep(&dev->quiesce, &dev->quiesce_mtx, PZERO, "drmioc", 0);
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	error = -drm_do_ioctl(dev, minor(kdev), cmd, data);
	if (error < 0 && error != ERESTART && error != EJUSTRETURN)
		printf("%s: cmd 0x%lx errno %d\n", __func__, cmd, error);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (error);
}

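/*
 * Read entry point: deliver pending events (e.g. vblank completions)
 * to the client, one whole event at a time.
 */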
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int				 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic: if a whole event does not fit in
	 * the read buffer, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && TAILQ_EMPTY(&file_priv->evlist)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep(&file_priv->evlist, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", 0);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomovei(ev->event, ev->event->length, uio);
		ev->destroy(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}

/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event	*ev = NULL;
	int				 gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);
	if ((ev = TAILQ_FIRST(&file_priv->evlist)) == NULL ||
	    ev->event->length > resid)
		goto out;

	TAILQ_REMOVE(&file_priv->evlist, ev, link);
	file_priv->event_space += ev->event->length;
	*out = ev;
	gotone = 1;

out:
	mtx_leave(&dev->event_lock);

	return (gotone);
}

/* XXX kqfilter ... */
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int			 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (POLLERR);

	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&file_priv->evlist))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}

struct drm_local_map *
drm_getsarea(struct drm_device *dev)
{
	struct drm_local_map	*map;

	mutex_lock(&dev->struct_mutex);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			break;
	}
	mutex_unlock(&dev->struct_mutex);
	return (map);
}

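/*
 * mmap entry point: translate a file offset into a physical address,
 * either through the old DMA buffer list or through the map list the
 * driver set up.
 */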
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_local_map	*map;
	struct drm_file		*file_priv;
	enum drm_map_type	 type;

	if (dev == NULL)
		return (-1);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return (-1);
	}

	if (!file_priv->authenticated)
		return (-1);

	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;
		paddr_t	phys = -1;

		rw_enter_write(&dma->dma_lock);
		if (dma->pagelist != NULL)
			phys = dma->pagelist[offset >> PAGE_SHIFT];
		rw_exit_write(&dma->dma_lock);

		return (phys);
	}

	/*
	 * A sequential search of a linked list is
	 * fine here because: 1) there will only be
	 * about 5-10 entries in the list and, 2) a
	 * DRI client only has to do this mapping
	 * once, so it doesn't have to be optimized
	 * for performance, even if the list was a
	 * bit longer.
	 */
	mutex_lock(&dev->struct_mutex);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (offset >= map->ext &&
		    offset < map->ext + map->size) {
			offset -= map->ext;
			break;
		}
	}

	if (map == NULL) {
		mutex_unlock(&dev->struct_mutex);
		DRM_DEBUG("can't find map\n");
		return (-1);
	}
	if ((map->flags & _DRM_RESTRICTED) && file_priv->master == 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_DEBUG("restricted map\n");
		return (-1);
	}
	type = map->type;
	mutex_unlock(&dev->struct_mutex);

	switch (type) {
#if __OS_HAS_AGP
	case _DRM_AGP:
		return agp_mmap(dev->agp->agpdev,
		    offset + map->offset - dev->agp->base, prot);
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		return (offset + map->offset);
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		return (bus_dmamem_mmap(dev->dmat, map->dmamem->segs,
		    map->dmamem->nsegs, offset, prot, BUS_DMA_NOWAIT));
	default:
		DRM_ERROR("bad map type %d\n", type);
		return (-1);	/* This should never happen. */
	}
	/* NOTREACHED */
}

/*
 * Beginning in revision 1.1 of the DRM interface, getunique will return
 * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
 * before setunique has been called.  The format for the bus-specific part of
 * the unique is not defined for any other bus.
 */
int
drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_unique	 *u = data;

	if (u->unique_len >= dev->unique_len) {
		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
			return -EFAULT;
	}
	u->unique_len = dev->unique_len;

	return 0;
}

int
drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_get_cap *req = data;

	req->value = 0;
	switch (req->capability) {
	case DRM_CAP_DUMB_BUFFER:
		if (dev->driver->dumb_create)
			req->value = 1;
		break;
	case DRM_CAP_VBLANK_HIGH_CRTC:
		req->value = 1;
		break;
	case DRM_CAP_DUMB_PREFERRED_DEPTH:
		req->value = dev->mode_config.preferred_depth;
		break;
	case DRM_CAP_DUMB_PREFER_SHADOW:
		req->value = dev->mode_config.prefer_shadow;
		break;
	case DRM_CAP_TIMESTAMP_MONOTONIC:
		req->value = drm_timestamp_monotonic;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

#define DRM_IF_MAJOR	1
#define DRM_IF_MINOR	2

int
drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version	*version = data;
	int			 len;

#define DRM_COPY(name, value)						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return -EFAULT;				\
	}

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

int
drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_set_version	ver, *sv = data;
	int			if_version;

	/* Save the incoming data, and set the response before continuing
	 * any further.
	 */
	ver = *sv;
	sv->drm_di_major = DRM_IF_MAJOR;
	sv->drm_di_minor = DRM_IF_MINOR;
	sv->drm_dd_major = dev->driver->major;
	sv->drm_dd_minor = dev->driver->minor;

	/*
	 * We no longer support interface versions less than 1.1, so error
	 * out if the xserver is too old. 1.1 always ties the drm to a
	 * certain busid; this was done at attach time.
	 */
	if (ver.drm_di_major != -1) {
		if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 1 ||
		    ver.drm_di_minor > DRM_IF_MINOR) {
			return -EINVAL;
		}
		if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_di_minor);
		dev->if_version = imax(if_version, dev->if_version);
	}

	if (ver.drm_dd_major != -1) {
		if (ver.drm_dd_major != dev->driver->major ||
		    ver.drm_dd_minor < 0 ||
		    ver.drm_dd_minor > dev->driver->minor)
			return -EINVAL;
	}

	return 0;
}

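/*
 * Bundle the usual bus_dma(9) create/alloc/map/load sequence into one
 * call; undone by drm_dmamem_free() below.
 */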
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}

void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}

/**
 * Called by the client, this returns a unique magic number to be authorized
 * by the master.
 *
 * The master may use its own knowledge of the client (such as the X
 * connection that the magic is passed over) to determine if the magic number
 * should be authenticated.
 */
int
drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_auth		*auth = data;

	if (dev->magicid == 0)
		dev->magicid = 1;

	/* Find unique magic */
	if (file_priv->magic) {
		auth->magic = file_priv->magic;
	} else {
		mutex_lock(&dev->struct_mutex);
		file_priv->magic = auth->magic = dev->magicid++;
		mutex_unlock(&dev->struct_mutex);
		DRM_DEBUG("%d\n", auth->magic);
	}

	DRM_DEBUG("%u\n", auth->magic);
	return 0;
}

/**
 * Marks the client associated with the given magic number as authenticated.
 */
int
drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_file	*p;
	struct drm_auth	*auth = data;
	int		 ret = -EINVAL;

	DRM_DEBUG("%u\n", auth->magic);

	if (auth->magic == 0)
		return ret;

	mutex_lock(&dev->struct_mutex);
	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
		if (p->magic == auth->magic) {
			p->authenticated = 1;
			p->magic = 0;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

struct uvm_pagerops drm_pgops = {
	NULL,
	drm_ref,
	drm_unref,
	drm_fault,
	drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	uobj->uo_refs++;
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj = (struct drm_gem_object *)uobj;
	struct drm_device *dev = obj->dev;

	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		return;
	}

	/* We own this thing now. It is on no queues, though it may still
	 * be bound to the aperture (and on the inactive list, in which case
	 * idling the buffer is what triggered the free). Since we know no one
	 * else can grab it now, we can nuke with impunity.
	 */
	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj = (struct drm_gem_object *)uobj;
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", 0);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

/*
 * Code to support memory managers based on the GEM (Graphics
 * Execution Manager) api.
 */
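/*
 * Allocate a GEM object of the given size, hooked up to the drm pager
 * above; its pages come from an anonymous uvm object.
 */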
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object	*obj;

	KASSERT((size & (PAGE_SIZE - 1)) == 0);

	if ((obj = pool_get(&dev->objpl, PR_WAITOK | PR_ZERO)) == NULL)
		return (NULL);

	obj->dev = dev;

	/* uao create can't fail in the 0 case, it just sleeps */
	obj->uao = uao_create(size, 0);
	obj->size = size;
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		uao_detach(obj->uao);
		pool_put(&dev->objpl, obj);
		return (NULL);
	}
	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);
	return (obj);
}

int
drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;

	/* uao create can't fail in the 0 case, it just sleeps */
	obj->uao = uao_create(size, 0);
	obj->size = size;
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);
	return 0;
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->uao)
		uao_detach(obj->uao);

	atomic_dec(&dev->obj_count);
	atomic_sub(obj->size, &dev->obj_memory);
	if (obj->do_flags & DRM_WANTED) /* should never happen, not on lists */
		wakeup(obj);
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	struct drm_handle *han;
	int ret;

	if ((han = drm_calloc(1, sizeof(*han))) == NULL)
		return -ENOMEM;

	han->obj = obj;
	mtx_enter(&file_priv->table_lock);
again:
	*handlep = han->handle = ++file_priv->obj_id;
	/*
	 * Make sure we have no duplicates. this'll hurt once we wrap, 0 is
	 * reserved.
	 */
	if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
	    &file_priv->obj_tree, han))
		goto again;
	mtx_leave(&file_priv->table_lock);

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;
	struct drm_handle *han, find;

	find.handle = handle;
	mtx_enter(&filp->table_lock);
	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &find);
	if (han == NULL) {
		mtx_leave(&filp->table_lock);
		return -EINVAL;
	}
	obj = han->obj;
	dev = obj->dev;

	SPLAY_REMOVE(drm_obj_tree, &filp->obj_tree, han);
	mtx_leave(&filp->table_lock);

	drm_free(han);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;
	struct drm_handle *han, search;

	mtx_enter(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	search.handle = handle;
	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
	if (han == NULL) {
		mtx_leave(&filp->table_lock);
		return NULL;
	}
	obj = han->obj;

	drm_gem_object_reference(obj);

	mtx_leave(&filp->table_lock);

	return obj;
}

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->flags & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_flink	*args = data;
	struct drm_gem_object	*obj;

	if (!(dev->driver->flags & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mtx_enter(&dev->obj_name_lock);
	if (!obj->name) {
again:
		obj->name = ++dev->obj_name;
		/* 0 is reserved, make sure we don't clash. */
		if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
		    &dev->name_tree, obj))
			goto again;
		/* name holds a reference to the object */
		drm_ref(&obj->uobj);
	}
	mtx_leave(&dev->obj_name_lock);

	args->name = (uint64_t)obj->name;

	drm_unref(&obj->uobj);

	return 0;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj, search;
	int ret;
	u32 handle;

	if (!(dev->driver->flags & DRIVER_GEM))
		return -ENODEV;

	mtx_enter(&dev->obj_name_lock);
	search.name = args->name;
	obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
	if (obj)
		drm_gem_object_reference(obj);
	mtx_leave(&dev->obj_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
	drm_gem_object_reference(obj);
	obj->handlecount++;
}

void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
	/* do this first in case this is the last reference */
	if (--obj->handlecount == 0) {
		struct drm_device	*dev = obj->dev;

		mtx_enter(&dev->obj_name_lock);
		if (obj->name) {
			SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
			obj->name = 0;
			mtx_leave(&dev->obj_name_lock);
			/* name held a reference to object */
			drm_gem_object_unreference(obj);
		} else {
			mtx_leave(&dev->obj_name_lock);
		}
	}

	drm_gem_object_unreference(obj);
}

void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_local_map *map = obj->map;

	TAILQ_REMOVE(&dev->maplist, map, link);
	obj->map = NULL;

	/* NOCOALESCE set, can't fail */
	extent_free(dev->handle_ext, map->ext, map->size, EX_NOWAIT);

	drm_free(map);
}

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	map = drm_calloc(1, sizeof(*map));
	if (map == NULL)
		return -ENOMEM;

	map->flags = _DRM_DRIVER;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	ret = extent_alloc(dev->handle_ext, map->size, PAGE_SIZE, 0,
	    0, EX_NOWAIT, &map->ext);
	if (ret) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
	obj->map = map;
	return 0;

out_free_list:
	drm_free(map);

	return ret;
}

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_local_map *map;
	struct drm_gem_object *obj;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	if (dev->driver->mmap)
		return dev->driver->mmap(dev, off, size);

	mutex_lock(&dev->struct_mutex);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (off >= map->ext && off + size <= map->ext + map->size)
			break;
	}

	if (map == NULL || map->type != _DRM_GEM) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}

	obj = (struct drm_gem_object *)map->handle;
	drm_ref(&obj->uobj);
	mutex_unlock(&dev->struct_mutex);
	return &obj->uobj;
}

/*
 * Compute order.  Can be made faster.
 */
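/*
 * e.g. drm_order(4096) == 12 (4096 == 1 << 12), while drm_order(4097)
 * rounds up to 13.
 */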
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1 << order))
		++order;

	return order;
}

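/*
 * Read the PCIe link capabilities of the bridge above the device and
 * translate them into DRM_PCIE_SPEED_* flags.
 */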
int
drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	pci_chipset_tag_t	pc = dev->pc;
	pcitag_t		tag;
	int			pos;
	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t		id;

	*mask = 0;

	if (dev->bridgetag == NULL)
		return -EINVAL;
	tag = *dev->bridgetag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return -EINVAL;

	id = pci_conf_read(pc, tag, PCI_ID_REG);

	/* we've been informed VIA and ServerWorks bridges don't make the cut */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return -EINVAL;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 2)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & 4)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & 8)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device 0x%04x:0x%04x = %x/%x\n",
	    PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap, lnkcap2);
	return 0;
}

int
drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
{
	return (a->handle < b->handle ? -1 : a->handle > b->handle);
}

int
drm_name_cmp(struct drm_gem_object *a, struct drm_gem_object *b)
{
	return (a->name < b->name ? -1 : a->name > b->name);
}

SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);

SPLAY_GENERATE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);