xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 82730a9c)
1 /*-
2  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
3  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23  * OTHER DEALINGS IN THE SOFTWARE.
24  *
25  * Authors:
26  *    Rickard E. (Rik) Faith <faith@valinux.com>
27  *    Gareth Hughes <gareth@valinux.com>
28  *
29  * $FreeBSD: head/sys/dev/drm2/drm_drv.c 247835 2013-03-05 09:49:34Z kib $
30  */
31 
32 /** @file drm_drv.c
33  * The catch-all file for DRM device support, including module setup/teardown,
34  * open/close, and ioctl dispatch.
35  */
36 
37 #include <sys/devfs.h>
38 #include <machine/limits.h>
39 
40 #include <drm/drmP.h>
41 #include <drm/drm_core.h>
42 
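/*
 * drm_debug is a bitmask of the DRM_DEBUGBITS_* flags used in this file.
 * It is fetched from the "drm.debug" loader tunable in drm_modevent(), so
 * it can be set before any drm device attaches (e.g. from /boot/loader.conf).
 */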
43 #ifdef DRM_DEBUG_DEFAULT_ON
44 int drm_debug = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
45     DRM_DEBUGBITS_FAILED_IOCTL);
46 #else
47 int drm_debug = 0;
48 #endif
49 int drm_notyet_flag = 0;
50 
51 unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
52 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
53 
54 static int drm_load(struct drm_device *dev);
55 static void drm_unload(struct drm_device *dev);
56 static drm_pci_id_list_t *drm_find_description(int vendor, int device,
57     drm_pci_id_list_t *idlist);
58 
59 #define DRIVER_SOFTC(unit) \
60 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
61 
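/*
 * Module event handler.  On MOD_LOAD this fetches the "drm.debug" and
 * "drm.notyet" tunables from the kernel environment before any device
 * attaches; other events are accepted without further action.
 */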
62 static int
63 drm_modevent(module_t mod, int type, void *data)
64 {
65 
66 	switch (type) {
67 	case MOD_LOAD:
68 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
69 		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
70 		break;
71 	}
72 	return (0);
73 }
74 
75 static moduledata_t drm_mod = {
76 	"drm",
77 	drm_modevent,
78 	0
79 };
80 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
81 MODULE_VERSION(drm, 1);
82 MODULE_DEPEND(drm, agp, 1, 1, 1);
83 MODULE_DEPEND(drm, pci, 1, 1, 1);
84 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
85 
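/*
 * Core ioctl table, indexed by DRM_IOCTL_NR(cmd).  The flags are the
 * permission and locking requirements enforced by drm_ioctl(): DRM_AUTH
 * (authenticated fd), DRM_MASTER (fd is DRM master), DRM_ROOT_ONLY
 * (superuser) and DRM_UNLOCKED (dispatched without the global DRM lock).
 * Driver-private ioctls start at DRM_COMMAND_BASE and are looked up in
 * dev->driver->ioctls instead (see drm_ioctl() below).
 */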
86 static drm_ioctl_desc_t		  drm_ioctls[256] = {
87 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
88 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
89 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
90 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
91 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
92 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
93 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
94 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
95 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
96 
97 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
98 	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
99 	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
100 	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 
102 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
103 	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
104 
105 	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
107 
108 	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
109 	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
110 
111 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
112 	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
113 	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
114 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
115 	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
116 	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
117 	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
118 
119 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
120 	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
121 
122 	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
123 	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
124 
125 	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
126 
127 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
128 	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
129 	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
130 	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
131 	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
132 	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
133 
134 	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
135 
136 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
137 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
138 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
139 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
140 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
141 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
142 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
143 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
144 
145 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
146 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
147 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
148 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
149 
150 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
151 
152 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
153 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
154 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
155 
156 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
157 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
158 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
159 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
160 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
161 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
162 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
163 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
164 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
165 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
166 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
167 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
168 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
169 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
170 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
171 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
172 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
173 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
174 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
175 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
176 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
177 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
178 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
179 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
180 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
181 };
182 
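/*
 * Character device switch for the /dev/dri/card%d nodes created in
 * drm_create_cdevs().  D_TRACKCLOSE asks the kernel to call d_close for
 * every close(), which drm_close() relies on to keep dev->open_count
 * accurate.
 */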
183 static struct dev_ops drm_cdevsw = {
184 	{ "drm", 0, D_TRACKCLOSE },
185 	.d_open =	drm_open,
186 	.d_close =	drm_close,
187 	.d_read =	drm_read,
188 	.d_ioctl =	drm_ioctl,
189 	.d_kqfilter =	drm_kqfilter,
190 	.d_mmap =	drm_mmap,
191 	.d_mmap_single = drm_mmap_single,
192 };
193 
194 static int drm_msi = 1;	/* Enable by default. */
195 TUNABLE_INT("hw.drm.msi", &drm_msi);
196 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
197 SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RD, &drm_msi, 1,
198     "Enable MSI interrupts for drm devices");
199 
200 static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
201 	{0x8086, 0x2772}, /* Intel i945G	*/
202 	{0x8086, 0x27A2}, /* Intel i945GM	*/
203 	{0x8086, 0x27AE}, /* Intel i945GME	*/
204 	{0, 0}
205 };
206 
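/*
 * Decide whether MSI should be avoided for this device.  A driver can
 * override the decision with its use_msi() hook; otherwise the static
 * PCI ID blacklist above is consulted.  Returns non-zero when MSI must
 * not be used.
 */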
207 static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
208 {
209 	int i = 0;
210 
211 	if (dev->driver->use_msi != NULL) {
212 		int use_msi;
213 
214 		use_msi = dev->driver->use_msi(dev, flags);
215 
216 		return (!use_msi);
217 	}
218 
219 	/* TODO: Maybe move this to a callback in i915? */
220 	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
221 		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
222 		    (drm_msi_blacklist[i].device == dev->pci_device)) {
223 			return 1;
224 		}
225 	}
226 
227 	return 0;
228 }
229 
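/*
 * Generic PCI probe helper shared by the drm drivers: match the device's
 * vendor/device IDs against the driver-supplied ID list and, on a hit,
 * set the device description.  A minimal (hypothetical) driver probe hook
 * would simply forward to it, e.g.:
 *
 *	static int mydrm_probe(device_t kdev)
 *	{
 *		return drm_probe(kdev, mydrm_pciidlist);
 *	}
 *
 * mydrm_probe and mydrm_pciidlist are illustrative names only, not part
 * of this file.
 */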
230 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
231 {
232 	drm_pci_id_list_t *id_entry;
233 	int vendor, device;
234 
235 	vendor = pci_get_vendor(kdev);
236 	device = pci_get_device(kdev);
237 
238 	if (pci_get_class(kdev) != PCIC_DISPLAY)
239 		return ENXIO;
240 
241 	id_entry = drm_find_description(vendor, device, idlist);
242 	if (id_entry != NULL) {
243 		if (!device_get_desc(kdev)) {
244 			device_set_desc(kdev, id_entry->name);
245 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
246 		}
247 		return 0;
248 	}
249 
250 	return ENXIO;
251 }
252 
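/*
 * Generic attach helper: record the PCI identity of the device, optionally
 * set up MSI and allocate the IRQ resource (for drivers with
 * DRIVER_HAVE_IRQ), initialize the per-device locks, then run drm_load()
 * and create the /dev/dri/card%d node.  On failure the IRQ and MSI
 * resources are released again.
 */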
253 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
254 {
255 	struct drm_device *dev;
256 	drm_pci_id_list_t *id_entry;
257 	int unit, error, msicount;
258 	int rid = 0;
259 
260 	unit = device_get_unit(kdev);
261 	dev = device_get_softc(kdev);
262 
263 	if (!strcmp(device_get_name(kdev), "drmsub"))
264 		dev->dev = device_get_parent(kdev);
265 	else
266 		dev->dev = kdev;
267 
268 	dev->pci_domain = pci_get_domain(dev->dev);
269 	dev->pci_bus = pci_get_bus(dev->dev);
270 	dev->pci_slot = pci_get_slot(dev->dev);
271 	dev->pci_func = pci_get_function(dev->dev);
272 
273 	dev->pci_vendor = pci_get_vendor(dev->dev);
274 	dev->pci_device = pci_get_device(dev->dev);
275 	dev->pci_subvendor = pci_get_subvendor(dev->dev);
276 	dev->pci_subdevice = pci_get_subdevice(dev->dev);
277 
278 	id_entry = drm_find_description(dev->pci_vendor,
279 	    dev->pci_device, idlist);
280 	dev->id_entry = id_entry;
281 
282 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
283 		if (drm_msi &&
284 		    !drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
285 			msicount = pci_msi_count(dev->dev);
286 			DRM_DEBUG("MSI count = %d\n", msicount);
287 			if (msicount > 1)
288 				msicount = 1;
289 
290 			if (pci_alloc_msi(dev->dev, &rid, msicount, -1) == 0) {
291 				DRM_INFO("MSI enabled %d message(s)\n",
292 				    msicount);
293 				dev->msi_enabled = 1;
294 				dev->irqrid = rid;
295 			}
296 		}
297 
298 		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
299 		    &dev->irqrid, RF_SHAREABLE);
300 		if (!dev->irqr) {
301 			return (ENOENT);
302 		}
303 
304 		dev->irq = (int) rman_get_start(dev->irqr);
305 	}
306 
307 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
308 	lwkt_serialize_init(&dev->irq_lock);
309 	lockinit(&dev->vbl_lock, "drmvbl", 0, LK_CANRECURSE);
310 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
311 	lockinit(&dev->dev_struct_lock, "drmslk", 0, LK_CANRECURSE);
312 
313 	error = drm_load(dev);
314 	if (error)
315 		goto error;
316 
317 	error = drm_create_cdevs(kdev);
318 	if (error)
319 		goto error;
320 
321 	return (error);
322 error:
323 	if (dev->irqr) {
324 		bus_release_resource(dev->dev, SYS_RES_IRQ,
325 		    dev->irqrid, dev->irqr);
326 	}
327 	if (dev->msi_enabled) {
328 		pci_release_msi(dev->dev);
329 	}
330 	return (error);
331 }
332 
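/*
 * Create the /dev/dri/card%d node and stash the softc in si_drv1 so the
 * cdev entry points can recover it later (see drm_get_device_from_kdev()).
 * make_dev() is not expected to fail here, which is why error stays zero.
 */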
333 int
334 drm_create_cdevs(device_t kdev)
335 {
336 	struct drm_device *dev;
337 	int error, unit;
338 
339 	unit = device_get_unit(kdev);
340 	dev = device_get_softc(kdev);
341 
342 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
343 				DRM_DEV_MODE, "dri/card%d", unit);
344 	error = 0;
345 	if (error == 0)
346 		dev->devnode->si_drv1 = dev;
347 	return (error);
348 }
349 
350 int drm_detach(device_t kdev)
351 {
352 	struct drm_device *dev;
353 
354 	dev = device_get_softc(kdev);
355 	drm_unload(dev);
356 	if (dev->irqr) {
357 		bus_release_resource(dev->dev, SYS_RES_IRQ, dev->irqrid,
358 		    dev->irqr);
359 		if (dev->msi_enabled) {
360 			pci_release_msi(dev->dev);
361 			DRM_INFO("MSI released\n");
362 		}
363 	}
364 	return (0);
365 }
366 
367 #ifndef DRM_DEV_NAME
368 #define DRM_DEV_NAME "drm"
369 #endif
370 
371 devclass_t drm_devclass;
372 
373 drm_pci_id_list_t *drm_find_description(int vendor, int device,
374     drm_pci_id_list_t *idlist)
375 {
376 	int i = 0;
377 
378 	for (i = 0; idlist[i].vendor != 0; i++) {
379 		if ((idlist[i].vendor == vendor) &&
380 		    ((idlist[i].device == device) ||
381 		    (idlist[i].device == 0))) {
382 			return &idlist[i];
383 		}
384 	}
385 	return NULL;
386 }
387 
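/*
 * Per-device setup performed on the 0 -> 1 open transition, with dev_lock
 * held: prebuild the SAREA mapping, call the driver's firstopen() hook,
 * set up DMA buffers when the driver supports them, and reset the magic
 * (authentication token) hash and the software lock state.
 */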
388 static int drm_firstopen(struct drm_device *dev)
389 {
390 	drm_local_map_t *map;
391 	int i;
392 
393 	DRM_LOCK_ASSERT(dev);
394 
395 	/* prebuild the SAREA */
396 	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
397 	    _DRM_CONTAINS_LOCK, &map);
398 	if (i != 0)
399 		return i;
400 
401 	if (dev->driver->firstopen)
402 		dev->driver->firstopen(dev);
403 
404 	dev->buf_use = 0;
405 
406 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
407 		i = drm_dma_setup(dev);
408 		if (i != 0)
409 			return i;
410 	}
411 
412 	for (i = 0; i < DRM_HASH_SIZE; i++) {
413 		dev->magiclist[i].head = NULL;
414 		dev->magiclist[i].tail = NULL;
415 	}
416 
417 	dev->lock.lock_queue = 0;
418 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
419 		dev->irq_enabled = 0;
420 	dev->context_flag = 0;
421 	dev->last_context = 0;
422 	dev->if_version = 0;
423 
424 	dev->buf_sigio = NULL;
425 
426 	DRM_DEBUG("\n");
427 
428 	return 0;
429 }
430 
431 /**
432  * Take down the DRM device.
433  *
434  * \param dev DRM device structure.
435  *
436  * Frees every resource in \p dev.
437  *
438  * \sa drm_device
439  */
440 static int drm_lastclose(struct drm_device *dev)
441 {
442 	drm_magic_entry_t *pt, *next;
443 	int i;
444 
445 	DRM_DEBUG("\n");
446 
447 	if (dev->driver->lastclose != NULL)
448 		dev->driver->lastclose(dev);
449 
450 	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
451 		drm_irq_uninstall(dev);
452 
453 	DRM_LOCK(dev);
454 	if (dev->unique) {
455 		drm_free(dev->unique, DRM_MEM_DRIVER);
456 		dev->unique = NULL;
457 		dev->unique_len = 0;
458 	}
459 	/* Clear pid list */
460 	for (i = 0; i < DRM_HASH_SIZE; i++) {
461 		for (pt = dev->magiclist[i].head; pt; pt = next) {
462 			next = pt->next;
463 			drm_free(pt, DRM_MEM_MAGIC);
464 		}
465 		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
466 	}
467 
468 	/* Clear AGP information */
469 	if (dev->agp) {
470 		drm_agp_mem_t *entry;
471 		drm_agp_mem_t *nexte;
472 
473 		/* Remove AGP resources, but leave dev->agp intact until
474 		 * drm_unload is called.
475 		 */
476 		for (entry = dev->agp->memory; entry; entry = nexte) {
477 			nexte = entry->next;
478 			if (entry->bound)
479 				drm_agp_unbind_memory(entry->handle);
480 			drm_agp_free_memory(entry->handle);
481 			drm_free(entry, DRM_MEM_AGPLISTS);
482 		}
483 		dev->agp->memory = NULL;
484 
485 		if (dev->agp->acquired)
486 			drm_agp_release(dev);
487 
488 		dev->agp->acquired = 0;
489 		dev->agp->enabled  = 0;
490 	}
491 	if (dev->sg != NULL) {
492 		drm_sg_cleanup(dev->sg);
493 		dev->sg = NULL;
494 	}
495 
496 	drm_dma_takedown(dev);
497 	if (dev->lock.hw_lock) {
498 		dev->lock.hw_lock = NULL; /* SHM removed */
499 		dev->lock.file_priv = NULL;
500 		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
501 	}
502 	DRM_UNLOCK(dev);
503 
504 	return 0;
505 }
506 
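/*
 * One-time per-device initialization run from drm_attach(): map-handle
 * allocator, sysctl tree, statistics counters, optional AGP setup (plus a
 * write-combining MTRR over the aperture), context bitmap, GEM, and
 * finally the driver's load() hook.  Errors unwind in roughly reverse
 * order.
 */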
507 static int drm_load(struct drm_device *dev)
508 {
509 	int i, retcode;
510 
511 	DRM_DEBUG("\n");
512 
513 	INIT_LIST_HEAD(&dev->maplist);
514 	dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
515 	if (dev->map_unrhdr == NULL) {
516 		DRM_ERROR("Couldn't allocate map number allocator\n");
517 		return EINVAL;
518 	}
519 
520 
521 	drm_mem_init();
522 	drm_sysctl_init(dev);
523 	TAILQ_INIT(&dev->files);
524 
525 	dev->counters  = 6;
526 	dev->types[0]  = _DRM_STAT_LOCK;
527 	dev->types[1]  = _DRM_STAT_OPENS;
528 	dev->types[2]  = _DRM_STAT_CLOSES;
529 	dev->types[3]  = _DRM_STAT_IOCTLS;
530 	dev->types[4]  = _DRM_STAT_LOCKS;
531 	dev->types[5]  = _DRM_STAT_UNLOCKS;
532 
533 	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
534 		atomic_set(&dev->counts[i], 0);
535 
536 	INIT_LIST_HEAD(&dev->vblank_event_list);
537 
538 	if (drm_core_has_AGP(dev)) {
539 		if (drm_device_is_agp(dev))
540 			dev->agp = drm_agp_init();
541 		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
542 		    dev->agp == NULL) {
543 			DRM_ERROR("Card isn't AGP, or couldn't initialize "
544 			    "AGP.\n");
545 			retcode = ENOMEM;
546 			goto error;
547 		}
548 		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
549 			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
550 			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
551 				dev->agp->agp_mtrr = 1;
552 		}
553 	}
554 
555 	retcode = drm_ctxbitmap_init(dev);
556 	if (retcode != 0) {
557 		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
558 		goto error;
559 	}
560 
561 	if (dev->driver->driver_features & DRIVER_GEM) {
562 		retcode = drm_gem_init(dev);
563 		if (retcode != 0) {
564 			DRM_ERROR("Cannot initialize graphics execution "
565 				  "manager (GEM)\n");
566 			goto error1;
567 		}
568 	}
569 
570 	if (dev->driver->load != NULL) {
571 		DRM_LOCK(dev);
572 		/* Shared code returns -errno. */
573 		retcode = -dev->driver->load(dev,
574 		    dev->id_entry->driver_private);
575 		if (pci_enable_busmaster(dev->dev))
576 			DRM_ERROR("Request to enable bus-master failed.\n");
577 		DRM_UNLOCK(dev);
578 		if (retcode != 0)
579 			goto error1;
580 	}
581 
582 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
583 	    dev->driver->name,
584 	    dev->driver->major,
585 	    dev->driver->minor,
586 	    dev->driver->patchlevel,
587 	    dev->driver->date);
588 
589 	return 0;
590 
591 error1:
592 	drm_gem_destroy(dev);
593 error:
594 	drm_ctxbitmap_cleanup(dev);
595 	drm_sysctl_cleanup(dev);
596 	DRM_LOCK(dev);
597 	drm_lastclose(dev);
598 	DRM_UNLOCK(dev);
599 	if (dev->devnode != NULL)
600 		destroy_dev(dev->devnode);
601 
602 	lockuninit(&dev->vbl_lock);
603 	lockuninit(&dev->dev_lock);
604 	lockuninit(&dev->event_lock);
605 	lockuninit(&dev->dev_struct_lock);
606 
607 	return retcode;
608 }
609 
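/*
 * Tear-down counterpart of drm_load(), called from drm_detach(): remove
 * the sysctl tree and device node, release AGP/MTRR, GEM and context
 * state, run drm_lastclose() and the driver's unload() hook, and free the
 * remaining PCI resources and locks.
 */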
610 static void drm_unload(struct drm_device *dev)
611 {
612 	int i;
613 
614 	DRM_DEBUG("\n");
615 
616 	drm_sysctl_cleanup(dev);
617 	if (dev->devnode != NULL)
618 		destroy_dev(dev->devnode);
619 
620 	drm_ctxbitmap_cleanup(dev);
621 
622 	if (dev->driver->driver_features & DRIVER_GEM)
623 		drm_gem_destroy(dev);
624 
625 	if (dev->agp && dev->agp->agp_mtrr) {
626 		int __unused retcode;
627 
628 		retcode = drm_mtrr_del(0, dev->agp->agp_info.ai_aperture_base,
629 		    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC);
630 		DRM_DEBUG("mtrr_del = %d", retcode);
631 	}
632 
633 	drm_vblank_cleanup(dev);
634 
635 	DRM_LOCK(dev);
636 	drm_lastclose(dev);
637 	DRM_UNLOCK(dev);
638 
639 	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
640 	 * worried about resource consumption while the DRM is inactive (between
641 	 * lastclose and firstopen or unload) because these aren't actually
642 	 * taking up KVA, just keeping the PCI resource allocated.
643 	 */
644 	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
645 		if (dev->pcir[i] == NULL)
646 			continue;
647 		bus_release_resource(dev->dev, SYS_RES_MEMORY,
648 		    dev->pcirid[i], dev->pcir[i]);
649 		dev->pcir[i] = NULL;
650 	}
651 
652 	if (dev->agp) {
653 		drm_free(dev->agp, DRM_MEM_AGPLISTS);
654 		dev->agp = NULL;
655 	}
656 
657 	if (dev->driver->unload != NULL) {
658 		DRM_LOCK(dev);
659 		dev->driver->unload(dev);
660 		DRM_UNLOCK(dev);
661 	}
662 
663 	delete_unrhdr(dev->map_unrhdr);
664 
665 	drm_mem_uninit();
666 
667 	if (pci_disable_busmaster(dev->dev))
668 		DRM_ERROR("Request to disable bus-master failed.\n");
669 
670 	lockuninit(&dev->vbl_lock);
671 	lockuninit(&dev->dev_lock);
672 	lockuninit(&dev->event_lock);
673 	lockuninit(&dev->dev_struct_lock);
674 }
675 
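/*
 * DRM_IOCTL_VERSION handler.  The DRM_COPY() macro below copies a driver
 * string into the user-supplied buffer, truncating to the length the
 * caller provided while always reporting the full string length back, so
 * user space (typically libdrm) can re-issue the ioctl with large enough
 * buffers.
 */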
676 int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
677 {
678 	struct drm_version *version = data;
679 	int len;
680 
681 #define DRM_COPY( name, value )						\
682 	len = strlen( value );						\
683 	if ( len > name##_len ) len = name##_len;			\
684 	name##_len = strlen( value );					\
685 	if ( len && name ) {						\
686 		if ( DRM_COPY_TO_USER( name, value, len ) )		\
687 			return EFAULT;				\
688 	}
689 
690 	version->version_major		= dev->driver->major;
691 	version->version_minor		= dev->driver->minor;
692 	version->version_patchlevel	= dev->driver->patchlevel;
693 
694 	DRM_COPY(version->name, dev->driver->name);
695 	DRM_COPY(version->date, dev->driver->date);
696 	DRM_COPY(version->desc, dev->driver->desc);
697 
698 	return 0;
699 }
700 
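/*
 * d_open entry point for /dev/dri/card%d.  Per-open file state is created
 * by drm_open_helper(); every successful open busies the device and bumps
 * open_count, and the first open also runs drm_firstopen() under the DRM
 * lock.
 */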
701 int
702 /* drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) */
703 drm_open(struct dev_open_args *ap)
704 {
705 	struct cdev *kdev = ap->a_head.a_dev;
706 	int flags = ap->a_oflags;
707 	int fmt = 0;
708 	struct thread *p = curthread;
709 	struct drm_device *dev;
710 	int retcode;
711 
712 	dev = DRIVER_SOFTC(minor(kdev));
713 	if (dev == NULL)
714 		return (ENXIO);
715 
716 	DRM_DEBUG("open_count = %d\n", dev->open_count);
717 
718 	retcode = drm_open_helper(kdev, flags, fmt, p, dev);
719 
720 	if (retcode == 0) {
721 		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
722 		DRM_LOCK(dev);
723 		device_busy(dev->dev);
724 		if (!dev->open_count++)
725 			retcode = drm_firstopen(dev);
726 		DRM_UNLOCK(dev);
727 	}
728 
729 	DRM_DEBUG("return %d\n", retcode);
730 
731 	return (retcode);
732 }
733 
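/*
 * d_close entry point, called for every close() because of D_TRACKCLOSE.
 * It releases the per-file state inline (the "inline drm_release" block
 * below), reclaiming the hardware lock and any DMA buffers still owned by
 * the file, and runs drm_lastclose() once open_count drops to zero.
 */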
734 int drm_close(struct dev_close_args *ap)
735 {
736 	struct cdev *kdev = ap->a_head.a_dev;
737 	struct drm_file *file_priv;
738 	struct drm_device *dev;
739 	int retcode = 0;
740 
741 	dev = DRIVER_SOFTC(minor(kdev));
742 	file_priv = drm_find_file_by_proc(dev, curthread);
743 
744 	DRM_DEBUG("open_count = %d\n", dev->open_count);
745 
746 	DRM_LOCK(dev);
747 
748 	if (dev->driver->preclose != NULL)
749 		dev->driver->preclose(dev, file_priv);
750 
751 	/* ========================================================
752 	 * Begin inline drm_release
753 	 */
754 
755 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
756 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
757 
758 	if (dev->driver->driver_features & DRIVER_GEM)
759 		drm_gem_release(dev, file_priv);
760 
761 	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
762 	    && dev->lock.file_priv == file_priv) {
763 		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
764 			  DRM_CURRENTPID,
765 			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
766 		if (dev->driver->reclaim_buffers_locked != NULL)
767 			dev->driver->reclaim_buffers_locked(dev, file_priv);
768 
769 		drm_lock_free(&dev->lock,
770 		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
771 
772 		/* FIXME: may require heavy-handed reset of
773 		 * hardware at this point, possibly
774 		 * processed via a callback to the X
775 		 * server. */
776 	} else if (dev->driver->reclaim_buffers_locked != NULL &&
777 	    dev->lock.hw_lock != NULL) {
778 		/* The lock is required to reclaim buffers */
779 		for (;;) {
780 			if (!dev->lock.hw_lock) {
781 				/* Device has been unregistered */
782 				retcode = EINTR;
783 				break;
784 			}
785 			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
786 				dev->lock.file_priv = file_priv;
787 				dev->lock.lock_time = jiffies;
788 				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
789 				break;	/* Got lock */
790 			}
791 			/* Contention */
792 			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
793 			    PCATCH, "drmlk2", 0);
794 			if (retcode)
795 				break;
796 		}
797 		if (retcode == 0) {
798 			dev->driver->reclaim_buffers_locked(dev, file_priv);
799 			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
800 		}
801 	}
802 
803 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
804 	    !dev->driver->reclaim_buffers_locked)
805 		drm_reclaim_buffers(dev, file_priv);
806 
807 	funsetown(&dev->buf_sigio);
808 
809 	if (dev->driver->postclose != NULL)
810 		dev->driver->postclose(dev, file_priv);
811 	TAILQ_REMOVE(&dev->files, file_priv, link);
812 	drm_free(file_priv, DRM_MEM_FILES);
813 
814 	/* ========================================================
815 	 * End inline drm_release
816 	 */
817 
818 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
819 	device_unbusy(dev->dev);
820 	if (--dev->open_count == 0) {
821 		retcode = drm_lastclose(dev);
822 	}
823 
824 	DRM_UNLOCK(dev);
825 
826 	return (0);
827 }
828 
829 /* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
830  */
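/*
 * Dispatch: FIONBIO/FIOASYNC/FIOSETOWN/FIOGETOWN are handled inline; core
 * ioctls come from drm_ioctls[] and driver ioctls from dev->driver->ioctls
 * (numbers at or above DRM_COMMAND_BASE).  The descriptor flags are checked
 * against the caller, and driver ioctls, which return -errno in the Linux
 * style, run under the global DRM lock unless flagged DRM_UNLOCKED.
 */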
831 int drm_ioctl(struct dev_ioctl_args *ap)
832 {
833 	struct cdev *kdev = ap->a_head.a_dev;
834 	u_long cmd = ap->a_cmd;
835 	caddr_t data = ap->a_data;
836 	struct thread *p = curthread;
837 	struct drm_device *dev = drm_get_device_from_kdev(kdev);
838 	int retcode = 0;
839 	drm_ioctl_desc_t *ioctl;
840 	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
841 	int nr = DRM_IOCTL_NR(cmd);
842 	int is_driver_ioctl = 0;
843 	struct drm_file *file_priv;
844 
845 	file_priv = drm_find_file_by_proc(dev, p);
846 
847 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
848 	++file_priv->ioctl_count;
849 
850 	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
851 	    DRM_CURRENTPID, cmd, nr, (long)dev->dev,
852 	    file_priv->authenticated);
853 
854 	switch (cmd) {
855 	case FIONBIO:
856 	case FIOASYNC:
857 		return 0;
858 
859 	case FIOSETOWN:
860 		return fsetown(*(int *)data, &dev->buf_sigio);
861 
862 	case FIOGETOWN:
863 		*(int *) data = fgetown(&dev->buf_sigio);
864 		return 0;
865 	}
866 
867 	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
868 		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
869 		return EINVAL;
870 	}
871 
872 	ioctl = &drm_ioctls[nr];
873 	/* It's not a core DRM ioctl, try driver-specific. */
874 	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
875 		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
876 		nr -= DRM_COMMAND_BASE;
877 		if (nr > dev->driver->max_ioctl) {
878 			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
879 			    nr, dev->driver->max_ioctl);
880 			return EINVAL;
881 		}
882 		ioctl = &dev->driver->ioctls[nr];
883 		is_driver_ioctl = 1;
884 	}
885 	func = ioctl->func;
886 
887 	if (func == NULL) {
888 		DRM_DEBUG("no function\n");
889 		return EINVAL;
890 	}
891 
892 	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
893 	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
894 	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
895 		return EACCES;
896 
897 	if (is_driver_ioctl) {
898 		if ((ioctl->flags & DRM_UNLOCKED) == 0)
899 			DRM_LOCK(dev);
900 		/* shared code returns -errno */
901 		retcode = -func(dev, data, file_priv);
902 		if ((ioctl->flags & DRM_UNLOCKED) == 0)
903 			DRM_UNLOCK(dev);
904 	} else {
905 		retcode = func(dev, data, file_priv);
906 	}
907 
908 	if (retcode != 0)
909 		DRM_DEBUG("    returning %d\n", retcode);
910 	if (retcode != 0 &&
911 	    (drm_debug & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
912 		kprintf(
913 "pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
914 		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->dev,
915 		    file_priv->authenticated, retcode);
916 	}
917 
918 	return retcode;
919 }
920 
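/*
 * Return the SAREA map, i.e. the _DRM_SHM mapping created with
 * _DRM_CONTAINS_LOCK in drm_firstopen(), or NULL if none exists.
 */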
921 drm_local_map_t *drm_getsarea(struct drm_device *dev)
922 {
923 	struct drm_map_list *entry;
924 
925 	list_for_each_entry(entry, &dev->maplist, head) {
926 		if (entry->map && entry->map->type == _DRM_SHM &&
927 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
928 			return entry->map;
929 		}
930 	}
931 
932 	return NULL;
933 }
934 
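/*
 * Export the PCI bus id ("pci:domain:bus:slot.func") and a modesetting
 * flag as read-only sysctls under the given per-device sysctl tree, so
 * user space can identify the device and its KMS capability.
 */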
935 int
936 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
937     struct sysctl_oid *top)
938 {
939 	struct sysctl_oid *oid;
940 
941 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
942 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
943 	     dev->pci_slot, dev->pci_func);
944 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
945 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
946 	if (oid == NULL)
947 		return (ENOMEM);
948 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
949 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
950 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
951 	if (oid == NULL)
952 		return (ENOMEM);
953 
954 	return (0);
955 }
956 
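/*
 * d_mmap_single entry point: TTM-managed devices hand the request to
 * ttm_bo_mmap_single(), GEM-only drivers to drm_gem_mmap_single(); other
 * devices do not support this mapping path.
 */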
957 int
958 drm_mmap_single(struct dev_mmap_single_args *ap)
959 {
960 	struct drm_device *dev;
961 	struct cdev *kdev = ap->a_head.a_dev;
962 	vm_ooffset_t *offset = ap->a_offset;
963 	vm_size_t size = ap->a_size;
964 	struct vm_object **obj_res = ap->a_object;
965 	int nprot = ap->a_nprot;
966 
967 	dev = drm_get_device_from_kdev(kdev);
968 	if (dev->drm_ttm_bdev != NULL) {
969 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
970 		    obj_res, nprot));
971 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
972 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
973 	} else {
974 		return (ENODEV);
975 	}
976 }
977 
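/*
 * Optional Linux compatibility shim: register an ioctl handler for the
 * Linux DRM command range (0x6400-0x64ff) and swap the in/out direction
 * bits, which are encoded the opposite way on Linux, before re-entering
 * the native ioctl() path.
 */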
978 #if DRM_LINUX
979 
980 #include <sys/sysproto.h>
981 
982 MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
983 
984 #define LINUX_IOCTL_DRM_MIN		0x6400
985 #define LINUX_IOCTL_DRM_MAX		0x64ff
986 
987 static linux_ioctl_function_t drm_linux_ioctl;
988 static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
989     LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
990 
991 /* The bits for in/out are switched on Linux */
992 #define LINUX_IOC_IN	IOC_OUT
993 #define LINUX_IOC_OUT	IOC_IN
994 
995 static int
996 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
997 {
998 	int error;
999 	int cmd = args->cmd;
1000 
1001 	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1002 	if (cmd & LINUX_IOC_IN)
1003 		args->cmd |= IOC_IN;
1004 	if (cmd & LINUX_IOC_OUT)
1005 		args->cmd |= IOC_OUT;
1006 
1007 	error = ioctl(p, (struct ioctl_args *)args);
1008 
1009 	return error;
1010 }
1011 #endif /* DRM_LINUX */
1012 
1013 static int
1014 drm_core_init(void *arg)
1015 {
1016 
1017 	drm_global_init();
1018 
1019 #if DRM_LINUX
1020 	linux_ioctl_register_handler(&drm_handler);
1021 #endif /* DRM_LINUX */
1022 
1023 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1024 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1025 	return 0;
1026 }
1027 
1028 static void
1029 drm_core_exit(void *arg)
1030 {
1031 
1032 #if DRM_LINUX
1033 	linux_ioctl_unregister_handler(&drm_handler);
1034 #endif /* DRM_LINUX */
1035 
1036 	drm_global_release();
1037 }
1038 
1039 SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1040     drm_core_init, NULL);
1041 SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1042     drm_core_exit, NULL);
1043 
1044 /*
1045  * Check if dmi_system_id structure matches system DMI data
1046  */
1047 static bool
1048 dmi_found(const struct dmi_system_id *dsi)
1049 {
1050 	int i, slot;
1051 	bool found = false;
1052 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1053 
1054 	sys_vendor = kgetenv("smbios.system.maker");
1055 	board_vendor = kgetenv("smbios.planar.maker");
1056 	product_name = kgetenv("smbios.system.product");
1057 	board_name = kgetenv("smbios.planar.product");
1058 
1059 	for (i = 0; i < NELEM(dsi->matches); i++) {
1060 		slot = dsi->matches[i].slot;
1061 		switch (slot) {
1062 		case DMI_NONE:
1063 			break;
1064 		case DMI_SYS_VENDOR:
1065 			if (sys_vendor != NULL &&
1066 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1067 				break;
1068 			else
1069 				goto done;
1070 		case DMI_BOARD_VENDOR:
1071 			if (board_vendor != NULL &&
1072 			    !strcmp(board_vendor, dsi->matches[i].substr))
1073 				break;
1074 			else
1075 				goto done;
1076 		case DMI_PRODUCT_NAME:
1077 			if (product_name != NULL &&
1078 			    !strcmp(product_name, dsi->matches[i].substr))
1079 				break;
1080 			else
1081 				goto done;
1082 		case DMI_BOARD_NAME:
1083 			if (board_name != NULL &&
1084 			    !strcmp(board_name, dsi->matches[i].substr))
1085 				break;
1086 			else
1087 				goto done;
1088 		default:
1089 			goto done;
1090 		}
1091 	}
1092 	found = true;
1093 
1094 done:
1095 	if (sys_vendor != NULL)
1096 		kfreeenv(sys_vendor);
1097 	if (board_vendor != NULL)
1098 		kfreeenv(board_vendor);
1099 	if (product_name != NULL)
1100 		kfreeenv(product_name);
1101 	if (board_name != NULL)
1102 		kfreeenv(board_name);
1103 
1104 	return found;
1105 }
1106 
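/*
 * Walk a dmi_system_id table, terminated by an entry whose first match
 * slot is 0, and count the entries whose DMI criteria all match the
 * running system; an entry's callback, if any, can stop the walk by
 * returning non-zero.  Drivers typically use this to apply board-specific
 * quirks keyed on the SMBIOS vendor/product strings read in dmi_found().
 */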
1107 bool
1108 dmi_check_system(const struct dmi_system_id *sysid)
1109 {
1110 	const struct dmi_system_id *dsi;
1111 	int num = 0;
1112 
1113 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1114 		if (dmi_found(dsi)) {
1115 			num++;
1116 			if (dsi->callback && dsi->callback(dsi))
1117 				break;
1118 		}
1119 	}
1120 	return (num);
1121 }
1122 
1123