/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: head/sys/dev/drm2/drm_drv.c 247835 2013-03-05 09:49:34Z kib $
 */

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/devfs.h>
#include <machine/limits.h>

#include <drm/drmP.h>
#include <drm/drm_core.h>

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
    DRM_DEBUGBITS_FAILED_IOCTL);
#else
int drm_debug = 0;
#endif
int drm_notyet_flag = 0;

unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */

static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

#define DRIVER_SOFTC(unit) \
	((struct drm_device *)devclass_get_softc(drm_devclass, unit))

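/*
 * Module event handler.  On MOD_LOAD, pick up the drm.debug and drm.notyet
 * loader tunables so debugging can be enabled from the boot environment.
 */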
static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drm",
	drm_modevent,
	0
};
DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, iicbus, 1, 1, 1);

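/*
 * Core ioctl table, indexed by DRM ioctl number.  The flags describe who
 * may call each ioctl: DRM_AUTH requires an authenticated client,
 * DRM_MASTER requires the DRM master, DRM_ROOT_ONLY requires root, and
 * DRM_UNLOCKED marks ioctls that may run without the per-device lock.
 */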
static drm_ioctl_desc_t		  drm_ioctls[256] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

static struct dev_ops drm_cdevsw = {
	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
	.d_open =	drm_open,
	.d_close =	drm_close,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_kqfilter =	drm_kqfilter,
	.d_mmap =	drm_mmap,
	.d_mmap_single = drm_mmap_single,
};

static int drm_msi = 1;	/* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RD, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
	{0x8086, 0x2772}, /* Intel i945G	*/
	{0x8086, 0x27A2}, /* Intel i945GM	*/
	{0x8086, 0x27AE}, /* Intel i945GME	*/
	{0, 0}
};

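/*
 * Decide whether MSI should be avoided for this device, either by asking
 * the driver's use_msi() hook or by consulting the static blacklist above.
 */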
static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
{
	int i = 0;

	if (dev->driver->use_msi != NULL) {
		int use_msi;

		use_msi = dev->driver->use_msi(dev, flags);

		return (!use_msi);
	}

	/* TODO: Maybe move this to a callback in i915? */
	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
		    (drm_msi_blacklist[i].device == dev->pci_device)) {
			return 1;
		}
	}

	return 0;
}

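/*
 * Generic PCI probe helper used by the individual drivers: match the
 * device against the driver's PCI ID list and, if found, attach a
 * human-readable description to the device.
 */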
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY)
		return ENXIO;

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (!device_get_desc(kdev)) {
			device_set_desc(kdev, id_entry->name);
			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
		}
		return 0;
	}

	return ENXIO;
}

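/*
 * Generic attach helper.  It records the device's PCI location and IDs,
 * optionally sets up MSI and allocates the IRQ resource when the driver
 * declares DRIVER_HAVE_IRQ, initializes the per-device locks, and then
 * runs drm_load() and creates the /dev/dri/cardN node.
 *
 * For reference only (not part of this file), a driver front end would
 * typically forward its bus probe/attach methods here along these lines,
 * where "foodrm" and foodrm_pciidlist are hypothetical names:
 *
 *	static int
 *	foodrm_probe(device_t kdev)
 *	{
 *		return (drm_probe(kdev, foodrm_pciidlist));
 *	}
 *
 *	static int
 *	foodrm_attach(device_t kdev)
 *	{
 *		return (drm_attach(kdev, foodrm_pciidlist));
 *	}
 */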
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int unit, error, msicount;
	int rid = 0;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	if (!strcmp(device_get_name(kdev), "drmsub"))
		dev->dev = device_get_parent(kdev);
	else
		dev->dev = kdev;

	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);

	dev->pci_vendor = pci_get_vendor(dev->dev);
	dev->pci_device = pci_get_device(dev->dev);
	dev->pci_subvendor = pci_get_subvendor(dev->dev);
	dev->pci_subdevice = pci_get_subdevice(dev->dev);

	id_entry = drm_find_description(dev->pci_vendor,
	    dev->pci_device, idlist);
	dev->id_entry = id_entry;

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		if (drm_msi &&
		    !drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
			msicount = pci_msi_count(dev->dev);
			DRM_DEBUG("MSI count = %d\n", msicount);
			if (msicount > 1)
				msicount = 1;

			if (pci_alloc_msi(dev->dev, &rid, msicount, -1) == 0) {
				DRM_INFO("MSI enabled %d message(s)\n",
				    msicount);
				dev->msi_enabled = 1;
				dev->irqrid = rid;
			}
		}

		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
		    &dev->irqrid, RF_SHAREABLE);
		if (!dev->irqr) {
			return (ENOENT);
		}

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
	lwkt_serialize_init(&dev->irq_lock);
	lockinit(&dev->vbl_lock, "drmvbl", 0, LK_CANRECURSE);
	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
	lockinit(&dev->dev_struct_lock, "drmslk", 0, LK_CANRECURSE);

	error = drm_load(dev);
	if (error)
		goto error;

	error = drm_create_cdevs(kdev);
	if (error)
		goto error;

	return (error);
error:
	if (dev->irqr) {
		bus_release_resource(dev->dev, SYS_RES_IRQ,
		    dev->irqrid, dev->irqr);
	}
	if (dev->msi_enabled) {
		pci_release_msi(dev->dev);
	}
	return (error);
}

int
drm_create_cdevs(device_t kdev)
{
	struct drm_device *dev;
	int unit;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
				DRM_DEV_MODE, "dri/card%d", unit);
	dev->devnode->si_drv1 = dev;

	return (0);
}

int drm_detach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	drm_unload(dev);
	if (dev->irqr) {
		bus_release_resource(dev->dev, SYS_RES_IRQ, dev->irqrid,
		    dev->irqr);
		if (dev->msi_enabled) {
			pci_release_msi(dev->dev);
			DRM_INFO("MSI released\n");
		}
	}
	return (0);
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

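/*
 * Look up a vendor/device pair in a driver's PCI ID list.  An entry with
 * a device ID of 0 acts as a wildcard that matches every device from
 * that vendor.
 */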
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return &idlist[i];
		}
	}
	return NULL;
}

/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
static int drm_lastclose(struct drm_device *dev)
{
	drm_magic_entry_t *pt, *next;
	int i;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	if (dev->unique) {
		drm_free(dev->unique, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			drm_free(entry, DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	drm_dma_takedown(dev);
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
	}
	DRM_UNLOCK(dev);

	return 0;
}

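/*
 * One-time per-device setup called from drm_attach(): map handle allocator,
 * sysctl tree, statistics counters, optional AGP and MTRR setup, context
 * bitmap, optional GEM initialization, and finally the driver's load() hook.
 */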
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev->maplist);
	dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
	if (dev->map_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate map number allocator\n");
		return EINVAL;
	}

	drm_mem_init();
	drm_sysctl_init(dev);
	INIT_LIST_HEAD(&dev->filelist);

	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->agp_mtrr = 1;
		}
	}

	retcode = drm_ctxbitmap_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error;
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->dev))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	drm_gem_destroy(dev);
error:
	drm_ctxbitmap_cleanup(dev);
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->dev_struct_lock);

	return retcode;
}

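/*
 * Undo drm_load() and drm_attach() in roughly reverse order; called from
 * drm_detach().
 */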
static void drm_unload(struct drm_device *dev)
{
	int i;

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	drm_ctxbitmap_cleanup(dev);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	if (dev->agp && dev->agp->agp_mtrr) {
		int __unused retcode;

		retcode = drm_mtrr_del(0, dev->agp->agp_info.ai_aperture_base,
		    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	drm_vblank_cleanup(dev);

	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (dev->agp) {
		drm_free(dev->agp, DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}

	if (dev->driver->unload != NULL) {
		DRM_LOCK(dev);
		dev->driver->unload(dev);
		DRM_UNLOCK(dev);
	}

	delete_unrhdr(dev->map_unrhdr);

	drm_mem_uninit();

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->dev_struct_lock);
}

int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version *version = data;
	int len;

#define DRM_COPY( name, value )						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return EFAULT;				\
	}

	version->version_major		= dev->driver->major;
	version->version_minor		= dev->driver->minor;
	version->version_patchlevel	= dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

/*
 * Stub is needed for devfs
 */
int drm_close(struct dev_close_args *ap)
{
	return 0;
}

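/*
 * Destructor for the per-open file private data, run by devfs when the
 * last reference to the descriptor is dropped.  This is the BSD equivalent
 * of drm_release(): it reclaims the hardware lock and buffers held by the
 * client and runs drm_lastclose() when the last opener goes away.
 */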
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of
		 * hardware at this point, possibly processed via
		 * a callback to the X server.
		 */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
				dev->lock.file_priv = file_priv;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}

/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct dev_ioctl_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t data = ap->a_data;
	struct thread *p = curthread;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
	int nr = DRM_IOCTL_NR(cmd);
	int is_driver_ioctl = 0;
	struct drm_file *file_priv;

	retcode = devfs_get_cdevpriv(ap->a_fp, (void **)&file_priv);
	if (retcode != 0) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, nr, (long)dev->dev,
	    file_priv->authenticated);

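	/* Generic file ioctls (non-blocking/async I/O flags and SIGIO
	 * ownership) are handled here instead of going through the DRM
	 * dispatch tables below.
	 */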
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case FIOSETOWN:
		return fsetown(*(int *)data, &dev->buf_sigio);

	case FIOGETOWN:
		*(int *) data = fgetown(&dev->buf_sigio);
		return 0;
	}

	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
		return EINVAL;
	}

	ioctl = &drm_ioctls[nr];
	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr > dev->driver->max_ioctl) {
			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
			    nr, dev->driver->max_ioctl);
			return EINVAL;
		}
		ioctl = &dev->driver->ioctls[nr];
		is_driver_ioctl = 1;
	}
	func = ioctl->func;

	if (func == NULL) {
		DRM_DEBUG("no function\n");
		return EINVAL;
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return EACCES;

	if (is_driver_ioctl) {
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			DRM_LOCK(dev);
		/* shared code returns -errno */
		retcode = -func(dev, data, file_priv);
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			DRM_UNLOCK(dev);
	} else {
		retcode = func(dev, data, file_priv);
	}

	if (retcode != 0)
		DRM_DEBUG("    returning %d\n", retcode);
	if (retcode != 0 &&
	    (drm_debug & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
		kprintf(
"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->dev,
		    file_priv->authenticated, retcode);
	}

	return retcode;
}

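/*
 * Return the shared-memory map that contains the hardware lock (the SAREA),
 * or NULL if none has been set up.
 */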
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}

	return NULL;
}

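/*
 * Export the device's PCI bus ID string and a "modesetting" flag under its
 * sysctl tree so userland can discover how to address the device.
 */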
int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	     dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);

	return (0);
}

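/*
 * Single-object mmap entry point: route the request to TTM when the driver
 * has registered a TTM buffer-object device, otherwise to GEM, and fail
 * with ENODEV when neither is available.
 */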
int
drm_mmap_single(struct dev_mmap_single_args *ap)
{
	struct drm_device *dev;
	struct cdev *kdev = ap->a_head.a_dev;
	vm_ooffset_t *offset = ap->a_offset;
	vm_size_t size = ap->a_size;
	struct vm_object **obj_res = ap->a_object;
	int nprot = ap->a_nprot;

	dev = drm_get_device_from_kdev(kdev);
	if (dev->drm_ttm_bdev != NULL) {
		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
		    obj_res, nprot));
	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
	} else {
		return (ENODEV);
	}
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */

static int
drm_core_init(void *arg)
{

	drm_global_init();

#if DRM_LINUX
	linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
}

static void
drm_core_exit(void *arg)
{

#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}

SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);

/*
 * Check if dmi_system_id structure matches system DMI data
 */
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;
	bool found = false;
	char *sys_vendor, *board_vendor, *product_name, *board_name;

	sys_vendor = kgetenv("smbios.system.maker");
	board_vendor = kgetenv("smbios.planar.maker");
	product_name = kgetenv("smbios.system.product");
	board_name = kgetenv("smbios.planar.product");

	for (i = 0; i < NELEM(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
			if (sys_vendor != NULL &&
			    !strcmp(sys_vendor, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_BOARD_VENDOR:
			if (board_vendor != NULL &&
			    !strcmp(board_vendor, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_PRODUCT_NAME:
			if (product_name != NULL &&
			    !strcmp(product_name, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_BOARD_NAME:
			if (board_name != NULL &&
			    !strcmp(board_name, dsi->matches[i].substr))
				break;
			else
				goto done;
		default:
			goto done;
		}
	}
	found = true;

done:
	if (sys_vendor != NULL)
		kfreeenv(sys_vendor);
	if (board_vendor != NULL)
		kfreeenv(board_vendor);
	if (product_name != NULL)
		kfreeenv(product_name);
	if (board_name != NULL)
		kfreeenv(board_name);

	return found;
}

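/*
 * Walk a table of DMI match entries and count how many match the running
 * system, invoking each matching entry's callback; the walk stops early if
 * a callback returns nonzero.  Returns true if at least one entry matched.
 */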
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}