xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 62dc643e)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
36 /*
37  * drm_debug: Enable debug output.
38  * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
39  */
40 #ifdef __DragonFly__
41 /* Provides three levels of debug: off, minimal, verbose */
42 #if DRM_DEBUG_DEFAULT_ON == 1
43 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
44 			  DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL)
45 #elif DRM_DEBUG_DEFAULT_ON == 2
46 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
47 			  DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL |	\
48 			  DRM_UT_PID  | DRM_UT_IOCTL  | DRM_UT_VBLANK)
49 #else
50 #define DRM_DEBUGBITS_ON (0x0)
51 #endif
52 unsigned int drm_debug = DRM_DEBUGBITS_ON;	/* 0 unless DRM_DEBUG_DEFAULT_ON is set */
53 #else
54 unsigned int drm_debug = 0;
55 #endif /* __DragonFly__ */
56 EXPORT_SYMBOL(drm_debug);
57 
58 MODULE_AUTHOR(CORE_AUTHOR);
59 MODULE_DESCRIPTION(CORE_DESC);
60 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
61 "\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
62 "\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
63 "\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
64 "\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
65 "\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
66 "\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
67 module_param_named(debug, drm_debug, int, 0600);
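/*
 * A quick worked example of composing a mask from the bits documented above
 * (the values are the ones listed in the parameter description; the DRM_UT_*
 * categories themselves are defined in drmP.h):
 *
 *	CORE (0x01) | KMS (0x04) | ATOMIC (0x10)  ->  debug mask 0x15
 *
 * i.e. setting drm_debug to 0x15 enables core, modesetting and atomic
 * messages while leaving driver, PRIME and vblank output quiet.
 */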
68 
69 #if 0
70 static DEFINE_SPINLOCK(drm_minor_lock);
71 static struct idr drm_minors_idr;
72 #endif
73 
74 #if 0
75 static struct dentry *drm_debugfs_root;
76 #endif
77 
78 void drm_err(const char *func, const char *format, ...)
79 {
80 	__va_list args;
81 
82 	kprintf("error: [" DRM_NAME ":pid%d:%s] *ERROR* ", DRM_CURRENTPID, func);
83 
84 	__va_start(args, format);
85 	kvprintf(format, args);
86 	__va_end(args);
87 }
88 EXPORT_SYMBOL(drm_err);
89 
90 void drm_ut_debug_printk(const char *function_name, const char *format, ...)
91 {
92 	__va_list args;
93 
94 	if (unlikely(drm_debug & DRM_UT_PID)) {
95 		kprintf("[" DRM_NAME ":pid%d:%s] ",
96 		    DRM_CURRENTPID, function_name);
97 	} else {
98 		kprintf("[" DRM_NAME ":%s] ", function_name);
99 	}
100 
101 	__va_start(args, format);
102 	kvprintf(format, args);
103 	__va_end(args);
104 }
105 EXPORT_SYMBOL(drm_ut_debug_printk);
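/*
 * drm_ut_debug_printk() is normally reached through the DRM_DEBUG* macros
 * rather than called directly.  A minimal sketch of the usual shape of such
 * a macro (the real definitions live in drmP.h and may differ in detail):
 *
 *	#define DRM_DEBUG_SKETCH(fmt, ...)				\
 *		do {							\
 *			if (unlikely(drm_debug & DRM_UT_CORE))		\
 *				drm_ut_debug_printk(__func__,		\
 *				    fmt, ##__VA_ARGS__);		\
 *		} while (0)
 *
 * The category test happens at the call site, so a disabled category costs
 * only a single branch.
 */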
106 
107 #if 0
108 struct drm_master *drm_master_create(struct drm_minor *minor)
109 {
110 	struct drm_master *master;
111 
112 	master = kzalloc(sizeof(*master), GFP_KERNEL);
113 	if (!master)
114 		return NULL;
115 
116 	kref_init(&master->refcount);
117 	spin_lock_init(&master->lock.spinlock);
118 	init_waitqueue_head(&master->lock.lock_queue);
119 	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
120 		kfree(master);
121 		return NULL;
122 	}
123 	master->minor = minor;
124 
125 	return master;
126 }
127 
128 struct drm_master *drm_master_get(struct drm_master *master)
129 {
130 	kref_get(&master->refcount);
131 	return master;
132 }
133 EXPORT_SYMBOL(drm_master_get);
134 
135 static void drm_master_destroy(struct kref *kref)
136 {
137 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
138 	struct drm_device *dev = master->minor->dev;
139 	struct drm_map_list *r_list, *list_temp;
140 
141 	if (dev->driver->master_destroy)
142 		dev->driver->master_destroy(dev, master);
143 
144 	mutex_lock(&dev->struct_mutex);
145 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
146 		if (r_list->master == master) {
147 			drm_legacy_rmmap_locked(dev, r_list->map);
148 			r_list = NULL;
149 		}
150 	}
151 
152 	if (master->unique) {
153 		kfree(master->unique);
154 		master->unique = NULL;
155 		master->unique_len = 0;
156 	}
157 
158 	drm_ht_remove(&master->magiclist);
159 
160 	mutex_unlock(&dev->struct_mutex);
161 	kfree(master);
162 }
163 
164 void drm_master_put(struct drm_master **master)
165 {
166 	kref_put(&(*master)->refcount, drm_master_destroy);
167 	*master = NULL;
168 }
169 EXPORT_SYMBOL(drm_master_put);
170 #endif
171 
172 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
173 			struct drm_file *file_priv)
174 {
175 	DRM_DEBUG("setmaster\n");
176 
177 	if (file_priv->master != 0)
178 		return (0);
179 
180 	return (-EPERM);
181 }
182 
183 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
184 			 struct drm_file *file_priv)
185 {
186 	DRM_DEBUG("dropmaster\n");
187 	if (file_priv->master != 0)
188 		return -EINVAL;
189 	return 0;
190 }
191 
192 #if 0
193 /*
194  * DRM Minors
195  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
196  * of them is represented by a drm_minor object. Depending on the capabilities
197  * of the device-driver, different interfaces are registered.
198  *
199  * Minors can be accessed via dev->$minor_name. This pointer is either
200  * NULL or a valid drm_minor pointer and stays valid as long as the device is
201  * valid. This means, DRM minors have the same life-time as the underlying
202  * device. However, this doesn't mean that the minor is active. Minors are
203  * registered and unregistered dynamically according to device-state.
204  */
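/*
 * A rough sketch of how the idr ranges allocated below map onto minors,
 * assuming the usual type values (LEGACY = 0, CONTROL = 1, RENDER = 2) and
 * the conventional device-node names:
 *
 *	DRM_MINOR_LEGACY   index   0..63    e.g. card0
 *	DRM_MINOR_CONTROL  index  64..127   e.g. controlD64
 *	DRM_MINOR_RENDER   index 128..191   e.g. renderD128
 *
 * drm_minor_alloc() reserves an index in [64 * type, 64 * (type + 1)) and
 * drm_minor_acquire() looks the minor up again by that index.
 */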
205 
206 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
207 					     unsigned int type)
208 {
209 	switch (type) {
210 	case DRM_MINOR_LEGACY:
211 		return &dev->primary;
212 	case DRM_MINOR_RENDER:
213 		return &dev->render;
214 	case DRM_MINOR_CONTROL:
215 		return &dev->control;
216 	default:
217 		return NULL;
218 	}
219 }
220 
221 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
222 {
223 	struct drm_minor *minor;
224 	unsigned long flags;
225 	int r;
226 
227 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
228 	if (!minor)
229 		return -ENOMEM;
230 
231 	minor->type = type;
232 	minor->dev = dev;
233 
234 	idr_preload(GFP_KERNEL);
235 	spin_lock_irqsave(&drm_minor_lock, flags);
236 	r = idr_alloc(&drm_minors_idr,
237 		      NULL,
238 		      64 * type,
239 		      64 * (type + 1),
240 		      GFP_NOWAIT);
241 	spin_unlock_irqrestore(&drm_minor_lock, flags);
242 	idr_preload_end();
243 
244 	if (r < 0)
245 		goto err_free;
246 
247 	minor->index = r;
248 
249 	minor->kdev = drm_sysfs_minor_alloc(minor);
250 	if (IS_ERR(minor->kdev)) {
251 		r = PTR_ERR(minor->kdev);
252 		goto err_index;
253 	}
254 
255 	*drm_minor_get_slot(dev, type) = minor;
256 	return 0;
257 
258 err_index:
259 	spin_lock_irqsave(&drm_minor_lock, flags);
260 	idr_remove(&drm_minors_idr, minor->index);
261 	spin_unlock_irqrestore(&drm_minor_lock, flags);
262 err_free:
263 	kfree(minor);
264 	return r;
265 }
266 
267 static void drm_minor_free(struct drm_device *dev, unsigned int type)
268 {
269 	struct drm_minor **slot, *minor;
270 	unsigned long flags;
271 
272 	slot = drm_minor_get_slot(dev, type);
273 	minor = *slot;
274 	if (!minor)
275 		return;
276 
277 	put_device(minor->kdev);
278 
279 	spin_lock_irqsave(&drm_minor_lock, flags);
280 	idr_remove(&drm_minors_idr, minor->index);
281 	spin_unlock_irqrestore(&drm_minor_lock, flags);
282 
283 	kfree(minor);
284 	*slot = NULL;
285 }
286 
287 static int drm_minor_register(struct drm_device *dev, unsigned int type)
288 {
289 	struct drm_minor *minor;
290 	unsigned long flags;
291 	int ret;
292 
293 	DRM_DEBUG("\n");
294 
295 	minor = *drm_minor_get_slot(dev, type);
296 	if (!minor)
297 		return 0;
298 
299 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
300 	if (ret) {
301 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
302 		return ret;
303 	}
304 
305 	ret = device_add(minor->kdev);
306 	if (ret)
307 		goto err_debugfs;
308 
309 	/* replace NULL with @minor so lookups will succeed from now on */
310 	spin_lock_irqsave(&drm_minor_lock, flags);
311 	idr_replace(&drm_minors_idr, minor, minor->index);
312 	spin_unlock_irqrestore(&drm_minor_lock, flags);
313 
314 	DRM_DEBUG("new minor registered %d\n", minor->index);
315 	return 0;
316 
317 err_debugfs:
318 	drm_debugfs_cleanup(minor);
319 	return ret;
320 }
321 
322 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
323 {
324 	struct drm_minor *minor;
325 	unsigned long flags;
326 
327 	minor = *drm_minor_get_slot(dev, type);
328 	if (!minor || !device_is_registered(minor->kdev))
329 		return;
330 
331 	/* replace @minor with NULL so lookups will fail from now on */
332 	spin_lock_irqsave(&drm_minor_lock, flags);
333 	idr_replace(&drm_minors_idr, NULL, minor->index);
334 	spin_unlock_irqrestore(&drm_minor_lock, flags);
335 
336 	device_del(minor->kdev);
337 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
338 	drm_debugfs_cleanup(minor);
339 }
340 
341 /**
342  * drm_minor_acquire - Acquire a DRM minor
343  * @minor_id: Minor ID of the DRM-minor
344  *
345  * Looks up the given minor-ID and returns the respective DRM-minor object. The
346  * reference-count of the underlying device is increased so you must release this
347  * object with drm_minor_release().
348  *
349  * As long as you hold this minor, it is guaranteed that the object and the
350  * minor->dev pointer will stay valid! However, the device may get unplugged and
351  * unregistered while you hold the minor.
352  *
353  * Returns:
354  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
355  * failure.
356  */
357 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
358 {
359 	struct drm_minor *minor;
360 	unsigned long flags;
361 
362 	spin_lock_irqsave(&drm_minor_lock, flags);
363 	minor = idr_find(&drm_minors_idr, minor_id);
364 	if (minor)
365 		drm_dev_ref(minor->dev);
366 	spin_unlock_irqrestore(&drm_minor_lock, flags);
367 
368 	if (!minor) {
369 		return ERR_PTR(-ENODEV);
370 	} else if (drm_device_is_unplugged(minor->dev)) {
371 		drm_dev_unref(minor->dev);
372 		return ERR_PTR(-ENODEV);
373 	}
374 
375 	return minor;
376 }
377 
378 /**
379  * drm_minor_release - Release DRM minor
380  * @minor: Pointer to DRM minor object
381  *
382  * Release a minor that was previously acquired via drm_minor_acquire().
383  */
384 void drm_minor_release(struct drm_minor *minor)
385 {
386 	drm_dev_unref(minor->dev);
387 }
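/*
 * A minimal usage sketch for the acquire/release pair above (this is
 * essentially what drm_stub_open() further down does; error handling
 * trimmed):
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	// ... use minor->dev here ...
 *	drm_minor_release(minor);
 */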
388 
389 /**
390  * DOC: driver instance overview
391  *
392  * A device instance for a drm driver is represented by struct &drm_device. This
393  * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
394  * callbacks implemented by the driver. The driver then needs to initialize all
395  * the various subsystems for the drm device like memory management, vblank
396  * handling, modesetting support and initial output configuration plus obviously
397  * initialize all the corresponding hardware bits. An important part of this is
398  * also calling drm_dev_set_unique() to set the userspace-visible unique name of
399  * this device instance. Finally when everything is up and running and ready for
400  * userspace the device instance can be published using drm_dev_register().
401  *
402  * There is also deprecated support for initializing device instances using
403  * bus-specific helpers and the ->load() callback. But due to
404  * backwards-compatibility needs the device instance has to be published too
405  * early, which requires unpretty global locking to make safe and is therefore
406  * only supported for existing drivers not yet converted to the new scheme.
407  *
408  * When cleaning up a device instance everything needs to be done in reverse:
409  * First unpublish the device instance with drm_dev_unregister(). Then clean up
410  * any other resources allocated at device initialization and drop the driver's
411  * reference to &drm_device using drm_dev_unref().
412  *
413  * Note that the lifetime rules for &drm_device instances still have a lot of
414  * historical baggage. Hence use the reference counting provided by
415  * drm_dev_ref() and drm_dev_unref() only carefully.
416  *
417  * Also note that embedding of &drm_device is currently not (yet) supported (but
418  * it would be easy to add). Drivers can store driver-private data in the
419  * dev_priv field of &drm_device.
420  */
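/*
 * A condensed sketch of the lifecycle described above, for a hypothetical
 * PCI driver "foo" (the foo_* names and the subsystem-init details are
 * placeholders, not a real driver):
 *
 *	static int foo_probe(struct pci_dev *pdev)
 *	{
 *		struct drm_device *ddev;
 *		int ret;
 *
 *		ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
 *		if (!ddev)
 *			return -ENOMEM;
 *		ret = foo_init_hw_and_subsystems(ddev);	// mm, vblank, KMS, ...
 *		if (ret)
 *			goto err_unref;
 *		ret = drm_dev_register(ddev, 0);
 *		if (ret)
 *			goto err_fini;
 *		return 0;
 *	err_fini:
 *		foo_fini_hw_and_subsystems(ddev);
 *	err_unref:
 *		drm_dev_unref(ddev);
 *		return ret;
 *	}
 *
 * Teardown runs in reverse: drm_dev_unregister(), driver cleanup, then a
 * final drm_dev_unref().
 */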
421 
422 /**
423  * drm_put_dev - Unregister and release a DRM device
424  * @dev: DRM device
425  *
426  * Called at module unload time or when a PCI device is unplugged.
427  *
428  * Cleans up the DRM device, calling drm_lastclose().
429  *
430  * Note: Use of this function is deprecated. It will eventually go away
431  * completely.  Please use drm_dev_unregister() and drm_dev_unref() explicitly
432  * instead to make sure that the device isn't userspace accessible any more
433  * while teardown is in progress, ensuring that userspace can't access an
434  * inconsistent state.
435  */
436 void drm_put_dev(struct drm_device *dev)
437 {
438 	DRM_DEBUG("\n");
439 
440 	if (!dev) {
441 		DRM_ERROR("cleanup called no dev\n");
442 		return;
443 	}
444 
445 	drm_dev_unregister(dev);
446 	drm_dev_unref(dev);
447 }
448 EXPORT_SYMBOL(drm_put_dev);
449 
450 void drm_unplug_dev(struct drm_device *dev)
451 {
452 	/* for a USB device */
453 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
454 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
455 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
456 
457 	mutex_lock(&drm_global_mutex);
458 
459 	drm_device_set_unplugged(dev);
460 
461 	if (dev->open_count == 0) {
462 		drm_put_dev(dev);
463 	}
464 	mutex_unlock(&drm_global_mutex);
465 }
466 EXPORT_SYMBOL(drm_unplug_dev);
467 
468 /*
469  * DRM internal mount
470  * We want to be able to allocate our own "struct address_space" to control
471  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
472  * stand-alone address_space objects, so we need an underlying inode. As there
473  * is no way to allocate an independent inode easily, we need a fake internal
474  * VFS mount-point.
475  *
476  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
477  * frees it again. You are allowed to use iget() and iput() to get references to
478  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
479  * drm_fs_inode_free() call (which does not have to be the last iput()).
480  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
481  * between multiple inode-users. You could, technically, call
482  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
483  * iput(), but this way you'd end up with a new vfsmount for each inode.
484  */
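/*
 * The pairing rule above in code form (a sketch; within this file
 * drm_dev_alloc() and drm_dev_release() below are the real users):
 *
 *	inode = drm_fs_inode_new();		// pins the internal mount
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	// ...
 *	drm_fs_inode_free(inode);		// iput() + unpin, exactly once
 */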
485 
486 static int drm_fs_cnt;
487 static struct vfsmount *drm_fs_mnt;
488 
489 static const struct dentry_operations drm_fs_dops = {
490 	.d_dname	= simple_dname,
491 };
492 
493 static const struct super_operations drm_fs_sops = {
494 	.statfs		= simple_statfs,
495 };
496 
497 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
498 				   const char *dev_name, void *data)
499 {
500 	return mount_pseudo(fs_type,
501 			    "drm:",
502 			    &drm_fs_sops,
503 			    &drm_fs_dops,
504 			    0x010203ff);
505 }
506 
507 static struct file_system_type drm_fs_type = {
508 	.name		= "drm",
509 	.owner		= THIS_MODULE,
510 	.mount		= drm_fs_mount,
511 	.kill_sb	= kill_anon_super,
512 };
513 
514 static struct inode *drm_fs_inode_new(void)
515 {
516 	struct inode *inode;
517 	int r;
518 
519 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
520 	if (r < 0) {
521 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
522 		return ERR_PTR(r);
523 	}
524 
525 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
526 	if (IS_ERR(inode))
527 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
528 
529 	return inode;
530 }
531 
532 static void drm_fs_inode_free(struct inode *inode)
533 {
534 	if (inode) {
535 		iput(inode);
536 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
537 	}
538 }
539 
540 /**
541  * drm_dev_alloc - Allocate new DRM device
542  * @driver: DRM driver to allocate device for
543  * @parent: Parent device object
544  *
545  * Allocate and initialize a new DRM device. No device registration is done.
546  * Call drm_dev_register() to advertise the device to user space and register it
547  * with other core subsystems. This should be done last in the device
548  * initialization sequence to make sure userspace can't access an inconsistent
549  * state.
550  *
551  * The initial ref-count of the object is 1. Use drm_dev_ref() and
552  * drm_dev_unref() to take and drop further ref-counts.
553  *
554  * Note that for purely virtual devices @parent can be NULL.
555  *
556  * RETURNS:
557  * Pointer to new DRM device, or NULL if out of memory.
558  */
559 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
560 				 struct device *parent)
561 {
562 	struct drm_device *dev;
563 	int ret;
564 
565 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
566 	if (!dev)
567 		return NULL;
568 
569 	kref_init(&dev->ref);
570 	dev->dev = parent;
571 	dev->driver = driver;
572 
573 	INIT_LIST_HEAD(&dev->filelist);
574 	INIT_LIST_HEAD(&dev->ctxlist);
575 	INIT_LIST_HEAD(&dev->vmalist);
576 	INIT_LIST_HEAD(&dev->maplist);
577 	INIT_LIST_HEAD(&dev->vblank_event_list);
578 
579 	spin_lock_init(&dev->buf_lock);
580 	spin_lock_init(&dev->event_lock);
581 	mutex_init(&dev->struct_mutex);
582 	mutex_init(&dev->filelist_mutex);
583 	mutex_init(&dev->ctxlist_mutex);
584 	mutex_init(&dev->master_mutex);
585 
586 	dev->anon_inode = drm_fs_inode_new();
587 	if (IS_ERR(dev->anon_inode)) {
588 		ret = PTR_ERR(dev->anon_inode);
589 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
590 		goto err_free;
591 	}
592 
593 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
594 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
595 		if (ret)
596 			goto err_minors;
597 
598 		WARN_ON(driver->suspend || driver->resume);
599 	}
600 
601 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
602 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
603 		if (ret)
604 			goto err_minors;
605 	}
606 
607 	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
608 	if (ret)
609 		goto err_minors;
610 
611 	if (drm_ht_create(&dev->map_hash, 12))
612 		goto err_minors;
613 
614 	drm_legacy_ctxbitmap_init(dev);
615 
616 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
617 		ret = drm_gem_init(dev);
618 		if (ret) {
619 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
620 			goto err_ctxbitmap;
621 		}
622 	}
623 
624 	if (parent) {
625 		ret = drm_dev_set_unique(dev, dev_name(parent));
626 		if (ret)
627 			goto err_setunique;
628 	}
629 
630 	return dev;
631 
632 err_setunique:
633 	if (drm_core_check_feature(dev, DRIVER_GEM))
634 		drm_gem_destroy(dev);
635 err_ctxbitmap:
636 	drm_legacy_ctxbitmap_cleanup(dev);
637 	drm_ht_remove(&dev->map_hash);
638 err_minors:
639 	drm_minor_free(dev, DRM_MINOR_LEGACY);
640 	drm_minor_free(dev, DRM_MINOR_RENDER);
641 	drm_minor_free(dev, DRM_MINOR_CONTROL);
642 	drm_fs_inode_free(dev->anon_inode);
643 err_free:
644 	mutex_destroy(&dev->master_mutex);
645 	kfree(dev);
646 	return NULL;
647 }
648 EXPORT_SYMBOL(drm_dev_alloc);
649 
650 static void drm_dev_release(struct kref *ref)
651 {
652 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
653 
654 	if (drm_core_check_feature(dev, DRIVER_GEM))
655 		drm_gem_destroy(dev);
656 
657 	drm_legacy_ctxbitmap_cleanup(dev);
658 	drm_ht_remove(&dev->map_hash);
659 	drm_fs_inode_free(dev->anon_inode);
660 
661 	drm_minor_free(dev, DRM_MINOR_LEGACY);
662 	drm_minor_free(dev, DRM_MINOR_RENDER);
663 	drm_minor_free(dev, DRM_MINOR_CONTROL);
664 
665 	mutex_destroy(&dev->master_mutex);
666 	kfree(dev->unique);
667 	kfree(dev);
668 }
669 
670 /**
671  * drm_dev_ref - Take reference of a DRM device
672  * @dev: device to take reference of or NULL
673  *
674  * This increases the ref-count of @dev by one. You *must* already own a
675  * reference when calling this. Use drm_dev_unref() to drop this reference
676  * again.
677  *
678  * This function never fails. However, this function does not provide *any*
679  * guarantee whether the device is alive or running. It only provides a
680  * reference to the object and the memory associated with it.
681  */
682 void drm_dev_ref(struct drm_device *dev)
683 {
684 	if (dev)
685 		kref_get(&dev->ref);
686 }
687 EXPORT_SYMBOL(drm_dev_ref);
688 
689 /**
690  * drm_dev_unref - Drop reference of a DRM device
691  * @dev: device to drop reference of or NULL
692  *
693  * This decreases the ref-count of @dev by one. The device is destroyed if the
694  * ref-count drops to zero.
695  */
696 void drm_dev_unref(struct drm_device *dev)
697 {
698 	if (dev)
699 		kref_put(&dev->ref, drm_dev_release);
700 }
701 EXPORT_SYMBOL(drm_dev_unref);
702 
703 /**
704  * drm_dev_register - Register DRM device
705  * @dev: Device to register
706  * @flags: Flags passed to the driver's .load() function
707  *
708  * Register the DRM device @dev with the system, advertise device to user-space
709  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
710  * previously. Right after drm_dev_register() the driver should call
711  * drm_connector_register_all() to register all connectors in sysfs. This is
712  * a separate call for backward compatibility with drivers still using
713  * the deprecated ->load() callback, where connectors are registered from within
714  * the ->load() callback.
715  *
716  * Never call this twice on any device!
717  *
718  * NOTE: To ensure backward compatibility with existing drivers, this
719  * function calls the ->load() method after registering the device nodes,
720  * creating race conditions. Usage of the ->load() method is therefore
721  * deprecated; drivers must perform all initialization before calling
722  * drm_dev_register().
723  *
724  * RETURNS:
725  * 0 on success, negative error code on failure.
726  */
727 int drm_dev_register(struct drm_device *dev, unsigned long flags)
728 {
729 	int ret;
730 
731 	mutex_lock(&drm_global_mutex);
732 
733 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
734 	if (ret)
735 		goto err_minors;
736 
737 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
738 	if (ret)
739 		goto err_minors;
740 
741 	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
742 	if (ret)
743 		goto err_minors;
744 
745 	if (dev->driver->load) {
746 		ret = dev->driver->load(dev, flags);
747 		if (ret)
748 			goto err_minors;
749 	}
750 
751 	ret = 0;
752 	goto out_unlock;
753 
754 err_minors:
755 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
756 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
757 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
758 out_unlock:
759 	mutex_unlock(&drm_global_mutex);
760 	return ret;
761 }
762 EXPORT_SYMBOL(drm_dev_register);
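/*
 * Sketch of the registration step described above, for a driver that has
 * already finished its own initialization (names hypothetical, return-value
 * handling of the connector call omitted):
 *
 *	ret = drm_dev_register(ddev, 0);
 *	if (ret)
 *		return ret;
 *	drm_connector_register_all(ddev);	// expose connectors in sysfs
 */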
763 
764 /**
765  * drm_dev_unregister - Unregister DRM device
766  * @dev: Device to unregister
767  *
768  * Unregister the DRM device from the system. This does the reverse of
769  * drm_dev_register() but does not deallocate the device. The caller must call
770  * drm_dev_unref() to drop their final reference.
771  *
772  * This should be called first in the device teardown code to make sure
773  * userspace can't access the device instance any more.
774  */
775 void drm_dev_unregister(struct drm_device *dev)
776 {
777 	struct drm_map_list *r_list, *list_temp;
778 
779 	drm_lastclose(dev);
780 
781 	if (dev->driver->unload)
782 		dev->driver->unload(dev);
783 
784 	if (dev->agp)
785 		drm_pci_agp_destroy(dev);
786 
787 	drm_vblank_cleanup(dev);
788 
789 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
790 		drm_legacy_rmmap(dev, r_list->map);
791 
792 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
793 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
794 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
795 }
796 EXPORT_SYMBOL(drm_dev_unregister);
797 
798 /**
799  * drm_dev_set_unique - Set the unique name of a DRM device
800  * @dev: device of which to set the unique name
801  * @name: unique name
802  *
803  * Sets the unique name of a DRM device using the specified string. Drivers
804  * can use this at driver probe time if the unique name of the devices they
805  * drive is static.
806  *
807  * Return: 0 on success or a negative error code on failure.
808  */
809 int drm_dev_set_unique(struct drm_device *dev, const char *name)
810 {
811 	kfree(dev->unique);
812 	dev->unique = kstrdup(name, GFP_KERNEL);
813 
814 	return dev->unique ? 0 : -ENOMEM;
815 }
816 EXPORT_SYMBOL(drm_dev_set_unique);
817 #endif
818 
819 /*
820  * DRM Core
821  * The DRM core module initializes all global DRM objects and makes them
822  * available to drivers. Once setup, drivers can probe their respective
823  * devices.
824  * Currently, core management includes:
825  *  - The "DRM-Global" key/value database
826  *  - Global ID management for connectors
827  *  - DRM major number allocation
828  *  - DRM minor management
829  *  - DRM sysfs class
830  *  - DRM debugfs root
831  *
832  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
833  * interface registered on a DRM device, you can request minor numbers from DRM
834  * core. DRM core takes care of major-number management and char-dev
835  * registration. A stub ->open() callback forwards any open() requests to the
836  * registered minor.
837  */
838 
839 #if 0
840 static int drm_stub_open(struct inode *inode, struct file *filp)
841 {
842 	const struct file_operations *new_fops;
843 	struct drm_minor *minor;
844 	int err;
845 
846 	DRM_DEBUG("\n");
847 
848 	mutex_lock(&drm_global_mutex);
849 	minor = drm_minor_acquire(iminor(inode));
850 	if (IS_ERR(minor)) {
851 		err = PTR_ERR(minor);
852 		goto out_unlock;
853 	}
854 
855 	new_fops = fops_get(minor->dev->driver->fops);
856 	if (!new_fops) {
857 		err = -ENODEV;
858 		goto out_release;
859 	}
860 
861 	replace_fops(filp, new_fops);
862 	if (filp->f_op->open)
863 		err = filp->f_op->open(inode, filp);
864 	else
865 		err = 0;
866 
867 out_release:
868 	drm_minor_release(minor);
869 out_unlock:
870 	mutex_unlock(&drm_global_mutex);
871 	return err;
872 }
873 
874 static const struct file_operations drm_stub_fops = {
875 	.owner = THIS_MODULE,
876 	.open = drm_stub_open,
877 	.llseek = noop_llseek,
878 };
879 
880 static int __init drm_core_init(void)
881 {
882 	int ret = -ENOMEM;
883 
884 	drm_global_init();
885 	drm_connector_ida_init();
886 	idr_init(&drm_minors_idr);
887 
888 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
889 		goto err_p1;
890 
891 	ret = drm_sysfs_init();
892 	if (ret < 0) {
893 		printk(KERN_ERR "DRM: Error creating drm class.\n");
894 		goto err_p2;
895 	}
896 
897 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
898 	if (!drm_debugfs_root) {
899 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
900 		ret = -1;
901 		goto err_p3;
902 	}
903 
904 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
905 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
906 	return 0;
907 err_p3:
908 	drm_sysfs_destroy();
909 err_p2:
910 	unregister_chrdev(DRM_MAJOR, "drm");
911 
912 	idr_destroy(&drm_minors_idr);
913 err_p1:
914 	return ret;
915 }
916 
917 static void __exit drm_core_exit(void)
918 {
919 	debugfs_remove(drm_debugfs_root);
920 	drm_sysfs_destroy();
921 
922 	unregister_chrdev(DRM_MAJOR, "drm");
923 
924 	drm_connector_ida_destroy();
925 	idr_destroy(&drm_minors_idr);
926 }
927 
928 module_init(drm_core_init);
929 module_exit(drm_core_exit);
930 #endif
931 
932 #include <sys/devfs.h>
933 
934 #include <linux/export.h>
935 #include <linux/dmi.h>
936 #include <drm/drmP.h>
937 #include <drm/drm_core.h>
938 
939 static int drm_load(struct drm_device *dev);
940 drm_pci_id_list_t *drm_find_description(int vendor, int device,
941     drm_pci_id_list_t *idlist);
942 
943 #define DRIVER_SOFTC(unit) \
944 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
945 
946 static int
947 drm_modevent(module_t mod, int type, void *data)
948 {
949 
950 	switch (type) {
951 	case MOD_LOAD:
952 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
953 		break;
954 	}
955 	return (0);
956 }
957 
958 static moduledata_t drm_mod = {
959 	"drm",
960 	drm_modevent,
961 	0
962 };
963 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
964 MODULE_VERSION(drm, 1);
965 MODULE_DEPEND(drm, agp, 1, 1, 1);
966 MODULE_DEPEND(drm, pci, 1, 1, 1);
967 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
968 
969 static struct dev_ops drm_cdevsw = {
970 	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
971 	.d_open =	drm_open,
972 	.d_close =	drm_close,
973 	.d_read =	drm_read,
974 	.d_ioctl =	drm_ioctl,
975 	.d_kqfilter =	drm_kqfilter,
976 	.d_mmap =	drm_mmap,
977 	.d_mmap_single = drm_mmap_single,
978 };
979 
980 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
981 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
982     "DRM debugging");
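/*
 * How the two knobs above are typically used (values illustrative; the value
 * is the DRM_UT_* bitmask described at the top of this file):
 *
 *	# /boot/loader.conf -- picked up by TUNABLE_INT_FETCH() at module load
 *	drm.debug="5"			# CORE (0x01) + KMS (0x04)
 *
 *	# at runtime, via the sysctl node declared just above
 *	sysctl hw.drm.debug=5
 */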
983 
984 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
985 {
986 	drm_pci_id_list_t *id_entry;
987 	int vendor, device;
988 
989 	vendor = pci_get_vendor(kdev);
990 	device = pci_get_device(kdev);
991 
992 	if (pci_get_class(kdev) != PCIC_DISPLAY)
993 		return ENXIO;
994 
995 	id_entry = drm_find_description(vendor, device, idlist);
996 	if (id_entry != NULL) {
997 		if (!device_get_desc(kdev)) {
998 			device_set_desc(kdev, id_entry->name);
999 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
1000 		}
1001 		return 0;
1002 	}
1003 
1004 	return ENXIO;
1005 }
1006 
1007 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
1008 {
1009 	struct drm_device *dev;
1010 	drm_pci_id_list_t *id_entry;
1011 	int unit, error;
1012 	u_int irq_flags;
1013 	int msi_enable;
1014 
1015 	unit = device_get_unit(kdev);
1016 	dev = device_get_softc(kdev);
1017 
1018 	/* Initialize Linux struct device */
1019 	dev->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1020 
1021 	if (!strcmp(device_get_name(kdev), "drmsub"))
1022 		dev->dev->bsddev = device_get_parent(kdev);
1023 	else
1024 		dev->dev->bsddev = kdev;
1025 
1026 	dev->pci_domain = pci_get_domain(dev->dev->bsddev);
1027 	dev->pci_bus = pci_get_bus(dev->dev->bsddev);
1028 	dev->pci_slot = pci_get_slot(dev->dev->bsddev);
1029 	dev->pci_func = pci_get_function(dev->dev->bsddev);
1030 	drm_init_pdev(dev->dev->bsddev, &dev->pdev);
1031 
1032 	id_entry = drm_find_description(dev->pdev->vendor,
1033 	    dev->pdev->device, idlist);
1034 	dev->id_entry = id_entry;
1035 
1036 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1037 		msi_enable = 1;
1038 
1039 		dev->irq_type = pci_alloc_1intr(dev->dev->bsddev, msi_enable,
1040 		    &dev->irqrid, &irq_flags);
1041 
1042 		dev->irqr = bus_alloc_resource_any(dev->dev->bsddev, SYS_RES_IRQ,
1043 		    &dev->irqrid, irq_flags);
1044 
1045 		if (!dev->irqr) {
1046 			return (ENOENT);
1047 		}
1048 
1049 		dev->irq = (int) rman_get_start(dev->irqr);
1050 		dev->pdev->irq = dev->irq; /* for i915 */
1051 	}
1052 
1053 	/* Print the contents of pdev struct. */
1054 	drm_print_pdev(dev->pdev);
1055 
1056 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1057 	lwkt_serialize_init(&dev->irq_lock);
1058 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1059 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1060 
1061 	error = drm_load(dev);
1062 	if (error)
1063 		goto error;
1064 
1065 	error = drm_create_cdevs(kdev);
1066 	if (error)
1067 		goto error;
1068 
1069 	return (error);
1070 error:
1071 	if (dev->irqr) {
1072 		bus_release_resource(dev->dev->bsddev, SYS_RES_IRQ,
1073 		    dev->irqrid, dev->irqr);
1074 	}
1075 	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1076 		pci_release_msi(dev->dev->bsddev);
1077 	}
1078 	return (error);
1079 }
1080 
1081 int
1082 drm_create_cdevs(device_t kdev)
1083 {
1084 	struct drm_device *dev;
1085 	int error, unit;
1086 
1087 	unit = device_get_unit(kdev);
1088 	dev = device_get_softc(kdev);
1089 
1090 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1091 				DRM_DEV_MODE, "dri/card%d", unit);
1092 	error = 0;
1093 	if (error == 0)
1094 		dev->devnode->si_drv1 = dev;
1095 	return (error);
1096 }
1097 
1098 #ifndef DRM_DEV_NAME
1099 #define DRM_DEV_NAME "drm"
1100 #endif
1101 
1102 devclass_t drm_devclass;
1103 
1104 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1105     drm_pci_id_list_t *idlist)
1106 {
1107 	int i = 0;
1108 
1109 	for (i = 0; idlist[i].vendor != 0; i++) {
1110 		if ((idlist[i].vendor == vendor) &&
1111 		    ((idlist[i].device == device) ||
1112 		    (idlist[i].device == 0))) {
1113 			return &idlist[i];
1114 		}
1115 	}
1116 	return NULL;
1117 }
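/*
 * Shape of the idlist tables scanned above, sketched with made-up PCI IDs
 * (the field names are the ones used in this file; a device value of 0 acts
 * as a per-vendor wildcard and vendor == 0 terminates the list):
 *
 *	static drm_pci_id_list_t foo_pciidlist[] = {
 *		{ .vendor = 0x8086, .device = 0x0166, .name = "Hypothetical GPU" },
 *		{ .vendor = 0x8086, .device = 0, .name = "Any device of this vendor" },
 *		{ 0 }
 *	};
 */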
1118 
1119 static int drm_load(struct drm_device *dev)
1120 {
1121 	int i, retcode;
1122 
1123 	DRM_DEBUG("\n");
1124 
1125 	INIT_LIST_HEAD(&dev->maplist);
1126 
1127 	drm_sysctl_init(dev);
1128 	INIT_LIST_HEAD(&dev->filelist);
1129 
1130 	dev->counters  = 6;
1131 	dev->types[0]  = _DRM_STAT_LOCK;
1132 	dev->types[1]  = _DRM_STAT_OPENS;
1133 	dev->types[2]  = _DRM_STAT_CLOSES;
1134 	dev->types[3]  = _DRM_STAT_IOCTLS;
1135 	dev->types[4]  = _DRM_STAT_LOCKS;
1136 	dev->types[5]  = _DRM_STAT_UNLOCKS;
1137 
1138 	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
1139 		atomic_set(&dev->counts[i], 0);
1140 
1141 	INIT_LIST_HEAD(&dev->vblank_event_list);
1142 
1143 	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
1144 		if (drm_pci_device_is_agp(dev))
1145 			dev->agp = drm_agp_init(dev);
1146 	}
1147 
1148 	if (dev->driver->driver_features & DRIVER_GEM) {
1149 		retcode = drm_gem_init(dev);
1150 		if (retcode != 0) {
1151 			DRM_ERROR("Cannot initialize graphics execution "
1152 				  "manager (GEM)\n");
1153 			goto error1;
1154 		}
1155 	}
1156 
1157 	if (dev->driver->load != NULL) {
1158 		DRM_LOCK(dev);
1159 		/* Shared code returns -errno. */
1160 		retcode = -dev->driver->load(dev,
1161 		    dev->id_entry->driver_private);
1162 		if (pci_enable_busmaster(dev->dev->bsddev))
1163 			DRM_ERROR("Request to enable bus-master failed.\n");
1164 		DRM_UNLOCK(dev);
1165 		if (retcode != 0)
1166 			goto error1;
1167 	}
1168 
1169 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1170 	    dev->driver->name,
1171 	    dev->driver->major,
1172 	    dev->driver->minor,
1173 	    dev->driver->patchlevel,
1174 	    dev->driver->date);
1175 
1176 	return 0;
1177 
1178 error1:
1179 	drm_gem_destroy(dev);
1180 	drm_sysctl_cleanup(dev);
1181 	DRM_LOCK(dev);
1182 	drm_lastclose(dev);
1183 	DRM_UNLOCK(dev);
1184 	if (dev->devnode != NULL)
1185 		destroy_dev(dev->devnode);
1186 
1187 	lockuninit(&dev->vbl_lock);
1188 	lockuninit(&dev->dev_lock);
1189 	lockuninit(&dev->event_lock);
1190 	lockuninit(&dev->struct_mutex);
1191 
1192 	return retcode;
1193 }
1194 
1195 /*
1196  * Stub is needed for devfs
1197  */
1198 int drm_close(struct dev_close_args *ap)
1199 {
1200 	return 0;
1201 }
1202 
1203 void drm_cdevpriv_dtor(void *cd)
1204 {
1205 	struct drm_file *file_priv = cd;
1206 	struct drm_device *dev = file_priv->dev;
1207 
1208 	DRM_DEBUG("open_count = %d\n", dev->open_count);
1209 
1210 	DRM_LOCK(dev);
1211 
1212 	if (dev->driver->preclose != NULL)
1213 		dev->driver->preclose(dev, file_priv);
1214 
1215 	/* ========================================================
1216 	 * Begin inline drm_release
1217 	 */
1218 
1219 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1220 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1221 
1222 	if (dev->driver->driver_features & DRIVER_GEM)
1223 		drm_gem_release(dev, file_priv);
1224 
1225 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
1226 	    !dev->driver->reclaim_buffers_locked)
1227 		drm_legacy_reclaim_buffers(dev, file_priv);
1228 
1229 	funsetown(&dev->buf_sigio);
1230 
1231 	if (dev->driver->postclose != NULL)
1232 		dev->driver->postclose(dev, file_priv);
1233 	list_del(&file_priv->lhead);
1234 
1235 
1236 	/* ========================================================
1237 	 * End inline drm_release
1238 	 */
1239 
1240 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
1241 	device_unbusy(dev->dev->bsddev);
1242 	if (--dev->open_count == 0) {
1243 		drm_lastclose(dev);
1244 	}
1245 
1246 	DRM_UNLOCK(dev);
1247 }
1248 
1249 int
1250 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1251     struct sysctl_oid *top)
1252 {
1253 	struct sysctl_oid *oid;
1254 
1255 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1256 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1257 	     dev->pci_slot, dev->pci_func);
1258 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1259 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1260 	if (oid == NULL)
1261 		return (ENOMEM);
1262 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1263 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1264 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1265 	if (oid == NULL)
1266 		return (ENOMEM);
1267 
1268 	return (0);
1269 }
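/*
 * Example of what the two sysctls added above report for a GPU at PCI
 * domain 0, bus 0, slot 2, function 0 on a modesetting driver (the parent
 * oid comes from the DRM sysctl setup, so the exact node path may vary):
 *
 *	busid: pci:0000:00:02.0
 *	modesetting: 1
 */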
1270 
1271 int
1272 drm_mmap_single(struct dev_mmap_single_args *ap)
1273 {
1274 	struct drm_device *dev;
1275 	struct cdev *kdev = ap->a_head.a_dev;
1276 	vm_ooffset_t *offset = ap->a_offset;
1277 	vm_size_t size = ap->a_size;
1278 	struct vm_object **obj_res = ap->a_object;
1279 	int nprot = ap->a_nprot;
1280 
1281 	dev = drm_get_device_from_kdev(kdev);
1282 	if (dev->drm_ttm_bdev != NULL) {
1283 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1284 		    obj_res, nprot));
1285 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1286 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1287 	} else {
1288 		return (ENODEV);
1289 	}
1290 }
1291 
1292 static int
1293 drm_core_init(void *arg)
1294 {
1295 
1296 	drm_global_init();
1297 
1298 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1299 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1300 	return 0;
1301 }
1302 
1303 static void
1304 drm_core_exit(void *arg)
1305 {
1306 
1307 	drm_global_release();
1308 }
1309 
1310 SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1311     drm_core_init, NULL);
1312 SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1313     drm_core_exit, NULL);
1314 
1315 
1316 #include <linux/dmi.h>
1317 
1318 /*
1319  * Check if dmi_system_id structure matches system DMI data
1320  */
1321 static bool
1322 dmi_found(const struct dmi_system_id *dsi)
1323 {
1324 	int i, slot;
1325 	bool found = false;
1326 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1327 
1328 	sys_vendor = kgetenv("smbios.system.maker");
1329 	board_vendor = kgetenv("smbios.planar.maker");
1330 	product_name = kgetenv("smbios.system.product");
1331 	board_name = kgetenv("smbios.planar.product");
1332 
1333 	for (i = 0; i < NELEM(dsi->matches); i++) {
1334 		slot = dsi->matches[i].slot;
1335 		switch (slot) {
1336 		case DMI_NONE:
1337 			break;
1338 		case DMI_SYS_VENDOR:
1339 			if (sys_vendor != NULL &&
1340 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1341 				break;
1342 			else
1343 				goto done;
1344 		case DMI_BOARD_VENDOR:
1345 			if (board_vendor != NULL &&
1346 			    !strcmp(board_vendor, dsi->matches[i].substr))
1347 				break;
1348 			else
1349 				goto done;
1350 		case DMI_PRODUCT_NAME:
1351 			if (product_name != NULL &&
1352 			    !strcmp(product_name, dsi->matches[i].substr))
1353 				break;
1354 			else
1355 				goto done;
1356 		case DMI_BOARD_NAME:
1357 			if (board_name != NULL &&
1358 			    !strcmp(board_name, dsi->matches[i].substr))
1359 				break;
1360 			else
1361 				goto done;
1362 		default:
1363 			goto done;
1364 		}
1365 	}
1366 	found = true;
1367 
1368 done:
1369 	if (sys_vendor != NULL)
1370 		kfreeenv(sys_vendor);
1371 	if (board_vendor != NULL)
1372 		kfreeenv(board_vendor);
1373 	if (product_name != NULL)
1374 		kfreeenv(product_name);
1375 	if (board_name != NULL)
1376 		kfreeenv(board_name);
1377 
1378 	return found;
1379 }
1380 
1381 int dmi_check_system(const struct dmi_system_id *sysid)
1382 {
1383 	const struct dmi_system_id *dsi;
1384 	int num = 0;
1385 
1386 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
1387 		if (dmi_found(dsi)) {
1388 			num++;
1389 			if (dsi->callback && dsi->callback(dsi))
1390 				break;
1391 		}
1392 	}
1393 	return (num);
1394 }
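/*
 * A sketch of the match tables consumed by dmi_check_system(), using the
 * slot/substr layout tested in dmi_found() above (the quirk, callback and
 * strings are made up; DMI_MATCH() is assumed to expand to such a pair, as
 * in the Linux dmi.h this code mirrors):
 *
 *	static int foo_apply_quirk(const struct dmi_system_id *id)
 *	{
 *		// flip some driver flag for this machine
 *		return 1;
 *	}
 *
 *	static const struct dmi_system_id foo_quirk_table[] = {
 *		{
 *			.callback = foo_apply_quirk,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Some Laptop"),
 *			},
 *		},
 *		{ }	// matches[0].slot == 0 terminates the scan
 *	};
 *
 *	dmi_check_system(foo_quirk_table);
 */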
1395