xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 006835dc)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
36 /* Provides three levels of debug: off, minimal, verbose */
37 #ifdef __DragonFly__
38 #if DRM_DEBUG_DEFAULT_ON == 1
39 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
40 			  DRM_UT_PRIME| DRM_UT_ATOMIC | DRM_UT_FIOCTL)
41 #elif DRM_DEBUG_DEFAULT_ON == 2
42 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
43 			  DRM_UT_PRIME| DRM_UT_ATOMIC | DRM_UT_FIOCTL |	\
44 			  DRM_UT_PID  | DRM_UT_IOCTL  | DRM_UT_VBLANK)
45 #else
46 #define DRM_DEBUGBITS_ON (0x0)
47 #endif
48 unsigned int drm_debug = DRM_DEBUGBITS_ON;	/* defaults to 0 */
49 #else
50 unsigned int drm_debug = 0;	/* bitmask of DRM_UT_x */
51 #endif /* __DragonFly__ */
52 EXPORT_SYMBOL(drm_debug);
53 
54 MODULE_AUTHOR(CORE_AUTHOR);
55 MODULE_DESCRIPTION(CORE_DESC);
56 MODULE_PARM_DESC(debug, "Enable debug output");
57 module_param_named(debug, drm_debug, int, 0600);
58 
59 #if 0
60 static DEFINE_SPINLOCK(drm_minor_lock);
61 static struct idr drm_minors_idr;
62 #endif
63 
64 #if 0
65 static struct dentry *drm_debugfs_root;
66 #endif
67 
/*
 * drm_err - Log an unconditional DRM error message.
 * @func:   name of the calling function, used in the log prefix
 * @format: printf-style format string for the message body
 *
 * Emits "error: [drm:pid<pid>:<func>] *ERROR* " followed by the formatted
 * message via kprintf()/kvprintf().  Unlike the debug path below, this
 * always prints; it does not consult the drm_debug bitmask.
 */
void drm_err(const char *func, const char *format, ...)
{
	__va_list args;

	/* Prefix first, then the caller-supplied message. */
	kprintf("error: [" DRM_NAME ":pid%d:%s] *ERROR* ", DRM_CURRENTPID, func);

	__va_start(args, format);
	kvprintf(format, args);
	__va_end(args);
}
EXPORT_SYMBOL(drm_err);
79 
/*
 * drm_ut_debug_printk - Backend for the DRM debug logging macros.
 * @function_name: name of the calling function, used in the log prefix
 * @format:        printf-style format string for the message body
 *
 * Prints "[drm:<func>] " (or "[drm:pid<pid>:<func>] " when DRM_UT_PID is
 * set in drm_debug) followed by the formatted message.  Note that this
 * function itself only checks drm_debug for the PID-prefix bit;
 * presumably the DRM_DEBUG* caller macros do the category filtering
 * before calling here — confirm against drmP.h.
 */
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	__va_list args;

	if (unlikely(drm_debug & DRM_UT_PID)) {
		kprintf("[" DRM_NAME ":pid%d:%s] ",
		    DRM_CURRENTPID, function_name);
	} else {
		kprintf("[" DRM_NAME ":%s] ", function_name);
	}

	__va_start(args, format);
	kvprintf(format, args);
	__va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);
96 
97 #if 0
/*
 * drm_master_create - Allocate and initialize a new DRM master object.
 * @minor: minor the master is created for
 *
 * Sets up the refcount, the lock spinlock/waitqueue and the magic-cookie
 * hash table.  Returns the new master with a refcount of 1, or NULL on
 * allocation failure (including failure of drm_ht_create(), in which
 * case the partially built master is freed).
 */
struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
		kfree(master);
		return NULL;
	}
	master->minor = minor;

	return master;
}
117 
/*
 * drm_master_get - Take an additional reference on a master.
 * @master: master to reference (must already be referenced by the caller)
 *
 * Returns @master for call-chaining convenience.
 */
struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);
124 
/*
 * drm_master_destroy - kref release callback for a DRM master.
 * @kref: embedded refcount of the master being destroyed
 *
 * Runs when the last reference is dropped (see drm_master_put()).  Under
 * dev->struct_mutex it gives the driver a chance to clean up via
 * ->master_destroy, removes all legacy maps owned by this master, frees
 * the unique name and the magic-cookie hash table, then frees the master
 * itself.
 */
static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			/* NOTE(review): dead store — the _safe iterator
			 * reloads r_list from list_temp on the next pass. */
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}
153 
/*
 * drm_master_put - Drop a reference on a master and clear the pointer.
 * @master: pointer to the master pointer; set to NULL on return
 *
 * The master is destroyed (drm_master_destroy()) when the last reference
 * is dropped.  Clearing *master defends against use-after-free by the
 * caller.
 */
void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
160 #endif
161 
162 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
163 			struct drm_file *file_priv)
164 {
165 	DRM_DEBUG("setmaster\n");
166 
167 	if (file_priv->master != 0)
168 		return (0);
169 
170 	return (-EPERM);
171 }
172 
173 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
174 			 struct drm_file *file_priv)
175 {
176 	DRM_DEBUG("dropmaster\n");
177 	if (file_priv->master != 0)
178 		return -EINVAL;
179 	return 0;
180 }
181 
182 #if 0
183 /*
184  * DRM Minors
185  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
186  * of them is represented by a drm_minor object. Depending on the capabilities
187  * of the device-driver, different interfaces are registered.
188  *
189  * Minors can be accessed via dev->$minor_name. This pointer is either
190  * NULL or a valid drm_minor pointer and stays valid as long as the device is
191  * valid. This means, DRM minors have the same life-time as the underlying
192  * device. However, this doesn't mean that the minor is active. Minors are
193  * registered and unregistered dynamically according to device-state.
194  */
195 
196 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
197 					     unsigned int type)
198 {
199 	switch (type) {
200 	case DRM_MINOR_LEGACY:
201 		return &dev->primary;
202 	case DRM_MINOR_RENDER:
203 		return &dev->render;
204 	case DRM_MINOR_CONTROL:
205 		return &dev->control;
206 	default:
207 		return NULL;
208 	}
209 }
210 
211 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
212 {
213 	struct drm_minor *minor;
214 	unsigned long flags;
215 	int r;
216 
217 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
218 	if (!minor)
219 		return -ENOMEM;
220 
221 	minor->type = type;
222 	minor->dev = dev;
223 
224 	idr_preload(GFP_KERNEL);
225 	spin_lock_irqsave(&drm_minor_lock, flags);
226 	r = idr_alloc(&drm_minors_idr,
227 		      NULL,
228 		      64 * type,
229 		      64 * (type + 1),
230 		      GFP_NOWAIT);
231 	spin_unlock_irqrestore(&drm_minor_lock, flags);
232 	idr_preload_end();
233 
234 	if (r < 0)
235 		goto err_free;
236 
237 	minor->index = r;
238 
239 	minor->kdev = drm_sysfs_minor_alloc(minor);
240 	if (IS_ERR(minor->kdev)) {
241 		r = PTR_ERR(minor->kdev);
242 		goto err_index;
243 	}
244 
245 	*drm_minor_get_slot(dev, type) = minor;
246 	return 0;
247 
248 err_index:
249 	spin_lock_irqsave(&drm_minor_lock, flags);
250 	idr_remove(&drm_minors_idr, minor->index);
251 	spin_unlock_irqrestore(&drm_minor_lock, flags);
252 err_free:
253 	kfree(minor);
254 	return r;
255 }
256 
257 static void drm_minor_free(struct drm_device *dev, unsigned int type)
258 {
259 	struct drm_minor **slot, *minor;
260 	unsigned long flags;
261 
262 	slot = drm_minor_get_slot(dev, type);
263 	minor = *slot;
264 	if (!minor)
265 		return;
266 
267 	put_device(minor->kdev);
268 
269 	spin_lock_irqsave(&drm_minor_lock, flags);
270 	idr_remove(&drm_minors_idr, minor->index);
271 	spin_unlock_irqrestore(&drm_minor_lock, flags);
272 
273 	kfree(minor);
274 	*slot = NULL;
275 }
276 
277 static int drm_minor_register(struct drm_device *dev, unsigned int type)
278 {
279 	struct drm_minor *minor;
280 	unsigned long flags;
281 	int ret;
282 
283 	DRM_DEBUG("\n");
284 
285 	minor = *drm_minor_get_slot(dev, type);
286 	if (!minor)
287 		return 0;
288 
289 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
290 	if (ret) {
291 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
292 		return ret;
293 	}
294 
295 	ret = device_add(minor->kdev);
296 	if (ret)
297 		goto err_debugfs;
298 
299 	/* replace NULL with @minor so lookups will succeed from now on */
300 	spin_lock_irqsave(&drm_minor_lock, flags);
301 	idr_replace(&drm_minors_idr, minor, minor->index);
302 	spin_unlock_irqrestore(&drm_minor_lock, flags);
303 
304 	DRM_DEBUG("new minor registered %d\n", minor->index);
305 	return 0;
306 
307 err_debugfs:
308 	drm_debugfs_cleanup(minor);
309 	return ret;
310 }
311 
312 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
313 {
314 	struct drm_minor *minor;
315 	unsigned long flags;
316 
317 	minor = *drm_minor_get_slot(dev, type);
318 	if (!minor || !device_is_registered(minor->kdev))
319 		return;
320 
321 	/* replace @minor with NULL so lookups will fail from now on */
322 	spin_lock_irqsave(&drm_minor_lock, flags);
323 	idr_replace(&drm_minors_idr, NULL, minor->index);
324 	spin_unlock_irqrestore(&drm_minor_lock, flags);
325 
326 	device_del(minor->kdev);
327 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
328 	drm_debugfs_cleanup(minor);
329 }
330 
331 /**
332  * drm_minor_acquire - Acquire a DRM minor
333  * @minor_id: Minor ID of the DRM-minor
334  *
335  * Looks up the given minor-ID and returns the respective DRM-minor object. The
336  * reference-count of the underlying device is increased so you must release this
337  * object with drm_minor_release().
338  *
339  * As long as you hold this minor, it is guaranteed that the object and the
340  * minor->dev pointer will stay valid! However, the device may get unplugged and
341  * unregistered while you hold the minor.
342  *
343  * Returns:
344  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
345  * failure.
346  */
347 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
348 {
349 	struct drm_minor *minor;
350 	unsigned long flags;
351 
352 	spin_lock_irqsave(&drm_minor_lock, flags);
353 	minor = idr_find(&drm_minors_idr, minor_id);
354 	if (minor)
355 		drm_dev_ref(minor->dev);
356 	spin_unlock_irqrestore(&drm_minor_lock, flags);
357 
358 	if (!minor) {
359 		return ERR_PTR(-ENODEV);
360 	} else if (drm_device_is_unplugged(minor->dev)) {
361 		drm_dev_unref(minor->dev);
362 		return ERR_PTR(-ENODEV);
363 	}
364 
365 	return minor;
366 }
367 
368 /**
369  * drm_minor_release - Release DRM minor
370  * @minor: Pointer to DRM minor object
371  *
372  * Release a minor that was previously acquired via drm_minor_acquire().
373  */
374 void drm_minor_release(struct drm_minor *minor)
375 {
376 	drm_dev_unref(minor->dev);
377 }
378 
379 /**
380  * DOC: driver instance overview
381  *
382  * A device instance for a drm driver is represented by struct &drm_device. This
383  * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
384  * callbacks implemented by the driver. The driver then needs to initialize all
385  * the various subsystems for the drm device like memory management, vblank
386  * handling, modesetting support and initial output configuration plus obviously
387  * initialize all the corresponding hardware bits. An important part of this is
388  * also calling drm_dev_set_unique() to set the userspace-visible unique name of
389  * this device instance. Finally when everything is up and running and ready for
390  * userspace the device instance can be published using drm_dev_register().
391  *
392  * There is also deprecated support for initializing device instances using
393  * bus-specific helpers and the ->load() callback. But due to
394  * backwards-compatibility needs the device instance have to be published too
395  * early, which requires unpretty global locking to make safe and is therefore
396  * only supported for existing drivers not yet converted to the new scheme.
397  *
398  * When cleaning up a device instance everything needs to be done in reverse:
399  * First unpublish the device instance with drm_dev_unregister(). Then clean up
400  * any other resources allocated at device initialization and drop the driver's
401  * reference to &drm_device using drm_dev_unref().
402  *
403  * Note that the lifetime rules for &drm_device instance has still a lot of
404  * historical baggage. Hence use the reference counting provided by
405  * drm_dev_ref() and drm_dev_unref() only carefully.
406  *
407  * Also note that embedding of &drm_device is currently not (yet) supported (but
408  * it would be easy to add). Drivers can store driver-private data in the
409  * dev_priv field of &drm_device.
410  */
411 
412 /**
413  * drm_put_dev - Unregister and release a DRM device
414  * @dev: DRM device
415  *
416  * Called at module unload time or when a PCI device is unplugged.
417  *
418  * Cleans up all DRM device, calling drm_lastclose().
419  *
420  * Note: Use of this function is deprecated. It will eventually go away
421  * completely.  Please use drm_dev_unregister() and drm_dev_unref() explicitly
422  * instead to make sure that the device isn't userspace accessible any more
423  * while teardown is in progress, ensuring that userspace can't access an
424  * inconsistent state.
425  */
426 void drm_put_dev(struct drm_device *dev)
427 {
428 	DRM_DEBUG("\n");
429 
430 	if (!dev) {
431 		DRM_ERROR("cleanup called no dev\n");
432 		return;
433 	}
434 
435 	drm_dev_unregister(dev);
436 	drm_dev_unref(dev);
437 }
438 EXPORT_SYMBOL(drm_put_dev);
439 
440 void drm_unplug_dev(struct drm_device *dev)
441 {
442 	/* for a USB device */
443 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
444 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
445 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
446 
447 	mutex_lock(&drm_global_mutex);
448 
449 	drm_device_set_unplugged(dev);
450 
451 	if (dev->open_count == 0) {
452 		drm_put_dev(dev);
453 	}
454 	mutex_unlock(&drm_global_mutex);
455 }
456 EXPORT_SYMBOL(drm_unplug_dev);
457 
458 /*
459  * DRM internal mount
460  * We want to be able to allocate our own "struct address_space" to control
461  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
462  * stand-alone address_space objects, so we need an underlying inode. As there
463  * is no way to allocate an independent inode easily, we need a fake internal
464  * VFS mount-point.
465  *
466  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
467  * frees it again. You are allowed to use iget() and iput() to get references to
468  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
469  * drm_fs_inode_free() call (which does not have to be the last iput()).
470  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
471  * between multiple inode-users. You could, technically, call
472  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
473  * iput(), but this way you'd end up with a new vfsmount for each inode.
474  */
475 
476 static int drm_fs_cnt;
477 static struct vfsmount *drm_fs_mnt;
478 
479 static const struct dentry_operations drm_fs_dops = {
480 	.d_dname	= simple_dname,
481 };
482 
483 static const struct super_operations drm_fs_sops = {
484 	.statfs		= simple_statfs,
485 };
486 
487 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
488 				   const char *dev_name, void *data)
489 {
490 	return mount_pseudo(fs_type,
491 			    "drm:",
492 			    &drm_fs_sops,
493 			    &drm_fs_dops,
494 			    0x010203ff);
495 }
496 
497 static struct file_system_type drm_fs_type = {
498 	.name		= "drm",
499 	.owner		= THIS_MODULE,
500 	.mount		= drm_fs_mount,
501 	.kill_sb	= kill_anon_super,
502 };
503 
504 static struct inode *drm_fs_inode_new(void)
505 {
506 	struct inode *inode;
507 	int r;
508 
509 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
510 	if (r < 0) {
511 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
512 		return ERR_PTR(r);
513 	}
514 
515 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
516 	if (IS_ERR(inode))
517 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
518 
519 	return inode;
520 }
521 
522 static void drm_fs_inode_free(struct inode *inode)
523 {
524 	if (inode) {
525 		iput(inode);
526 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
527 	}
528 }
529 
530 /**
531  * drm_dev_alloc - Allocate new DRM device
532  * @driver: DRM driver to allocate device for
533  * @parent: Parent device object
534  *
535  * Allocate and initialize a new DRM device. No device registration is done.
536  * Call drm_dev_register() to advertise the device to user space and register it
537  * with other core subsystems. This should be done last in the device
538  * initialization sequence to make sure userspace can't access an inconsistent
539  * state.
540  *
541  * The initial ref-count of the object is 1. Use drm_dev_ref() and
542  * drm_dev_unref() to take and drop further ref-counts.
543  *
544  * Note that for purely virtual devices @parent can be NULL.
545  *
546  * RETURNS:
547  * Pointer to new DRM device, or NULL if out of memory.
548  */
549 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
550 				 struct device *parent)
551 {
552 	struct drm_device *dev;
553 	int ret;
554 
555 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
556 	if (!dev)
557 		return NULL;
558 
559 	kref_init(&dev->ref);
560 	dev->dev = parent;
561 	dev->driver = driver;
562 
563 	INIT_LIST_HEAD(&dev->filelist);
564 	INIT_LIST_HEAD(&dev->ctxlist);
565 	INIT_LIST_HEAD(&dev->vmalist);
566 	INIT_LIST_HEAD(&dev->maplist);
567 	INIT_LIST_HEAD(&dev->vblank_event_list);
568 
569 	spin_lock_init(&dev->buf_lock);
570 	spin_lock_init(&dev->event_lock);
571 	mutex_init(&dev->struct_mutex);
572 	mutex_init(&dev->ctxlist_mutex);
573 	mutex_init(&dev->master_mutex);
574 
575 	dev->anon_inode = drm_fs_inode_new();
576 	if (IS_ERR(dev->anon_inode)) {
577 		ret = PTR_ERR(dev->anon_inode);
578 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
579 		goto err_free;
580 	}
581 
582 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
583 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
584 		if (ret)
585 			goto err_minors;
586 
587 		WARN_ON(driver->suspend || driver->resume);
588 	}
589 
590 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
591 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
592 		if (ret)
593 			goto err_minors;
594 	}
595 
596 	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
597 	if (ret)
598 		goto err_minors;
599 
600 	if (drm_ht_create(&dev->map_hash, 12))
601 		goto err_minors;
602 
603 	drm_legacy_ctxbitmap_init(dev);
604 
605 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
606 		ret = drm_gem_init(dev);
607 		if (ret) {
608 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
609 			goto err_ctxbitmap;
610 		}
611 	}
612 
613 	if (parent) {
614 		ret = drm_dev_set_unique(dev, dev_name(parent));
615 		if (ret)
616 			goto err_setunique;
617 	}
618 
619 	return dev;
620 
621 err_setunique:
622 	if (drm_core_check_feature(dev, DRIVER_GEM))
623 		drm_gem_destroy(dev);
624 err_ctxbitmap:
625 	drm_legacy_ctxbitmap_cleanup(dev);
626 	drm_ht_remove(&dev->map_hash);
627 err_minors:
628 	drm_minor_free(dev, DRM_MINOR_LEGACY);
629 	drm_minor_free(dev, DRM_MINOR_RENDER);
630 	drm_minor_free(dev, DRM_MINOR_CONTROL);
631 	drm_fs_inode_free(dev->anon_inode);
632 err_free:
633 	mutex_destroy(&dev->master_mutex);
634 	kfree(dev);
635 	return NULL;
636 }
637 EXPORT_SYMBOL(drm_dev_alloc);
638 
639 static void drm_dev_release(struct kref *ref)
640 {
641 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
642 
643 	if (drm_core_check_feature(dev, DRIVER_GEM))
644 		drm_gem_destroy(dev);
645 
646 	drm_legacy_ctxbitmap_cleanup(dev);
647 	drm_ht_remove(&dev->map_hash);
648 	drm_fs_inode_free(dev->anon_inode);
649 
650 	drm_minor_free(dev, DRM_MINOR_LEGACY);
651 	drm_minor_free(dev, DRM_MINOR_RENDER);
652 	drm_minor_free(dev, DRM_MINOR_CONTROL);
653 
654 	mutex_destroy(&dev->master_mutex);
655 	kfree(dev->unique);
656 	kfree(dev);
657 }
658 
659 /**
660  * drm_dev_ref - Take reference of a DRM device
661  * @dev: device to take reference of or NULL
662  *
663  * This increases the ref-count of @dev by one. You *must* already own a
664  * reference when calling this. Use drm_dev_unref() to drop this reference
665  * again.
666  *
667  * This function never fails. However, this function does not provide *any*
668  * guarantee whether the device is alive or running. It only provides a
669  * reference to the object and the memory associated with it.
670  */
671 void drm_dev_ref(struct drm_device *dev)
672 {
673 	if (dev)
674 		kref_get(&dev->ref);
675 }
676 EXPORT_SYMBOL(drm_dev_ref);
677 
678 /**
679  * drm_dev_unref - Drop reference of a DRM device
680  * @dev: device to drop reference of or NULL
681  *
682  * This decreases the ref-count of @dev by one. The device is destroyed if the
683  * ref-count drops to zero.
684  */
685 void drm_dev_unref(struct drm_device *dev)
686 {
687 	if (dev)
688 		kref_put(&dev->ref, drm_dev_release);
689 }
690 EXPORT_SYMBOL(drm_dev_unref);
691 
692 /**
693  * drm_dev_register - Register DRM device
694  * @dev: Device to register
695  * @flags: Flags passed to the driver's .load() function
696  *
697  * Register the DRM device @dev with the system, advertise device to user-space
698  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
699  * previously.
700  *
701  * Never call this twice on any device!
702  *
703  * NOTE: To ensure backward compatibility with existing drivers method this
704  * function calls the ->load() method after registering the device nodes,
705  * creating race conditions. Usage of the ->load() methods is therefore
706  * deprecated, drivers must perform all initialization before calling
707  * drm_dev_register().
708  *
709  * RETURNS:
710  * 0 on success, negative error code on failure.
711  */
712 int drm_dev_register(struct drm_device *dev, unsigned long flags)
713 {
714 	int ret;
715 
716 	mutex_lock(&drm_global_mutex);
717 
718 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
719 	if (ret)
720 		goto err_minors;
721 
722 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
723 	if (ret)
724 		goto err_minors;
725 
726 	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
727 	if (ret)
728 		goto err_minors;
729 
730 	if (dev->driver->load) {
731 		ret = dev->driver->load(dev, flags);
732 		if (ret)
733 			goto err_minors;
734 	}
735 
736 	ret = 0;
737 	goto out_unlock;
738 
739 err_minors:
740 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
741 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
742 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
743 out_unlock:
744 	mutex_unlock(&drm_global_mutex);
745 	return ret;
746 }
747 EXPORT_SYMBOL(drm_dev_register);
748 
749 /**
750  * drm_dev_unregister - Unregister DRM device
751  * @dev: Device to unregister
752  *
753  * Unregister the DRM device from the system. This does the reverse of
754  * drm_dev_register() but does not deallocate the device. The caller must call
755  * drm_dev_unref() to drop their final reference.
756  *
757  * This should be called first in the device teardown code to make sure
758  * userspace can't access the device instance any more.
759  */
760 void drm_dev_unregister(struct drm_device *dev)
761 {
762 	struct drm_map_list *r_list, *list_temp;
763 
764 	drm_lastclose(dev);
765 
766 	if (dev->driver->unload)
767 		dev->driver->unload(dev);
768 
769 	if (dev->agp)
770 		drm_pci_agp_destroy(dev);
771 
772 	drm_vblank_cleanup(dev);
773 
774 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
775 		drm_legacy_rmmap(dev, r_list->map);
776 
777 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
778 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
779 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
780 }
781 EXPORT_SYMBOL(drm_dev_unregister);
782 
783 /**
784  * drm_dev_set_unique - Set the unique name of a DRM device
785  * @dev: device of which to set the unique name
786  * @name: unique name
787  *
788  * Sets the unique name of a DRM device using the specified string. Drivers
789  * can use this at driver probe time if the unique name of the devices they
790  * drive is static.
791  *
792  * Return: 0 on success or a negative error code on failure.
793  */
794 int drm_dev_set_unique(struct drm_device *dev, const char *name)
795 {
796 	kfree(dev->unique);
797 	dev->unique = kstrdup(name, GFP_KERNEL);
798 
799 	return dev->unique ? 0 : -ENOMEM;
800 }
801 EXPORT_SYMBOL(drm_dev_set_unique);
802 #endif
803 
804 /*
805  * DRM Core
806  * The DRM core module initializes all global DRM objects and makes them
807  * available to drivers. Once setup, drivers can probe their respective
808  * devices.
809  * Currently, core management includes:
810  *  - The "DRM-Global" key/value database
811  *  - Global ID management for connectors
812  *  - DRM major number allocation
813  *  - DRM minor management
814  *  - DRM sysfs class
815  *  - DRM debugfs root
816  *
817  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
818  * interface registered on a DRM device, you can request minor numbers from DRM
819  * core. DRM core takes care of major-number management and char-dev
820  * registration. A stub ->open() callback forwards any open() requests to the
821  * registered minor.
822  */
823 
824 #if 0
825 static int drm_stub_open(struct inode *inode, struct file *filp)
826 {
827 	const struct file_operations *new_fops;
828 	struct drm_minor *minor;
829 	int err;
830 
831 	DRM_DEBUG("\n");
832 
833 	mutex_lock(&drm_global_mutex);
834 	minor = drm_minor_acquire(iminor(inode));
835 	if (IS_ERR(minor)) {
836 		err = PTR_ERR(minor);
837 		goto out_unlock;
838 	}
839 
840 	new_fops = fops_get(minor->dev->driver->fops);
841 	if (!new_fops) {
842 		err = -ENODEV;
843 		goto out_release;
844 	}
845 
846 	replace_fops(filp, new_fops);
847 	if (filp->f_op->open)
848 		err = filp->f_op->open(inode, filp);
849 	else
850 		err = 0;
851 
852 out_release:
853 	drm_minor_release(minor);
854 out_unlock:
855 	mutex_unlock(&drm_global_mutex);
856 	return err;
857 }
858 
859 static const struct file_operations drm_stub_fops = {
860 	.owner = THIS_MODULE,
861 	.open = drm_stub_open,
862 	.llseek = noop_llseek,
863 };
864 
865 static int __init drm_core_init(void)
866 {
867 	int ret = -ENOMEM;
868 
869 	drm_global_init();
870 	drm_connector_ida_init();
871 	idr_init(&drm_minors_idr);
872 
873 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
874 		goto err_p1;
875 
876 	ret = drm_sysfs_init();
877 	if (ret < 0) {
878 		printk(KERN_ERR "DRM: Error creating drm class.\n");
879 		goto err_p2;
880 	}
881 
882 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
883 	if (!drm_debugfs_root) {
884 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
885 		ret = -1;
886 		goto err_p3;
887 	}
888 
889 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
890 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
891 	return 0;
892 err_p3:
893 	drm_sysfs_destroy();
894 err_p2:
895 	unregister_chrdev(DRM_MAJOR, "drm");
896 
897 	idr_destroy(&drm_minors_idr);
898 err_p1:
899 	return ret;
900 }
901 
902 static void __exit drm_core_exit(void)
903 {
904 	debugfs_remove(drm_debugfs_root);
905 	drm_sysfs_destroy();
906 
907 	unregister_chrdev(DRM_MAJOR, "drm");
908 
909 	drm_connector_ida_destroy();
910 	idr_destroy(&drm_minors_idr);
911 }
912 
913 module_init(drm_core_init);
914 module_exit(drm_core_exit);
915 #endif
916 
917 #include <sys/devfs.h>
918 
919 #include <linux/export.h>
920 #include <linux/dmi.h>
921 #include <drm/drmP.h>
922 #include <drm/drm_core.h>
923 
924 static int drm_load(struct drm_device *dev);
925 drm_pci_id_list_t *drm_find_description(int vendor, int device,
926     drm_pci_id_list_t *idlist);
927 
928 #define DRIVER_SOFTC(unit) \
929 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
930 
931 static int
932 drm_modevent(module_t mod, int type, void *data)
933 {
934 
935 	switch (type) {
936 	case MOD_LOAD:
937 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
938 		break;
939 	}
940 	return (0);
941 }
942 
943 static moduledata_t drm_mod = {
944 	"drm",
945 	drm_modevent,
946 	0
947 };
948 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
949 MODULE_VERSION(drm, 1);
950 MODULE_DEPEND(drm, agp, 1, 1, 1);
951 MODULE_DEPEND(drm, pci, 1, 1, 1);
952 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
953 
954 static struct dev_ops drm_cdevsw = {
955 	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
956 	.d_open =	drm_open,
957 	.d_close =	drm_close,
958 	.d_read =	drm_read,
959 	.d_ioctl =	drm_ioctl,
960 	.d_kqfilter =	drm_kqfilter,
961 	.d_mmap =	drm_mmap,
962 	.d_mmap_single = drm_mmap_single,
963 };
964 
965 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
966 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
967     "DRM debugging");
968 
969 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
970 {
971 	drm_pci_id_list_t *id_entry;
972 	int vendor, device;
973 
974 	vendor = pci_get_vendor(kdev);
975 	device = pci_get_device(kdev);
976 
977 	if (pci_get_class(kdev) != PCIC_DISPLAY)
978 		return ENXIO;
979 
980 	id_entry = drm_find_description(vendor, device, idlist);
981 	if (id_entry != NULL) {
982 		if (!device_get_desc(kdev)) {
983 			device_set_desc(kdev, id_entry->name);
984 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
985 		}
986 		return 0;
987 	}
988 
989 	return ENXIO;
990 }
991 
992 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
993 {
994 	struct drm_device *dev;
995 	drm_pci_id_list_t *id_entry;
996 	int unit, error;
997 	u_int irq_flags;
998 	int msi_enable;
999 
1000 	unit = device_get_unit(kdev);
1001 	dev = device_get_softc(kdev);
1002 
1003 	/* Initialize Linux struct device */
1004 	dev->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1005 
1006 	if (!strcmp(device_get_name(kdev), "drmsub"))
1007 		dev->dev->bsddev = device_get_parent(kdev);
1008 	else
1009 		dev->dev->bsddev = kdev;
1010 
1011 	dev->pci_domain = pci_get_domain(dev->dev->bsddev);
1012 	dev->pci_bus = pci_get_bus(dev->dev->bsddev);
1013 	dev->pci_slot = pci_get_slot(dev->dev->bsddev);
1014 	dev->pci_func = pci_get_function(dev->dev->bsddev);
1015 	drm_init_pdev(dev->dev->bsddev, &dev->pdev);
1016 
1017 	id_entry = drm_find_description(dev->pdev->vendor,
1018 	    dev->pdev->device, idlist);
1019 	dev->id_entry = id_entry;
1020 
1021 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1022 		msi_enable = 1;
1023 
1024 		dev->irq_type = pci_alloc_1intr(dev->dev->bsddev, msi_enable,
1025 		    &dev->irqrid, &irq_flags);
1026 
1027 		dev->irqr = bus_alloc_resource_any(dev->dev->bsddev, SYS_RES_IRQ,
1028 		    &dev->irqrid, irq_flags);
1029 
1030 		if (!dev->irqr) {
1031 			return (ENOENT);
1032 		}
1033 
1034 		dev->irq = (int) rman_get_start(dev->irqr);
1035 		dev->pdev->irq = dev->irq; /* for i915 */
1036 	}
1037 
1038 	/* Print the contents of pdev struct. */
1039 	drm_print_pdev(dev->pdev);
1040 
1041 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1042 	lwkt_serialize_init(&dev->irq_lock);
1043 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1044 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1045 
1046 	error = drm_load(dev);
1047 	if (error)
1048 		goto error;
1049 
1050 	error = drm_create_cdevs(kdev);
1051 	if (error)
1052 		goto error;
1053 
1054 	return (error);
1055 error:
1056 	if (dev->irqr) {
1057 		bus_release_resource(dev->dev->bsddev, SYS_RES_IRQ,
1058 		    dev->irqrid, dev->irqr);
1059 	}
1060 	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1061 		pci_release_msi(dev->dev->bsddev);
1062 	}
1063 	return (error);
1064 }
1065 
1066 int
1067 drm_create_cdevs(device_t kdev)
1068 {
1069 	struct drm_device *dev;
1070 	int error, unit;
1071 
1072 	unit = device_get_unit(kdev);
1073 	dev = device_get_softc(kdev);
1074 
1075 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1076 				DRM_DEV_MODE, "dri/card%d", unit);
1077 	error = 0;
1078 	if (error == 0)
1079 		dev->devnode->si_drv1 = dev;
1080 	return (error);
1081 }
1082 
/* Fallback device name used when the driver does not define one. */
#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

/* newbus devclass shared by all DRM device drivers. */
devclass_t drm_devclass;
1088 
1089 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1090     drm_pci_id_list_t *idlist)
1091 {
1092 	int i = 0;
1093 
1094 	for (i = 0; idlist[i].vendor != 0; i++) {
1095 		if ((idlist[i].vendor == vendor) &&
1096 		    ((idlist[i].device == device) ||
1097 		    (idlist[i].device == 0))) {
1098 			return &idlist[i];
1099 		}
1100 	}
1101 	return NULL;
1102 }
1103 
/*
 * One-time device setup called from drm_attach(): registers sysctl
 * nodes, legacy statistics counters, AGP and GEM state, then invokes
 * the driver's load() callback.  Returns 0 on success, errno otherwise.
 */
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev->maplist);

	drm_sysctl_init(dev);
	INIT_LIST_HEAD(&dev->filelist);

	/* Legacy statistics: six counter slots exported via sysctl. */
	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	/* AGP setup only applies when the driver wants it AND the PCI
	 * device actually is AGP. */
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->dev->bsddev))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	/*
	 * NOTE(review): drm_gem_destroy() runs even when GEM was never
	 * initialized (non-GEM driver whose load() failed, or when
	 * drm_gem_init() itself failed) — confirm it tolerates that.
	 */
	drm_gem_destroy(dev);
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	/*
	 * NOTE(review): vbl_lock is not initialized in drm_attach();
	 * presumably drm_vblank_init() sets it up — verify it is always
	 * initialized before this lockuninit() can be reached.
	 */
	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);

	return retcode;
}
1179 
/*
 * devfs requires a close entry point, but all real per-open teardown is
 * performed by the cdevpriv destructor (drm_cdevpriv_dtor), so this is
 * an intentional no-op that always reports success.
 */
int drm_close(struct dev_close_args *ap)
{
	return 0;
}
1187 
1188 void drm_cdevpriv_dtor(void *cd)
1189 {
1190 	struct drm_file *file_priv = cd;
1191 	struct drm_device *dev = file_priv->dev;
1192 	int retcode = 0;
1193 
1194 	DRM_DEBUG("open_count = %d\n", dev->open_count);
1195 
1196 	DRM_LOCK(dev);
1197 
1198 	if (dev->driver->preclose != NULL)
1199 		dev->driver->preclose(dev, file_priv);
1200 
1201 	/* ========================================================
1202 	 * Begin inline drm_release
1203 	 */
1204 
1205 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1206 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1207 
1208 	if (dev->driver->driver_features & DRIVER_GEM)
1209 		drm_gem_release(dev, file_priv);
1210 
1211 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
1212 	    !dev->driver->reclaim_buffers_locked)
1213 		drm_legacy_reclaim_buffers(dev, file_priv);
1214 
1215 	funsetown(&dev->buf_sigio);
1216 
1217 	if (dev->driver->postclose != NULL)
1218 		dev->driver->postclose(dev, file_priv);
1219 	list_del(&file_priv->lhead);
1220 
1221 
1222 	/* ========================================================
1223 	 * End inline drm_release
1224 	 */
1225 
1226 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
1227 	device_unbusy(dev->dev->bsddev);
1228 	if (--dev->open_count == 0) {
1229 		retcode = drm_lastclose(dev);
1230 	}
1231 
1232 	DRM_UNLOCK(dev);
1233 }
1234 
1235 int
1236 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1237     struct sysctl_oid *top)
1238 {
1239 	struct sysctl_oid *oid;
1240 
1241 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1242 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1243 	     dev->pci_slot, dev->pci_func);
1244 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1245 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1246 	if (oid == NULL)
1247 		return (ENOMEM);
1248 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1249 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1250 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1251 	if (oid == NULL)
1252 		return (ENOMEM);
1253 
1254 	return (0);
1255 }
1256 
1257 int
1258 drm_mmap_single(struct dev_mmap_single_args *ap)
1259 {
1260 	struct drm_device *dev;
1261 	struct cdev *kdev = ap->a_head.a_dev;
1262 	vm_ooffset_t *offset = ap->a_offset;
1263 	vm_size_t size = ap->a_size;
1264 	struct vm_object **obj_res = ap->a_object;
1265 	int nprot = ap->a_nprot;
1266 
1267 	dev = drm_get_device_from_kdev(kdev);
1268 	if (dev->drm_ttm_bdev != NULL) {
1269 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1270 		    obj_res, nprot));
1271 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1272 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1273 	} else {
1274 		return (ENODEV);
1275 	}
1276 }
1277 
/* XXX broken code */
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

/* Linux DRM ioctl command numbers all fall in the 0x64xx range. */
#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

/*
 * Linux-emulation ioctl entry: rewrite the Linux direction bits (which
 * are swapped relative to BSD, see the defines above) into the native
 * encoding, then forward to the native ioctl() syscall handler.
 */
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	/* Clear the direction bits, then re-set them natively. */
	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */
1313 
/*
 * Subsystem init hook (registered via SYSINIT): set up the global
 * DRM/TTM state, optionally register the Linux-emulation ioctl handler,
 * and announce the core version.  Always returns 0.
 */
static int
drm_core_init(void *arg)
{

	drm_global_init();

#if DRM_LINUX
	linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
}
1328 
/*
 * Subsystem teardown hook (registered via SYSUNINIT): unregister the
 * Linux-emulation ioctl handler (if compiled in) and release the global
 * DRM/TTM state.
 */
static void
drm_core_exit(void *arg)
{

#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}
1339 
/* Hook the core init/teardown routines into the kernel boot sequence. */
SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);
1344 
1345 
1346 #include <linux/dmi.h>
1347 
1348 /*
1349  * Check if dmi_system_id structure matches system DMI data
1350  */
1351 static bool
1352 dmi_found(const struct dmi_system_id *dsi)
1353 {
1354 	int i, slot;
1355 	bool found = false;
1356 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1357 
1358 	sys_vendor = kgetenv("smbios.system.maker");
1359 	board_vendor = kgetenv("smbios.planar.maker");
1360 	product_name = kgetenv("smbios.system.product");
1361 	board_name = kgetenv("smbios.planar.product");
1362 
1363 	for (i = 0; i < NELEM(dsi->matches); i++) {
1364 		slot = dsi->matches[i].slot;
1365 		switch (slot) {
1366 		case DMI_NONE:
1367 			break;
1368 		case DMI_SYS_VENDOR:
1369 			if (sys_vendor != NULL &&
1370 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1371 				break;
1372 			else
1373 				goto done;
1374 		case DMI_BOARD_VENDOR:
1375 			if (board_vendor != NULL &&
1376 			    !strcmp(board_vendor, dsi->matches[i].substr))
1377 				break;
1378 			else
1379 				goto done;
1380 		case DMI_PRODUCT_NAME:
1381 			if (product_name != NULL &&
1382 			    !strcmp(product_name, dsi->matches[i].substr))
1383 				break;
1384 			else
1385 				goto done;
1386 		case DMI_BOARD_NAME:
1387 			if (board_name != NULL &&
1388 			    !strcmp(board_name, dsi->matches[i].substr))
1389 				break;
1390 			else
1391 				goto done;
1392 		default:
1393 			goto done;
1394 		}
1395 	}
1396 	found = true;
1397 
1398 done:
1399 	if (sys_vendor != NULL)
1400 		kfreeenv(sys_vendor);
1401 	if (board_vendor != NULL)
1402 		kfreeenv(board_vendor);
1403 	if (product_name != NULL)
1404 		kfreeenv(product_name);
1405 	if (board_name != NULL)
1406 		kfreeenv(board_name);
1407 
1408 	return found;
1409 }
1410 
1411 int dmi_check_system(const struct dmi_system_id *sysid)
1412 {
1413 	const struct dmi_system_id *dsi;
1414 	int num = 0;
1415 
1416 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1417 		if (dmi_found(dsi)) {
1418 			num++;
1419 			if (dsi->callback && dsi->callback(dsi))
1420 				break;
1421 		}
1422 	}
1423 	return (num);
1424 }
1425