xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 9cefb7c8)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
36 unsigned int drm_debug = 0;	/* 1 to enable debug output */
37 EXPORT_SYMBOL(drm_debug);
38 
39 MODULE_AUTHOR(CORE_AUTHOR);
40 MODULE_DESCRIPTION(CORE_DESC);
41 MODULE_PARM_DESC(debug, "Enable debug output");
42 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
43 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
44 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
45 
46 module_param_named(debug, drm_debug, int, 0600);
47 
48 #if 0
49 static DEFINE_SPINLOCK(drm_minor_lock);
50 static struct idr drm_minors_idr;
51 #endif
52 
53 struct class *drm_class;
54 #if 0
55 static struct dentry *drm_debugfs_root;
56 #endif
57 
/*
 * drm_err - log a DRM error message.
 * @format: printf-style format string
 *
 * Stubbed out on DragonFly: the Linux va_format-based implementation is
 * compiled out, so this currently does nothing.  The disabled body
 * previously declared an uninitialized "int r" and executed "return r;"
 * in a void function, which would be invalid if ever re-enabled; both
 * have been removed.
 */
void drm_err(const char *format, ...)
{
#if 0
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
	       __builtin_return_address(0), &vaf);

	va_end(args);
#endif
}
EXPORT_SYMBOL(drm_err);
78 EXPORT_SYMBOL(drm_err);
79 
/*
 * drm_ut_debug_printk - emit a debug message tagged with the caller's name.
 * @function_name: name of the calling function, used as message prefix
 * @format: printf-style format string
 *
 * Stubbed out on DragonFly: the Linux printk-based body below is
 * compiled out, so this currently does nothing.
 */
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
#if 0
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
#endif
}
EXPORT_SYMBOL(drm_ut_debug_printk);
96 
97 #if 0
/*
 * Allocate and initialize a new master object for @minor.
 * The caller receives the initial reference (see kref_init below).
 * Returns NULL on allocation or magic-hash creation failure.
 * (Compiled out on DragonFly.)
 */
struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
		kfree(master);
		return NULL;
	}
	master->minor = minor;

	return master;
}
117 
/* Take an extra reference on @master; returns it for call chaining. */
struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);
124 
/*
 * Final kref release callback for a master: runs the driver's
 * master_destroy hook, removes maps owned by this master, frees the
 * unique name and magic hash, then the object itself.
 * All teardown happens under dev->struct_mutex.
 * (Compiled out on DragonFly.)
 */
static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			/* r_list is stale after rmmap; the safe iterator
			 * continues via list_temp, this store is defensive. */
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}
153 
/* Drop a reference on *master (destroying it at zero) and clear the
 * caller's pointer so it cannot be used again. */
void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
160 #endif
161 
162 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
163 			struct drm_file *file_priv)
164 {
165 	DRM_DEBUG("setmaster\n");
166 
167 	if (file_priv->master != 0)
168 		return (0);
169 
170 	return (-EPERM);
171 }
172 
173 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
174 			 struct drm_file *file_priv)
175 {
176 	DRM_DEBUG("dropmaster\n");
177 	if (file_priv->master != 0)
178 		return -EINVAL;
179 	return 0;
180 }
181 
182 #if 0
183 /*
184  * DRM Minors
185  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
186  * of them is represented by a drm_minor object. Depending on the capabilities
187  * of the device-driver, different interfaces are registered.
188  *
189  * Minors can be accessed via dev->$minor_name. This pointer is either
190  * NULL or a valid drm_minor pointer and stays valid as long as the device is
191  * valid. This means, DRM minors have the same life-time as the underlying
192  * device. However, this doesn't mean that the minor is active. Minors are
193  * registered and unregistered dynamically according to device-state.
194  */
195 
196 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
197 					     unsigned int type)
198 {
199 	switch (type) {
200 	case DRM_MINOR_LEGACY:
201 		return &dev->primary;
202 	case DRM_MINOR_RENDER:
203 		return &dev->render;
204 	case DRM_MINOR_CONTROL:
205 		return &dev->control;
206 	default:
207 		return NULL;
208 	}
209 }
210 
/*
 * Allocate a drm_minor of @type for @dev, reserve its index in the
 * minors IDR (64 indices per type), and create its sysfs device.
 * The IDR slot stays NULL until drm_minor_register() publishes it, so
 * lookups cannot find a half-constructed minor.
 * Returns 0 or a negative errno. (Compiled out on DragonFly.)
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* Preload so the GFP_NOWAIT allocation under the spinlock cannot
	 * fail for lack of memory. */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}
256 
/*
 * Release the minor of @type: destroy its mode group, drop the sysfs
 * device reference, release the IDR index, free the object and clear
 * the device slot. No-op if the slot was never allocated.
 * (Compiled out on DragonFly.)
 */
static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	drm_mode_group_destroy(&minor->mode_group);
	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}
277 
/*
 * Publish a previously allocated minor: set up debugfs, add the char
 * device, then swap the minor into its IDR slot so lookups succeed.
 * Returns 0 if the slot is empty (nothing to register) or on success,
 * negative errno otherwise. (Compiled out on DragonFly.)
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}
312 
/*
 * Reverse of drm_minor_register(): hide the minor from lookups, remove
 * the char device and tear down debugfs. Safe to call when the minor
 * was never registered. (Compiled out on DragonFly.)
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
331 
332 /**
333  * drm_minor_acquire - Acquire a DRM minor
334  * @minor_id: Minor ID of the DRM-minor
335  *
336  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
338  * object with drm_minor_release().
339  *
340  * As long as you hold this minor, it is guaranteed that the object and the
341  * minor->dev pointer will stay valid! However, the device may get unplugged and
342  * unregistered while you hold the minor.
343  *
344  * Returns:
345  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
346  * failure.
347  */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	/* Take the device reference while still holding the lock so the
	 * minor cannot disappear between lookup and ref. */
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		/* Device is going away: drop the reference we just took. */
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}
368 
369 /**
370  * drm_minor_release - Release DRM minor
371  * @minor: Pointer to DRM minor object
372  *
373  * Release a minor that was previously acquired via drm_minor_acquire().
374  */
void drm_minor_release(struct drm_minor *minor)
{
	/* Drops the device reference taken by drm_minor_acquire(). */
	drm_dev_unref(minor->dev);
}
379 
380 /**
381  * drm_put_dev - Unregister and release a DRM device
382  * @dev: DRM device
383  *
384  * Called at module unload time or when a PCI device is unplugged.
385  *
386  * Use of this function is discouraged. It will eventually go away completely.
387  * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
388  *
389  * Cleans up all DRM device, calling drm_lastclose().
390  */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	/* Unregister, then drop what is typically the final reference. */
	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);
404 
/*
 * Handle hot-unplug (e.g. a USB device yanked): hide all minors, mark
 * the device unplugged, and release it immediately if nobody has it
 * open — otherwise the last close performs the release.
 * (Compiled out on DragonFly.)
 */
void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
422 
423 /*
424  * DRM internal mount
425  * We want to be able to allocate our own "struct address_space" to control
426  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
427  * stand-alone address_space objects, so we need an underlying inode. As there
428  * is no way to allocate an independent inode easily, we need a fake internal
429  * VFS mount-point.
430  *
431  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
432  * frees it again. You are allowed to use iget() and iput() to get references to
433  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
434  * drm_fs_inode_free() call (which does not have to be the last iput()).
435  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
436  * between multiple inode-users. You could, technically, call
437  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
438  * iput(), but this way you'd end up with a new vfsmount for each inode.
439  */
440 
/* Pin count and mount point of the internal "drm" pseudo filesystem,
 * shared by all anonymous inodes (see drm_fs_inode_new()). */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

/* Mount callback: a pseudo-fs (magic 0x010203ff) that exists only to
 * mint anonymous inodes — it is never user-mountable. */
static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};
468 
/*
 * Allocate an anonymous inode on the internal drm mount, pinning the
 * mount for the inode's lifetime. Returns ERR_PTR on failure.
 * Pair every call with drm_fs_inode_free(). (Compiled out on DragonFly.)
 */
static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		/* Undo the mount pin if no inode could be allocated. */
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}
486 
/* Release an inode from drm_fs_inode_new() and unpin the internal
 * mount; tolerates NULL. */
static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}
494 
495 /**
496  * drm_dev_alloc - Allocate new DRM device
497  * @driver: DRM driver to allocate device for
498  * @parent: Parent device object
499  *
500  * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
502  * with other core subsystems.
503  *
504  * The initial ref-count of the object is 1. Use drm_dev_ref() and
505  * drm_dev_unref() to take and drop further ref-counts.
506  *
507  * Note that for purely virtual devices @parent can be NULL.
508  *
509  * RETURNS:
510  * Pointer to new DRM device, or NULL if out of memory.
511  */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	/* Optional minors first; the legacy/primary minor is mandatory. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_legacy_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

	/* Unwind in reverse allocation order; drm_minor_free() is a no-op
	 * for minors that were never allocated. */
err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
595 
/*
 * Final kref release callback: undoes drm_dev_alloc() in reverse order
 * and frees the device. (Compiled out on DragonFly.)
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}
615 
616 /**
617  * drm_dev_ref - Take reference of a DRM device
618  * @dev: device to take reference of or NULL
619  *
620  * This increases the ref-count of @dev by one. You *must* already own a
621  * reference when calling this. Use drm_dev_unref() to drop this reference
622  * again.
623  *
624  * This function never fails. However, this function does not provide *any*
625  * guarantee whether the device is alive or running. It only provides a
626  * reference to the object and the memory associated with it.
627  */
void drm_dev_ref(struct drm_device *dev)
{
	/* NULL is tolerated so callers need not check first. */
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);
634 
635 /**
636  * drm_dev_unref - Drop reference of a DRM device
637  * @dev: device to drop reference of or NULL
638  *
639  * This decreases the ref-count of @dev by one. The device is destroyed if the
640  * ref-count drops to zero.
641  */
void drm_dev_unref(struct drm_device *dev)
{
	/* NULL is tolerated; drm_dev_release() runs at refcount zero. */
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);
648 
649 /**
650  * drm_dev_register - Register DRM device
651  * @dev: Device to register
652  * @flags: Flags passed to the driver's .load() function
653  *
654  * Register the DRM device @dev with the system, advertise device to user-space
655  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
656  * previously.
657  *
658  * Never call this twice on any device!
659  *
660  * RETURNS:
661  * 0 on success, negative error code on failure.
662  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	/* Driver load runs after the minors exist but before legacy
	 * output grouping, which needs driver-created state. */
	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
710 
711 /**
712  * drm_dev_unregister - Unregister DRM device
713  * @dev: Device to unregister
714  *
715  * Unregister the DRM device from the system. This does the reverse of
716  * drm_dev_register() but does not deallocate the device. The caller must call
717  * drm_dev_unref() to drop their final reference.
718  */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	/* Drop any remaining legacy maps before hiding the minors. */
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
741 
742 /**
743  * drm_dev_set_unique - Set the unique name of a DRM device
744  * @dev: device of which to set the unique name
745  * @fmt: format string for unique name
746  *
747  * Sets the unique name of a DRM device using the specified format string and
748  * a variable list of arguments. Drivers can use this at driver probe time if
749  * the unique name of the devices they drive is static.
750  *
751  * Return: 0 on success or a negative error code on failure.
752  */
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
	va_list ap;

	/* Replace any previous name; kfree(NULL) is a no-op. */
	kfree(dev->unique);

	va_start(ap, fmt);
	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
766 #endif
767 
768 /*
769  * DRM Core
770  * The DRM core module initializes all global DRM objects and makes them
771  * available to drivers. Once setup, drivers can probe their respective
772  * devices.
773  * Currently, core management includes:
774  *  - The "DRM-Global" key/value database
775  *  - Global ID management for connectors
776  *  - DRM major number allocation
777  *  - DRM minor management
778  *  - DRM sysfs class
779  *  - DRM debugfs root
780  *
781  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
782  * interface registered on a DRM device, you can request minor numbers from DRM
783  * core. DRM core takes care of major-number management and char-dev
784  * registration. A stub ->open() callback forwards any open() requests to the
785  * registered minor.
786  */
787 
788 #if 0
/*
 * open() handler registered on the DRM major: looks up the minor for
 * the inode, swaps in the owning driver's file_operations and forwards
 * the open to them. (Compiled out on DragonFly.)
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	/* From here on the driver's fops serve this file. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}
822 
/* fops installed on the DRM major; open() redirects each file to the
 * per-minor driver fops via drm_stub_open(). */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
828 
/*
 * Module init: set up global state, grab the DRM major, create the
 * sysfs class and the debugfs root; unwinds on failure.
 * (Compiled out on DragonFly.)
 */
static int __init drm_core_init(void)
{
	int ret = -ENOMEM;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
		goto err_p1;

	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
	if (IS_ERR(drm_class)) {
		printk(KERN_ERR "DRM: Error creating drm class.\n");
		ret = PTR_ERR(drm_class);
		goto err_p2;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
		/* NOTE(review): -1 equals -EPERM here; a specific errno
		 * would be clearer, though callers only test non-zero. */
		ret = -1;
		goto err_p3;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
err_p3:
	drm_sysfs_destroy();
err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
err_p1:
	return ret;
}
866 
/* Module exit: tear down in reverse order of drm_core_init(). */
static void __exit drm_core_exit(void)
{
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_connector_ida_destroy();
	idr_destroy(&drm_minors_idr);
}
877 
878 module_init(drm_core_init);
879 module_exit(drm_core_exit);
880 #endif
881 
882 #include <sys/devfs.h>
883 
884 #include <linux/export.h>
885 #include <linux/dmi.h>
886 #include <drm/drmP.h>
887 #include <drm/drm_core.h>
888 
889 #if DRM_DEBUG_DEFAULT_ON == 1
890 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
891     DRM_DEBUGBITS_FAILED_IOCTL)
892 #elif DRM_DEBUG_DEFAULT_ON == 2
893 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
894     DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
895 #else
896 #define DRM_DEBUGBITS_ON (0x0)
897 #endif
898 
899 int drm_notyet_flag = 0;
900 
901 static int drm_load(struct drm_device *dev);
902 drm_pci_id_list_t *drm_find_description(int vendor, int device,
903     drm_pci_id_list_t *idlist);
904 
905 #define DRIVER_SOFTC(unit) \
906 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
907 
908 static int
909 drm_modevent(module_t mod, int type, void *data)
910 {
911 
912 	switch (type) {
913 	case MOD_LOAD:
914 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
915 		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
916 		break;
917 	}
918 	return (0);
919 }
920 
/* Module glue: name, event handler, no private data. */
static moduledata_t drm_mod = {
	"drm",
	drm_modevent,
	0
};
926 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
927 MODULE_VERSION(drm, 1);
928 MODULE_DEPEND(drm, agp, 1, 1, 1);
929 MODULE_DEPEND(drm, pci, 1, 1, 1);
930 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
931 
/* Character-device switch for the /dev/dri/card%d nodes created in
 * drm_create_cdevs(). D_TRACKCLOSE requests a d_close call for every
 * close; D_MPSAFE marks the handlers MP-safe. */
static struct dev_ops drm_cdevsw = {
	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
	.d_open =	drm_open,
	.d_close =	drm_close,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_kqfilter =	drm_kqfilter,
	.d_mmap =	drm_mmap,
	.d_mmap_single = drm_mmap_single,
};
942 
943 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
944 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
945     "DRM debugging");
946 
947 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
948 {
949 	drm_pci_id_list_t *id_entry;
950 	int vendor, device;
951 
952 	vendor = pci_get_vendor(kdev);
953 	device = pci_get_device(kdev);
954 
955 	if (pci_get_class(kdev) != PCIC_DISPLAY)
956 		return ENXIO;
957 
958 	id_entry = drm_find_description(vendor, device, idlist);
959 	if (id_entry != NULL) {
960 		if (!device_get_desc(kdev)) {
961 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
962 			device_set_desc(kdev, id_entry->name);
963 		}
964 		return 0;
965 	}
966 
967 	return ENXIO;
968 }
969 
/*
 * drm_attach - newbus attach handler shared by all DRM drivers.
 * Records the device's PCI location, allocates an interrupt (MSI
 * preferred) for drivers with DRIVER_HAVE_IRQ, initializes the core
 * locks, then runs drm_load() and creates the /dev/dri nodes.
 * Returns 0 or an errno.
 */
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int unit, error;
	u_int irq_flags;
	int msi_enable;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	/* A "drmsub" child borrows its parent's PCI identity. */
	if (!strcmp(device_get_name(kdev), "drmsub"))
		dev->dev = device_get_parent(kdev);
	else
		dev->dev = kdev;

	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);
	drm_init_pdev(dev->dev, &dev->pdev);

	id_entry = drm_find_description(dev->pdev->vendor,
	    dev->pdev->device, idlist);
	dev->id_entry = id_entry;

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		msi_enable = 1;

		dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
		    &dev->irqrid, &irq_flags);

		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
		    &dev->irqrid, irq_flags);

		/* NOTE(review): returning here skips the MSI release done
		 * on the error path below — possible resource leak. */
		if (!dev->irqr) {
			return (ENOENT);
		}

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
	lwkt_serialize_init(&dev->irq_lock);
	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);

	error = drm_load(dev);
	if (error)
		goto error;

	error = drm_create_cdevs(kdev);
	if (error)
		goto error;

	return (error);
error:
	/* Release the interrupt resources allocated above. */
	if (dev->irqr) {
		bus_release_resource(dev->dev, SYS_RES_IRQ,
		    dev->irqrid, dev->irqr);
	}
	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
		pci_release_msi(dev->dev);
	}
	return (error);
}
1036 
1037 int
1038 drm_create_cdevs(device_t kdev)
1039 {
1040 	struct drm_device *dev;
1041 	int error, unit;
1042 
1043 	unit = device_get_unit(kdev);
1044 	dev = device_get_softc(kdev);
1045 
1046 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1047 				DRM_DEV_MODE, "dri/card%d", unit);
1048 	error = 0;
1049 	if (error == 0)
1050 		dev->devnode->si_drv1 = dev;
1051 	return (error);
1052 }
1053 
1054 #ifndef DRM_DEV_NAME
1055 #define DRM_DEV_NAME "drm"
1056 #endif
1057 
1058 devclass_t drm_devclass;
1059 
1060 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1061     drm_pci_id_list_t *idlist)
1062 {
1063 	int i = 0;
1064 
1065 	for (i = 0; idlist[i].vendor != 0; i++) {
1066 		if ((idlist[i].vendor == vendor) &&
1067 		    ((idlist[i].device == device) ||
1068 		    (idlist[i].device == 0))) {
1069 			return &idlist[i];
1070 		}
1071 	}
1072 	return NULL;
1073 }
1074 
/*
 * drm_load - one-time device setup run from drm_attach().
 * Initializes sysctl nodes, legacy stat counters, AGP (when present or
 * required), GEM (when the driver uses it), then calls the driver's
 * load hook. Returns 0 or an errno; failure tears everything down.
 */
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev->maplist);

	drm_sysctl_init(dev);
	INIT_LIST_HEAD(&dev->filelist);

	/* Legacy statistics slots. */
	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		/* Map the AGP aperture write-combining when the MTRR add
		 * succeeds. */
		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->agp_mtrr = 1;
		}
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->dev))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	/* NOTE(review): also reached when drm_gem_init() itself failed —
	 * confirm drm_gem_destroy() tolerates partial/no GEM init. */
	drm_gem_destroy(dev);
error:
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	/* NOTE(review): vbl_lock is not initialized in drm_attach();
	 * presumably set up during vblank init — verify it is valid on
	 * early failure paths before this lockuninit(). */
	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);

	return retcode;
}
1163 
1164 /*
1165  * Stub is needed for devfs
1166  */
/*
 * devfs close entry point.  All real teardown happens in the cdevpriv
 * destructor (drm_cdevpriv_dtor); this stub only satisfies devfs.
 */
int drm_close(struct dev_close_args *ap)
{
	return (0);
}
1171 
/*
 * Per-open-file destructor, invoked when the last reference to the file's
 * cdevpriv goes away.  Releases the hardware lock if this file held it,
 * reclaims DMA buffers, runs the driver's preclose/postclose hooks, and
 * performs last-close teardown when this was the final open.
 */
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	/* This file holds the hardware lock: reclaim and free it. */
	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		/* Sleep until the lock becomes free or we are interrupted. */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
		}
	}

	/* Legacy DMA drivers without their own reclaim hook. */
	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_legacy_reclaim_buffers(dev, file_priv);

	/* Drop SIGIO ownership so no further signals target this file. */
	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);


	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}
1253 
1254 int
1255 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1256     struct sysctl_oid *top)
1257 {
1258 	struct sysctl_oid *oid;
1259 
1260 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1261 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1262 	     dev->pci_slot, dev->pci_func);
1263 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1264 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1265 	if (oid == NULL)
1266 		return (ENOMEM);
1267 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1268 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1269 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1270 	if (oid == NULL)
1271 		return (ENOMEM);
1272 
1273 	return (0);
1274 }
1275 
1276 int
1277 drm_mmap_single(struct dev_mmap_single_args *ap)
1278 {
1279 	struct drm_device *dev;
1280 	struct cdev *kdev = ap->a_head.a_dev;
1281 	vm_ooffset_t *offset = ap->a_offset;
1282 	vm_size_t size = ap->a_size;
1283 	struct vm_object **obj_res = ap->a_object;
1284 	int nprot = ap->a_nprot;
1285 
1286 	dev = drm_get_device_from_kdev(kdev);
1287 	if (dev->drm_ttm_bdev != NULL) {
1288 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1289 		    obj_res, nprot));
1290 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1291 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1292 	} else {
1293 		return (ENODEV);
1294 	}
1295 }
1296 
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

/* Linux reserves the 'd' (0x64) ioctl group for DRM. */
#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

/* Handler registered with the Linux compat layer for the DRM ioctl range. */
static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

1314 static int
1315 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1316 {
1317 	int error;
1318 	int cmd = args->cmd;
1319 
1320 	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1321 	if (cmd & LINUX_IOC_IN)
1322 		args->cmd |= IOC_IN;
1323 	if (cmd & LINUX_IOC_OUT)
1324 		args->cmd |= IOC_OUT;
1325 
1326 	error = ioctl(p, (struct ioctl_args *)args);
1327 
1328 	return error;
1329 }
1330 #endif /* DRM_LINUX */
1331 
1332 static int
1333 drm_core_init(void *arg)
1334 {
1335 
1336 	drm_global_init();
1337 
1338 #if DRM_LINUX
1339 	linux_ioctl_register_handler(&drm_handler);
1340 #endif /* DRM_LINUX */
1341 
1342 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1343 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1344 	return 0;
1345 }
1346 
/*
 * Module-unload hook (see SYSUNINIT below): undo drm_core_init() in
 * reverse order.
 */
static void
drm_core_exit(void *arg)
{
#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}
1357 
/* Run core init/teardown at driver-subsystem boot and unload time. */
SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);
1362 
1363 
1364 #include <linux/dmi.h>
1365 
1366 /*
1367  * Check if dmi_system_id structure matches system DMI data
1368  */
/*
 * Check whether one dmi_system_id entry matches this machine's DMI
 * (SMBIOS) data, read from the kernel environment.  Every slot in
 * dsi->matches other than DMI_NONE must match for the entry as a whole
 * to match; the first failing slot aborts with found == false.
 */
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;
	bool found = false;
	char *sys_vendor, *board_vendor, *product_name, *board_name;

	/* kgetenv() returns NULL when an SMBIOS tuple is not present. */
	sys_vendor = kgetenv("smbios.system.maker");
	board_vendor = kgetenv("smbios.planar.maker");
	product_name = kgetenv("smbios.system.product");
	board_name = kgetenv("smbios.planar.product");

	/*
	 * In each case below, "break" leaves the switch and moves on to the
	 * next slot; "goto done" bails out with found == false, including
	 * when the corresponding SMBIOS string is unavailable.
	 *
	 * NOTE(review): the match field is named "substr" but is compared
	 * with strcmp() (full-string equality); Linux's dmi matching uses a
	 * substring test — confirm exact-match is intended here.
	 */
	for (i = 0; i < NELEM(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
			if (sys_vendor != NULL &&
			    !strcmp(sys_vendor, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_BOARD_VENDOR:
			if (board_vendor != NULL &&
			    !strcmp(board_vendor, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_PRODUCT_NAME:
			if (product_name != NULL &&
			    !strcmp(product_name, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_BOARD_NAME:
			if (board_name != NULL &&
			    !strcmp(board_name, dsi->matches[i].substr))
				break;
			else
				goto done;
		default:
			goto done;
		}
	}
	/* All slots matched (or were DMI_NONE). */
	found = true;

done:
	/* Strings from kgetenv() must be released with kfreeenv(). */
	if (sys_vendor != NULL)
		kfreeenv(sys_vendor);
	if (board_vendor != NULL)
		kfreeenv(board_vendor);
	if (product_name != NULL)
		kfreeenv(product_name);
	if (board_name != NULL)
		kfreeenv(board_name);

	return found;
}
1428 
1429 int dmi_check_system(const struct dmi_system_id *sysid)
1430 {
1431 	const struct dmi_system_id *dsi;
1432 	int num = 0;
1433 
1434 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1435 		if (dmi_found(dsi)) {
1436 			num++;
1437 			if (dsi->callback && dsi->callback(dsi))
1438 				break;
1439 		}
1440 	}
1441 	return (num);
1442 }
1443