xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 557118b7)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
/* Debug-category bitmask; 0 disables debug output, settable at runtime. */
unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

/* Delay before an unused vblank interrupt is auto-disabled. */
int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */

/* Maximum acceptable error when computing vblank timestamps. */
unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

/* Module metadata and tunable knobs exposed under the drm module. */
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
60 
61 #if 0
62 static DEFINE_SPINLOCK(drm_minor_lock);
63 static struct idr drm_minors_idr;
64 #endif
65 
66 struct class *drm_class;
67 #if 0
68 static struct dentry *drm_debugfs_root;
69 #endif
70 
/*
 * drm_err - log a DRM error message.
 *
 * Stub on DragonFly: the Linux printk-based implementation below is not
 * ported yet, so the message is currently discarded.  The function is kept
 * so shared code that calls drm_err() still links.
 *
 * Fix: the disabled body used to declare an uninitialized `int r` and end
 * with `return r;` — returning a value (an indeterminate one, at that)
 * from a void function, which would fail to compile the moment the block
 * was re-enabled.  Both have been removed.
 */
void drm_err(const char *format, ...)
{
#if 0
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
	       __builtin_return_address(0), &vaf);

	va_end(args);
#endif
}
91 EXPORT_SYMBOL(drm_err);
92 
/*
 * drm_ut_debug_printk - emit a debug message tagged with the calling
 * function's name.
 *
 * Stub on DragonFly: the Linux printk-based implementation below is
 * disabled, so the message is currently discarded.
 */
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
#if 0
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
#endif
}
108 EXPORT_SYMBOL(drm_ut_debug_printk);
109 
110 #if 0
111 struct drm_master *drm_master_create(struct drm_minor *minor)
112 {
113 	struct drm_master *master;
114 
115 	master = kzalloc(sizeof(*master), GFP_KERNEL);
116 	if (!master)
117 		return NULL;
118 
119 	kref_init(&master->refcount);
120 	spin_lock_init(&master->lock.spinlock);
121 	init_waitqueue_head(&master->lock.lock_queue);
122 	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
123 		kfree(master);
124 		return NULL;
125 	}
126 	INIT_LIST_HEAD(&master->magicfree);
127 	master->minor = minor;
128 
129 	return master;
130 }
131 
132 struct drm_master *drm_master_get(struct drm_master *master)
133 {
134 	kref_get(&master->refcount);
135 	return master;
136 }
137 EXPORT_SYMBOL(drm_master_get);
138 
139 static void drm_master_destroy(struct kref *kref)
140 {
141 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
142 	struct drm_device *dev = master->minor->dev;
143 	struct drm_map_list *r_list, *list_temp;
144 
145 	mutex_lock(&dev->struct_mutex);
146 	if (dev->driver->master_destroy)
147 		dev->driver->master_destroy(dev, master);
148 
149 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
150 		if (r_list->master == master) {
151 			drm_legacy_rmmap_locked(dev, r_list->map);
152 			r_list = NULL;
153 		}
154 	}
155 
156 	if (master->unique) {
157 		kfree(master->unique);
158 		master->unique = NULL;
159 		master->unique_len = 0;
160 	}
161 
162 	drm_ht_remove(&master->magiclist);
163 
164 	mutex_unlock(&dev->struct_mutex);
165 	kfree(master);
166 }
167 
168 void drm_master_put(struct drm_master **master)
169 {
170 	kref_put(&(*master)->refcount, drm_master_destroy);
171 	*master = NULL;
172 }
173 EXPORT_SYMBOL(drm_master_put);
174 #endif
175 
176 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
177 			struct drm_file *file_priv)
178 {
179 	DRM_DEBUG("setmaster\n");
180 
181 	if (file_priv->master != 0)
182 		return (0);
183 
184 	return (-EPERM);
185 }
186 
187 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
188 			 struct drm_file *file_priv)
189 {
190 	DRM_DEBUG("dropmaster\n");
191 	if (file_priv->master != 0)
192 		return -EINVAL;
193 	return 0;
194 }
195 
196 #if 0
197 /*
198  * DRM Minors
199  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
200  * of them is represented by a drm_minor object. Depending on the capabilities
201  * of the device-driver, different interfaces are registered.
202  *
203  * Minors can be accessed via dev->$minor_name. This pointer is either
204  * NULL or a valid drm_minor pointer and stays valid as long as the device is
205  * valid. This means, DRM minors have the same life-time as the underlying
206  * device. However, this doesn't mean that the minor is active. Minors are
207  * registered and unregistered dynamically according to device-state.
208  */
209 
210 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
211 					     unsigned int type)
212 {
213 	switch (type) {
214 	case DRM_MINOR_LEGACY:
215 		return &dev->primary;
216 	case DRM_MINOR_RENDER:
217 		return &dev->render;
218 	case DRM_MINOR_CONTROL:
219 		return &dev->control;
220 	default:
221 		return NULL;
222 	}
223 }
224 
225 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
226 {
227 	struct drm_minor *minor;
228 	unsigned long flags;
229 	int r;
230 
231 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
232 	if (!minor)
233 		return -ENOMEM;
234 
235 	minor->type = type;
236 	minor->dev = dev;
237 
238 	idr_preload(GFP_KERNEL);
239 	spin_lock_irqsave(&drm_minor_lock, flags);
240 	r = idr_alloc(&drm_minors_idr,
241 		      NULL,
242 		      64 * type,
243 		      64 * (type + 1),
244 		      GFP_NOWAIT);
245 	spin_unlock_irqrestore(&drm_minor_lock, flags);
246 	idr_preload_end();
247 
248 	if (r < 0)
249 		goto err_free;
250 
251 	minor->index = r;
252 
253 	minor->kdev = drm_sysfs_minor_alloc(minor);
254 	if (IS_ERR(minor->kdev)) {
255 		r = PTR_ERR(minor->kdev);
256 		goto err_index;
257 	}
258 
259 	*drm_minor_get_slot(dev, type) = minor;
260 	return 0;
261 
262 err_index:
263 	spin_lock_irqsave(&drm_minor_lock, flags);
264 	idr_remove(&drm_minors_idr, minor->index);
265 	spin_unlock_irqrestore(&drm_minor_lock, flags);
266 err_free:
267 	kfree(minor);
268 	return r;
269 }
270 
271 static void drm_minor_free(struct drm_device *dev, unsigned int type)
272 {
273 	struct drm_minor **slot, *minor;
274 	unsigned long flags;
275 
276 	slot = drm_minor_get_slot(dev, type);
277 	minor = *slot;
278 	if (!minor)
279 		return;
280 
281 	drm_mode_group_destroy(&minor->mode_group);
282 	put_device(minor->kdev);
283 
284 	spin_lock_irqsave(&drm_minor_lock, flags);
285 	idr_remove(&drm_minors_idr, minor->index);
286 	spin_unlock_irqrestore(&drm_minor_lock, flags);
287 
288 	kfree(minor);
289 	*slot = NULL;
290 }
291 
292 static int drm_minor_register(struct drm_device *dev, unsigned int type)
293 {
294 	struct drm_minor *minor;
295 	unsigned long flags;
296 	int ret;
297 
298 	DRM_DEBUG("\n");
299 
300 	minor = *drm_minor_get_slot(dev, type);
301 	if (!minor)
302 		return 0;
303 
304 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
305 	if (ret) {
306 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
307 		return ret;
308 	}
309 
310 	ret = device_add(minor->kdev);
311 	if (ret)
312 		goto err_debugfs;
313 
314 	/* replace NULL with @minor so lookups will succeed from now on */
315 	spin_lock_irqsave(&drm_minor_lock, flags);
316 	idr_replace(&drm_minors_idr, minor, minor->index);
317 	spin_unlock_irqrestore(&drm_minor_lock, flags);
318 
319 	DRM_DEBUG("new minor registered %d\n", minor->index);
320 	return 0;
321 
322 err_debugfs:
323 	drm_debugfs_cleanup(minor);
324 	return ret;
325 }
326 
327 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
328 {
329 	struct drm_minor *minor;
330 	unsigned long flags;
331 
332 	minor = *drm_minor_get_slot(dev, type);
333 	if (!minor || !device_is_registered(minor->kdev))
334 		return;
335 
336 	/* replace @minor with NULL so lookups will fail from now on */
337 	spin_lock_irqsave(&drm_minor_lock, flags);
338 	idr_replace(&drm_minors_idr, NULL, minor->index);
339 	spin_unlock_irqrestore(&drm_minor_lock, flags);
340 
341 	device_del(minor->kdev);
342 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
343 	drm_debugfs_cleanup(minor);
344 }
345 
346 /**
347  * drm_minor_acquire - Acquire a DRM minor
348  * @minor_id: Minor ID of the DRM-minor
349  *
350  * Looks up the given minor-ID and returns the respective DRM-minor object. The
351  * refence-count of the underlying device is increased so you must release this
352  * object with drm_minor_release().
353  *
354  * As long as you hold this minor, it is guaranteed that the object and the
355  * minor->dev pointer will stay valid! However, the device may get unplugged and
356  * unregistered while you hold the minor.
357  *
358  * Returns:
359  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
360  * failure.
361  */
362 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
363 {
364 	struct drm_minor *minor;
365 	unsigned long flags;
366 
367 	spin_lock_irqsave(&drm_minor_lock, flags);
368 	minor = idr_find(&drm_minors_idr, minor_id);
369 	if (minor)
370 		drm_dev_ref(minor->dev);
371 	spin_unlock_irqrestore(&drm_minor_lock, flags);
372 
373 	if (!minor) {
374 		return ERR_PTR(-ENODEV);
375 	} else if (drm_device_is_unplugged(minor->dev)) {
376 		drm_dev_unref(minor->dev);
377 		return ERR_PTR(-ENODEV);
378 	}
379 
380 	return minor;
381 }
382 
383 /**
384  * drm_minor_release - Release DRM minor
385  * @minor: Pointer to DRM minor object
386  *
387  * Release a minor that was previously acquired via drm_minor_acquire().
388  */
389 void drm_minor_release(struct drm_minor *minor)
390 {
391 	drm_dev_unref(minor->dev);
392 }
393 
394 /**
395  * drm_put_dev - Unregister and release a DRM device
396  * @dev: DRM device
397  *
398  * Called at module unload time or when a PCI device is unplugged.
399  *
400  * Use of this function is discouraged. It will eventually go away completely.
401  * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
402  *
403  * Cleans up all DRM device, calling drm_lastclose().
404  */
405 void drm_put_dev(struct drm_device *dev)
406 {
407 	DRM_DEBUG("\n");
408 
409 	if (!dev) {
410 		DRM_ERROR("cleanup called no dev\n");
411 		return;
412 	}
413 
414 	drm_dev_unregister(dev);
415 	drm_dev_unref(dev);
416 }
417 EXPORT_SYMBOL(drm_put_dev);
418 
419 void drm_unplug_dev(struct drm_device *dev)
420 {
421 	/* for a USB device */
422 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
423 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
424 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
425 
426 	mutex_lock(&drm_global_mutex);
427 
428 	drm_device_set_unplugged(dev);
429 
430 	if (dev->open_count == 0) {
431 		drm_put_dev(dev);
432 	}
433 	mutex_unlock(&drm_global_mutex);
434 }
435 EXPORT_SYMBOL(drm_unplug_dev);
436 
437 /*
438  * DRM internal mount
439  * We want to be able to allocate our own "struct address_space" to control
440  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
441  * stand-alone address_space objects, so we need an underlying inode. As there
442  * is no way to allocate an independent inode easily, we need a fake internal
443  * VFS mount-point.
444  *
445  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
446  * frees it again. You are allowed to use iget() and iput() to get references to
447  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
448  * drm_fs_inode_free() call (which does not have to be the last iput()).
449  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
450  * between multiple inode-users. You could, technically, call
451  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
452  * iput(), but this way you'd end up with a new vfsmount for each inode.
453  */
454 
455 static int drm_fs_cnt;
456 static struct vfsmount *drm_fs_mnt;
457 
458 static const struct dentry_operations drm_fs_dops = {
459 	.d_dname	= simple_dname,
460 };
461 
462 static const struct super_operations drm_fs_sops = {
463 	.statfs		= simple_statfs,
464 };
465 
466 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
467 				   const char *dev_name, void *data)
468 {
469 	return mount_pseudo(fs_type,
470 			    "drm:",
471 			    &drm_fs_sops,
472 			    &drm_fs_dops,
473 			    0x010203ff);
474 }
475 
476 static struct file_system_type drm_fs_type = {
477 	.name		= "drm",
478 	.owner		= THIS_MODULE,
479 	.mount		= drm_fs_mount,
480 	.kill_sb	= kill_anon_super,
481 };
482 
483 static struct inode *drm_fs_inode_new(void)
484 {
485 	struct inode *inode;
486 	int r;
487 
488 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
489 	if (r < 0) {
490 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
491 		return ERR_PTR(r);
492 	}
493 
494 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
495 	if (IS_ERR(inode))
496 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
497 
498 	return inode;
499 }
500 
501 static void drm_fs_inode_free(struct inode *inode)
502 {
503 	if (inode) {
504 		iput(inode);
505 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
506 	}
507 }
508 
509 /**
510  * drm_dev_alloc - Allocate new DRM device
511  * @driver: DRM driver to allocate device for
512  * @parent: Parent device object
513  *
514  * Allocate and initialize a new DRM device. No device registration is done.
515  * Call drm_dev_register() to advertice the device to user space and register it
516  * with other core subsystems.
517  *
518  * The initial ref-count of the object is 1. Use drm_dev_ref() and
519  * drm_dev_unref() to take and drop further ref-counts.
520  *
521  * Note that for purely virtual devices @parent can be NULL.
522  *
523  * RETURNS:
524  * Pointer to new DRM device, or NULL if out of memory.
525  */
526 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
527 				 struct device *parent)
528 {
529 	struct drm_device *dev;
530 	int ret;
531 
532 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
533 	if (!dev)
534 		return NULL;
535 
536 	kref_init(&dev->ref);
537 	dev->dev = parent;
538 	dev->driver = driver;
539 
540 	INIT_LIST_HEAD(&dev->filelist);
541 	INIT_LIST_HEAD(&dev->ctxlist);
542 	INIT_LIST_HEAD(&dev->vmalist);
543 	INIT_LIST_HEAD(&dev->maplist);
544 	INIT_LIST_HEAD(&dev->vblank_event_list);
545 
546 	spin_lock_init(&dev->buf_lock);
547 	spin_lock_init(&dev->event_lock);
548 	mutex_init(&dev->struct_mutex);
549 	mutex_init(&dev->ctxlist_mutex);
550 	mutex_init(&dev->master_mutex);
551 
552 	dev->anon_inode = drm_fs_inode_new();
553 	if (IS_ERR(dev->anon_inode)) {
554 		ret = PTR_ERR(dev->anon_inode);
555 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
556 		goto err_free;
557 	}
558 
559 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
560 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
561 		if (ret)
562 			goto err_minors;
563 	}
564 
565 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
566 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
567 		if (ret)
568 			goto err_minors;
569 	}
570 
571 	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
572 	if (ret)
573 		goto err_minors;
574 
575 	if (drm_ht_create(&dev->map_hash, 12))
576 		goto err_minors;
577 
578 	ret = drm_legacy_ctxbitmap_init(dev);
579 	if (ret) {
580 		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
581 		goto err_ht;
582 	}
583 
584 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
585 		ret = drm_gem_init(dev);
586 		if (ret) {
587 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
588 			goto err_ctxbitmap;
589 		}
590 	}
591 
592 	return dev;
593 
594 err_ctxbitmap:
595 	drm_legacy_ctxbitmap_cleanup(dev);
596 err_ht:
597 	drm_ht_remove(&dev->map_hash);
598 err_minors:
599 	drm_minor_free(dev, DRM_MINOR_LEGACY);
600 	drm_minor_free(dev, DRM_MINOR_RENDER);
601 	drm_minor_free(dev, DRM_MINOR_CONTROL);
602 	drm_fs_inode_free(dev->anon_inode);
603 err_free:
604 	mutex_destroy(&dev->master_mutex);
605 	kfree(dev);
606 	return NULL;
607 }
608 EXPORT_SYMBOL(drm_dev_alloc);
609 
610 static void drm_dev_release(struct kref *ref)
611 {
612 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
613 
614 	if (drm_core_check_feature(dev, DRIVER_GEM))
615 		drm_gem_destroy(dev);
616 
617 	drm_legacy_ctxbitmap_cleanup(dev);
618 	drm_ht_remove(&dev->map_hash);
619 	drm_fs_inode_free(dev->anon_inode);
620 
621 	drm_minor_free(dev, DRM_MINOR_LEGACY);
622 	drm_minor_free(dev, DRM_MINOR_RENDER);
623 	drm_minor_free(dev, DRM_MINOR_CONTROL);
624 
625 	mutex_destroy(&dev->master_mutex);
626 	kfree(dev->unique);
627 	kfree(dev);
628 }
629 
630 /**
631  * drm_dev_ref - Take reference of a DRM device
632  * @dev: device to take reference of or NULL
633  *
634  * This increases the ref-count of @dev by one. You *must* already own a
635  * reference when calling this. Use drm_dev_unref() to drop this reference
636  * again.
637  *
638  * This function never fails. However, this function does not provide *any*
639  * guarantee whether the device is alive or running. It only provides a
640  * reference to the object and the memory associated with it.
641  */
642 void drm_dev_ref(struct drm_device *dev)
643 {
644 	if (dev)
645 		kref_get(&dev->ref);
646 }
647 EXPORT_SYMBOL(drm_dev_ref);
648 
649 /**
650  * drm_dev_unref - Drop reference of a DRM device
651  * @dev: device to drop reference of or NULL
652  *
653  * This decreases the ref-count of @dev by one. The device is destroyed if the
654  * ref-count drops to zero.
655  */
656 void drm_dev_unref(struct drm_device *dev)
657 {
658 	if (dev)
659 		kref_put(&dev->ref, drm_dev_release);
660 }
661 EXPORT_SYMBOL(drm_dev_unref);
662 
663 /**
664  * drm_dev_register - Register DRM device
665  * @dev: Device to register
666  * @flags: Flags passed to the driver's .load() function
667  *
668  * Register the DRM device @dev with the system, advertise device to user-space
669  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
670  * previously.
671  *
672  * Never call this twice on any device!
673  *
674  * RETURNS:
675  * 0 on success, negative error code on failure.
676  */
677 int drm_dev_register(struct drm_device *dev, unsigned long flags)
678 {
679 	int ret;
680 
681 	mutex_lock(&drm_global_mutex);
682 
683 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
684 	if (ret)
685 		goto err_minors;
686 
687 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
688 	if (ret)
689 		goto err_minors;
690 
691 	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
692 	if (ret)
693 		goto err_minors;
694 
695 	if (dev->driver->load) {
696 		ret = dev->driver->load(dev, flags);
697 		if (ret)
698 			goto err_minors;
699 	}
700 
701 	/* setup grouping for legacy outputs */
702 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
703 		ret = drm_mode_group_init_legacy_group(dev,
704 				&dev->primary->mode_group);
705 		if (ret)
706 			goto err_unload;
707 	}
708 
709 	ret = 0;
710 	goto out_unlock;
711 
712 err_unload:
713 	if (dev->driver->unload)
714 		dev->driver->unload(dev);
715 err_minors:
716 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
717 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
718 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
719 out_unlock:
720 	mutex_unlock(&drm_global_mutex);
721 	return ret;
722 }
723 EXPORT_SYMBOL(drm_dev_register);
724 
725 /**
726  * drm_dev_unregister - Unregister DRM device
727  * @dev: Device to unregister
728  *
729  * Unregister the DRM device from the system. This does the reverse of
730  * drm_dev_register() but does not deallocate the device. The caller must call
731  * drm_dev_unref() to drop their final reference.
732  */
733 void drm_dev_unregister(struct drm_device *dev)
734 {
735 	struct drm_map_list *r_list, *list_temp;
736 
737 	drm_lastclose(dev);
738 
739 	if (dev->driver->unload)
740 		dev->driver->unload(dev);
741 
742 	if (dev->agp)
743 		drm_pci_agp_destroy(dev);
744 
745 	drm_vblank_cleanup(dev);
746 
747 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
748 		drm_legacy_rmmap(dev, r_list->map);
749 
750 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
751 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
752 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
753 }
754 EXPORT_SYMBOL(drm_dev_unregister);
755 
756 /**
757  * drm_dev_set_unique - Set the unique name of a DRM device
758  * @dev: device of which to set the unique name
759  * @fmt: format string for unique name
760  *
761  * Sets the unique name of a DRM device using the specified format string and
762  * a variable list of arguments. Drivers can use this at driver probe time if
763  * the unique name of the devices they drive is static.
764  *
765  * Return: 0 on success or a negative error code on failure.
766  */
767 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
768 {
769 	va_list ap;
770 
771 	kfree(dev->unique);
772 
773 	va_start(ap, fmt);
774 	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
775 	va_end(ap);
776 
777 	return dev->unique ? 0 : -ENOMEM;
778 }
779 EXPORT_SYMBOL(drm_dev_set_unique);
780 #endif
781 
782 /*
783  * DRM Core
784  * The DRM core module initializes all global DRM objects and makes them
785  * available to drivers. Once setup, drivers can probe their respective
786  * devices.
787  * Currently, core management includes:
788  *  - The "DRM-Global" key/value database
789  *  - Global ID management for connectors
790  *  - DRM major number allocation
791  *  - DRM minor management
792  *  - DRM sysfs class
793  *  - DRM debugfs root
794  *
795  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
796  * interface registered on a DRM device, you can request minor numbers from DRM
797  * core. DRM core takes care of major-number management and char-dev
798  * registration. A stub ->open() callback forwards any open() requests to the
799  * registered minor.
800  */
801 
802 #if 0
803 static int drm_stub_open(struct inode *inode, struct file *filp)
804 {
805 	const struct file_operations *new_fops;
806 	struct drm_minor *minor;
807 	int err;
808 
809 	DRM_DEBUG("\n");
810 
811 	mutex_lock(&drm_global_mutex);
812 	minor = drm_minor_acquire(iminor(inode));
813 	if (IS_ERR(minor)) {
814 		err = PTR_ERR(minor);
815 		goto out_unlock;
816 	}
817 
818 	new_fops = fops_get(minor->dev->driver->fops);
819 	if (!new_fops) {
820 		err = -ENODEV;
821 		goto out_release;
822 	}
823 
824 	replace_fops(filp, new_fops);
825 	if (filp->f_op->open)
826 		err = filp->f_op->open(inode, filp);
827 	else
828 		err = 0;
829 
830 out_release:
831 	drm_minor_release(minor);
832 out_unlock:
833 	mutex_unlock(&drm_global_mutex);
834 	return err;
835 }
836 
837 static const struct file_operations drm_stub_fops = {
838 	.owner = THIS_MODULE,
839 	.open = drm_stub_open,
840 	.llseek = noop_llseek,
841 };
842 
843 static int __init drm_core_init(void)
844 {
845 	int ret = -ENOMEM;
846 
847 	drm_global_init();
848 	drm_connector_ida_init();
849 	idr_init(&drm_minors_idr);
850 
851 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
852 		goto err_p1;
853 
854 	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
855 	if (IS_ERR(drm_class)) {
856 		printk(KERN_ERR "DRM: Error creating drm class.\n");
857 		ret = PTR_ERR(drm_class);
858 		goto err_p2;
859 	}
860 
861 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
862 	if (!drm_debugfs_root) {
863 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
864 		ret = -1;
865 		goto err_p3;
866 	}
867 
868 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
869 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
870 	return 0;
871 err_p3:
872 	drm_sysfs_destroy();
873 err_p2:
874 	unregister_chrdev(DRM_MAJOR, "drm");
875 
876 	idr_destroy(&drm_minors_idr);
877 err_p1:
878 	return ret;
879 }
880 
881 static void __exit drm_core_exit(void)
882 {
883 	debugfs_remove(drm_debugfs_root);
884 	drm_sysfs_destroy();
885 
886 	unregister_chrdev(DRM_MAJOR, "drm");
887 
888 	drm_connector_ida_destroy();
889 	idr_destroy(&drm_minors_idr);
890 }
891 
892 module_init(drm_core_init);
893 module_exit(drm_core_exit);
894 #endif
895 
896 #include <sys/devfs.h>
897 
898 #include <linux/export.h>
899 #include <linux/dmi.h>
900 #include <drm/drmP.h>
901 #include <drm/drm_core.h>
902 
903 #if DRM_DEBUG_DEFAULT_ON == 1
904 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
905     DRM_DEBUGBITS_FAILED_IOCTL)
906 #elif DRM_DEBUG_DEFAULT_ON == 2
907 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
908     DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
909 #else
910 #define DRM_DEBUGBITS_ON (0x0)
911 #endif
912 
913 int drm_notyet_flag = 0;
914 
915 static int drm_load(struct drm_device *dev);
916 drm_pci_id_list_t *drm_find_description(int vendor, int device,
917     drm_pci_id_list_t *idlist);
918 
919 #define DRIVER_SOFTC(unit) \
920 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
921 
922 static int
923 drm_modevent(module_t mod, int type, void *data)
924 {
925 
926 	switch (type) {
927 	case MOD_LOAD:
928 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
929 		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
930 		break;
931 	}
932 	return (0);
933 }
934 
/* newbus module descriptor: ties the "drm" module to drm_modevent. */
static moduledata_t drm_mod = {
	"drm",
	drm_modevent,
	0
};
DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drm, 1);
/* drm requires the agp, pci and iicbus modules to be present first. */
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, iicbus, 1, 1, 1);
945 
/*
 * Character-device switch for the /dev/dri/card%d nodes.
 * D_TRACKCLOSE asks devfs to deliver every close so per-open state can
 * be torn down; D_MPSAFE lets the handlers run without the MP lock.
 */
static struct dev_ops drm_cdevsw = {
	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
	.d_open =	drm_open,
	.d_close =	drm_close,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_kqfilter =	drm_kqfilter,
	.d_mmap =	drm_mmap,
	.d_mmap_single = drm_mmap_single,
};

/* hw.drm sysctl tree; hw.drm.debug exposes the drm_debug bitmask. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
    "DRM debugging");
960 
961 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
962 {
963 	drm_pci_id_list_t *id_entry;
964 	int vendor, device;
965 
966 	vendor = pci_get_vendor(kdev);
967 	device = pci_get_device(kdev);
968 
969 	if (pci_get_class(kdev) != PCIC_DISPLAY)
970 		return ENXIO;
971 
972 	id_entry = drm_find_description(vendor, device, idlist);
973 	if (id_entry != NULL) {
974 		if (!device_get_desc(kdev)) {
975 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
976 			device_set_desc(kdev, id_entry->name);
977 		}
978 		return 0;
979 	}
980 
981 	return ENXIO;
982 }
983 
984 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
985 {
986 	struct drm_device *dev;
987 	drm_pci_id_list_t *id_entry;
988 	int unit, error;
989 	u_int irq_flags;
990 	int msi_enable;
991 
992 	unit = device_get_unit(kdev);
993 	dev = device_get_softc(kdev);
994 
995 	if (!strcmp(device_get_name(kdev), "drmsub"))
996 		dev->dev = device_get_parent(kdev);
997 	else
998 		dev->dev = kdev;
999 
1000 	dev->pci_domain = pci_get_domain(dev->dev);
1001 	dev->pci_bus = pci_get_bus(dev->dev);
1002 	dev->pci_slot = pci_get_slot(dev->dev);
1003 	dev->pci_func = pci_get_function(dev->dev);
1004 	drm_init_pdev(dev->dev, &dev->pdev);
1005 
1006 	id_entry = drm_find_description(dev->pdev->vendor,
1007 	    dev->pdev->device, idlist);
1008 	dev->id_entry = id_entry;
1009 
1010 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1011 		msi_enable = 1;
1012 
1013 		dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
1014 		    &dev->irqrid, &irq_flags);
1015 
1016 		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
1017 		    &dev->irqrid, irq_flags);
1018 
1019 		if (!dev->irqr) {
1020 			return (ENOENT);
1021 		}
1022 
1023 		dev->irq = (int) rman_get_start(dev->irqr);
1024 	}
1025 
1026 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1027 	lwkt_serialize_init(&dev->irq_lock);
1028 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1029 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1030 
1031 	error = drm_load(dev);
1032 	if (error)
1033 		goto error;
1034 
1035 	error = drm_create_cdevs(kdev);
1036 	if (error)
1037 		goto error;
1038 
1039 	return (error);
1040 error:
1041 	if (dev->irqr) {
1042 		bus_release_resource(dev->dev, SYS_RES_IRQ,
1043 		    dev->irqrid, dev->irqr);
1044 	}
1045 	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1046 		pci_release_msi(dev->dev);
1047 	}
1048 	return (error);
1049 }
1050 
1051 int
1052 drm_create_cdevs(device_t kdev)
1053 {
1054 	struct drm_device *dev;
1055 	int error, unit;
1056 
1057 	unit = device_get_unit(kdev);
1058 	dev = device_get_softc(kdev);
1059 
1060 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1061 				DRM_DEV_MODE, "dri/card%d", unit);
1062 	error = 0;
1063 	if (error == 0)
1064 		dev->devnode->si_drv1 = dev;
1065 	return (error);
1066 }
1067 
1068 #ifndef DRM_DEV_NAME
1069 #define DRM_DEV_NAME "drm"
1070 #endif
1071 
1072 devclass_t drm_devclass;
1073 
1074 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1075     drm_pci_id_list_t *idlist)
1076 {
1077 	int i = 0;
1078 
1079 	for (i = 0; idlist[i].vendor != 0; i++) {
1080 		if ((idlist[i].vendor == vendor) &&
1081 		    ((idlist[i].device == device) ||
1082 		    (idlist[i].device == 0))) {
1083 			return &idlist[i];
1084 		}
1085 	}
1086 	return NULL;
1087 }
1088 
/*
 * drm_load - main one-time initialization for a DRM device.
 *
 * Sets up the map/file lists, sysctl tree and statistics counters,
 * initializes AGP (and an MTRR over the aperture) when applicable,
 * initializes GEM for DRIVER_GEM drivers, and finally runs the driver's
 * own load callback.  Called from drm_attach().
 *
 * Returns 0 on success or an errno; on failure all state set up here is
 * torn down again.
 */
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev->maplist);

	drm_mem_init();
	drm_sysctl_init(dev);
	INIT_LIST_HEAD(&dev->filelist);

	/* Statistics counter slots exported via the counters/types arrays. */
	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		/* DRIVER_REQUIRE_AGP drivers cannot run without an AGP bridge. */
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		/* Map the AGP aperture write-combining when it exists. */
		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->agp_mtrr = 1;
		}
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->dev))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	/*
	 * NOTE(review): this label is also reached when drm_gem_init()
	 * itself failed, or when driver->load() fails on a non-GEM driver,
	 * so drm_gem_destroy() runs against a GEM state that was never
	 * (fully) initialized — confirm drm_gem_destroy() tolerates that.
	 */
	drm_gem_destroy(dev);
error:
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	/*
	 * NOTE(review): drm_attach() initializes dev_lock, event_lock and
	 * struct_mutex, but vbl_lock is not visibly initialized on this
	 * path — verify it is set up before lockuninit() is safe here.
	 */
	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);

	return retcode;
}
1178 
1179 /*
1180  * Stub is needed for devfs
1181  */
/*
 * devfs requires a d_close entry point even though DRM releases its
 * per-open state through the cdevpriv destructor instead; nothing to
 * do here.
 */
int drm_close(struct dev_close_args *ap)
{
	return (0);
}
1186 
/*
 * Per-open destructor, invoked when the last reference to a file's
 * cdevpriv data is dropped.  This inlines the historical drm_release()
 * path: driver preclose/postclose hooks, GEM release, hardware-lock
 * recovery for a dying lock holder, legacy DMA buffer reclamation, and
 * lastclose handling when the final open goes away.  Runs with the
 * device lock held for the whole body.
 */
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	/* Let the driver clean up before we start tearing down state. */
	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	/*
	 * If this file holds the hardware lock, free it so a dead
	 * process cannot wedge the device; give the driver a chance to
	 * reclaim its buffers while the lock is still held.
	 */
	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		/*
		 * NOTE(review): this wait loop never attempts to take the
		 * lock itself and exits only on signal (PCATCH) or device
		 * unregistration -- preserved as-is; verify intent against
		 * the drm_lock code before changing.
		 */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
		}
	}

	/* Legacy (non-GEM) DMA buffers still owned by this file. */
	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_legacy_reclaim_buffers(dev, file_priv);

	/* Release any async-I/O (SIGIO) ownership on the buffer queue. */
	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	/* Unlink this file from the device's open-file list. */
	list_del(&file_priv->lhead);


	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	/* Last close: tear down state shared by all opens of the device. */
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}
1268 
1269 int
1270 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1271     struct sysctl_oid *top)
1272 {
1273 	struct sysctl_oid *oid;
1274 
1275 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1276 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1277 	     dev->pci_slot, dev->pci_func);
1278 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1279 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1280 	if (oid == NULL)
1281 		return (ENOMEM);
1282 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1283 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1284 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1285 	if (oid == NULL)
1286 		return (ENOMEM);
1287 
1288 	return (0);
1289 }
1290 
1291 int
1292 drm_mmap_single(struct dev_mmap_single_args *ap)
1293 {
1294 	struct drm_device *dev;
1295 	struct cdev *kdev = ap->a_head.a_dev;
1296 	vm_ooffset_t *offset = ap->a_offset;
1297 	vm_size_t size = ap->a_size;
1298 	struct vm_object **obj_res = ap->a_object;
1299 	int nprot = ap->a_nprot;
1300 
1301 	dev = drm_get_device_from_kdev(kdev);
1302 	if (dev->drm_ttm_bdev != NULL) {
1303 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1304 		    obj_res, nprot));
1305 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1306 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1307 	} else {
1308 		return (ENODEV);
1309 	}
1310 }
1311 
1312 #if DRM_LINUX
1313 
1314 #include <sys/sysproto.h>
1315 
1316 MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
1317 
1318 #define LINUX_IOCTL_DRM_MIN		0x6400
1319 #define LINUX_IOCTL_DRM_MAX		0x64ff
1320 
1321 static linux_ioctl_function_t drm_linux_ioctl;
1322 static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
1323     LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
1324 
1325 /* The bits for in/out are switched on Linux */
1326 #define LINUX_IOC_IN	IOC_OUT
1327 #define LINUX_IOC_OUT	IOC_IN
1328 
1329 static int
1330 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1331 {
1332 	int error;
1333 	int cmd = args->cmd;
1334 
1335 	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1336 	if (cmd & LINUX_IOC_IN)
1337 		args->cmd |= IOC_IN;
1338 	if (cmd & LINUX_IOC_OUT)
1339 		args->cmd |= IOC_OUT;
1340 
1341 	error = ioctl(p, (struct ioctl_args *)args);
1342 
1343 	return error;
1344 }
1345 #endif /* DRM_LINUX */
1346 
1347 static int
1348 drm_core_init(void *arg)
1349 {
1350 
1351 	drm_global_init();
1352 
1353 #if DRM_LINUX
1354 	linux_ioctl_register_handler(&drm_handler);
1355 #endif /* DRM_LINUX */
1356 
1357 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1358 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1359 	return 0;
1360 }
1361 
static void
drm_core_exit(void *arg)
{
#if DRM_LINUX
	/* Stop translating the Linux DRM ioctl number range. */
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	/* Tear down the global DRM state set up by drm_core_init(). */
	drm_global_release();
}
1372 
1373 SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1374     drm_core_init, NULL);
1375 SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1376     drm_core_exit, NULL);
1377 
1378 
1379 #include <linux/dmi.h>
1380 
1381 /*
1382  * Check if dmi_system_id structure matches system DMI data
1383  */
1384 static bool
1385 dmi_found(const struct dmi_system_id *dsi)
1386 {
1387 	int i, slot;
1388 	bool found = false;
1389 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1390 
1391 	sys_vendor = kgetenv("smbios.system.maker");
1392 	board_vendor = kgetenv("smbios.planar.maker");
1393 	product_name = kgetenv("smbios.system.product");
1394 	board_name = kgetenv("smbios.planar.product");
1395 
1396 	for (i = 0; i < NELEM(dsi->matches); i++) {
1397 		slot = dsi->matches[i].slot;
1398 		switch (slot) {
1399 		case DMI_NONE:
1400 			break;
1401 		case DMI_SYS_VENDOR:
1402 			if (sys_vendor != NULL &&
1403 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1404 				break;
1405 			else
1406 				goto done;
1407 		case DMI_BOARD_VENDOR:
1408 			if (board_vendor != NULL &&
1409 			    !strcmp(board_vendor, dsi->matches[i].substr))
1410 				break;
1411 			else
1412 				goto done;
1413 		case DMI_PRODUCT_NAME:
1414 			if (product_name != NULL &&
1415 			    !strcmp(product_name, dsi->matches[i].substr))
1416 				break;
1417 			else
1418 				goto done;
1419 		case DMI_BOARD_NAME:
1420 			if (board_name != NULL &&
1421 			    !strcmp(board_name, dsi->matches[i].substr))
1422 				break;
1423 			else
1424 				goto done;
1425 		default:
1426 			goto done;
1427 		}
1428 	}
1429 	found = true;
1430 
1431 done:
1432 	if (sys_vendor != NULL)
1433 		kfreeenv(sys_vendor);
1434 	if (board_vendor != NULL)
1435 		kfreeenv(board_vendor);
1436 	if (product_name != NULL)
1437 		kfreeenv(product_name);
1438 	if (board_name != NULL)
1439 		kfreeenv(board_name);
1440 
1441 	return found;
1442 }
1443 
1444 int dmi_check_system(const struct dmi_system_id *sysid)
1445 {
1446 	const struct dmi_system_id *dsi;
1447 	int num = 0;
1448 
1449 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1450 		if (dmi_found(dsi)) {
1451 			num++;
1452 			if (dsi->callback && dsi->callback(dsi))
1453 				break;
1454 		}
1455 	}
1456 	return (num);
1457 }
1458