xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 279dd846)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 
35 unsigned int drm_debug = 0;	/* 1 to enable debug output */
36 EXPORT_SYMBOL(drm_debug);
37 
38 unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
39 
40 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
41 
42 /*
43  * Default to use monotonic timestamps for wait-for-vblank and page-flip
44  * complete events.
45  */
46 unsigned int drm_timestamp_monotonic = 1;
47 
48 MODULE_AUTHOR(CORE_AUTHOR);
49 MODULE_DESCRIPTION(CORE_DESC);
50 MODULE_LICENSE("GPL and additional rights");
51 MODULE_PARM_DESC(debug, "Enable debug output");
52 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
53 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
54 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
55 
56 module_param_named(debug, drm_debug, int, 0600);
57 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
58 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
59 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
60 
61 #if 0
62 static DEFINE_SPINLOCK(drm_minor_lock);
63 static struct idr drm_minors_idr;
64 #endif
65 
66 struct class *drm_class;
67 #if 0
68 static struct dentry *drm_debugfs_root;
69 #endif
70 
/*
 * drm_err() - DRM_ERROR-level message sink.
 *
 * The Linux implementation (printk of "[drm:func] *ERROR* ...") is kept
 * under #if 0 for reference; on this platform the function is a stub that
 * reports 0 bytes written.  Callers only use the return value as a byte
 * count, so returning 0 is safe.
 */
int drm_err(const char *func, const char *format, ...)
{
#if 0
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

	va_end(args);

	return r;
#endif
	return 0;
}
EXPORT_SYMBOL(drm_err);
91 EXPORT_SYMBOL(drm_err);
92 
/*
 * drm_ut_debug_printk() - DRM_DEBUG-level message sink.
 *
 * Stubbed out on this platform; the Linux printk-based body is retained
 * under #if 0 for reference.  Debug output here goes through the
 * DRM_DEBUG* macros / drm_debug sysctl instead.
 */
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
#if 0
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
#endif
}
EXPORT_SYMBOL(drm_ut_debug_printk);
108 EXPORT_SYMBOL(drm_ut_debug_printk);
109 
110 #if 0
111 struct drm_master *drm_master_create(struct drm_minor *minor)
112 {
113 	struct drm_master *master;
114 
115 	master = kzalloc(sizeof(*master), GFP_KERNEL);
116 	if (!master)
117 		return NULL;
118 
119 	kref_init(&master->refcount);
120 	spin_lock_init(&master->lock.spinlock);
121 	init_waitqueue_head(&master->lock.lock_queue);
122 	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
123 		kfree(master);
124 		return NULL;
125 	}
126 	INIT_LIST_HEAD(&master->magicfree);
127 	master->minor = minor;
128 
129 	return master;
130 }
131 
132 struct drm_master *drm_master_get(struct drm_master *master)
133 {
134 	kref_get(&master->refcount);
135 	return master;
136 }
137 EXPORT_SYMBOL(drm_master_get);
138 
139 static void drm_master_destroy(struct kref *kref)
140 {
141 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
142 	struct drm_magic_entry *pt, *next;
143 	struct drm_device *dev = master->minor->dev;
144 	struct drm_map_list *r_list, *list_temp;
145 
146 	mutex_lock(&dev->struct_mutex);
147 	if (dev->driver->master_destroy)
148 		dev->driver->master_destroy(dev, master);
149 
150 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
151 		if (r_list->master == master) {
152 			drm_rmmap_locked(dev, r_list->map);
153 			r_list = NULL;
154 		}
155 	}
156 
157 	if (master->unique) {
158 		kfree(master->unique);
159 		master->unique = NULL;
160 		master->unique_len = 0;
161 	}
162 
163 	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
164 		list_del(&pt->head);
165 		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
166 		kfree(pt);
167 	}
168 
169 	drm_ht_remove(&master->magiclist);
170 
171 	mutex_unlock(&dev->struct_mutex);
172 	kfree(master);
173 }
174 
175 void drm_master_put(struct drm_master **master)
176 {
177 	kref_put(&(*master)->refcount, drm_master_destroy);
178 	*master = NULL;
179 }
180 EXPORT_SYMBOL(drm_master_put);
181 #endif
182 
183 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
184 			struct drm_file *file_priv)
185 {
186 	DRM_DEBUG("setmaster\n");
187 
188 	if (file_priv->master != 0)
189 		return (0);
190 
191 	return (-EPERM);
192 }
193 
194 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
195 			 struct drm_file *file_priv)
196 {
197 	DRM_DEBUG("dropmaster\n");
198 	if (file_priv->master != 0)
199 		return -EINVAL;
200 	return 0;
201 }
202 
203 #if 0
204 /*
205  * DRM Minors
206  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
207  * of them is represented by a drm_minor object. Depending on the capabilities
208  * of the device-driver, different interfaces are registered.
209  *
210  * Minors can be accessed via dev->$minor_name. This pointer is either
211  * NULL or a valid drm_minor pointer and stays valid as long as the device is
212  * valid. This means, DRM minors have the same life-time as the underlying
213  * device. However, this doesn't mean that the minor is active. Minors are
214  * registered and unregistered dynamically according to device-state.
215  */
216 
217 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
218 					     unsigned int type)
219 {
220 	switch (type) {
221 	case DRM_MINOR_LEGACY:
222 		return &dev->primary;
223 	case DRM_MINOR_RENDER:
224 		return &dev->render;
225 	case DRM_MINOR_CONTROL:
226 		return &dev->control;
227 	default:
228 		return NULL;
229 	}
230 }
231 
232 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
233 {
234 	struct drm_minor *minor;
235 	unsigned long flags;
236 	int r;
237 
238 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
239 	if (!minor)
240 		return -ENOMEM;
241 
242 	minor->type = type;
243 	minor->dev = dev;
244 
245 	idr_preload(GFP_KERNEL);
246 	spin_lock_irqsave(&drm_minor_lock, flags);
247 	r = idr_alloc(&drm_minors_idr,
248 		      NULL,
249 		      64 * type,
250 		      64 * (type + 1),
251 		      GFP_NOWAIT);
252 	spin_unlock_irqrestore(&drm_minor_lock, flags);
253 	idr_preload_end();
254 
255 	if (r < 0)
256 		goto err_free;
257 
258 	minor->index = r;
259 
260 	minor->kdev = drm_sysfs_minor_alloc(minor);
261 	if (IS_ERR(minor->kdev)) {
262 		r = PTR_ERR(minor->kdev);
263 		goto err_index;
264 	}
265 
266 	*drm_minor_get_slot(dev, type) = minor;
267 	return 0;
268 
269 err_index:
270 	spin_lock_irqsave(&drm_minor_lock, flags);
271 	idr_remove(&drm_minors_idr, minor->index);
272 	spin_unlock_irqrestore(&drm_minor_lock, flags);
273 err_free:
274 	kfree(minor);
275 	return r;
276 }
277 
278 static void drm_minor_free(struct drm_device *dev, unsigned int type)
279 {
280 	struct drm_minor **slot, *minor;
281 	unsigned long flags;
282 
283 	slot = drm_minor_get_slot(dev, type);
284 	minor = *slot;
285 	if (!minor)
286 		return;
287 
288 	drm_mode_group_destroy(&minor->mode_group);
289 	put_device(minor->kdev);
290 
291 	spin_lock_irqsave(&drm_minor_lock, flags);
292 	idr_remove(&drm_minors_idr, minor->index);
293 	spin_unlock_irqrestore(&drm_minor_lock, flags);
294 
295 	kfree(minor);
296 	*slot = NULL;
297 }
298 
299 static int drm_minor_register(struct drm_device *dev, unsigned int type)
300 {
301 	struct drm_minor *minor;
302 	unsigned long flags;
303 	int ret;
304 
305 	DRM_DEBUG("\n");
306 
307 	minor = *drm_minor_get_slot(dev, type);
308 	if (!minor)
309 		return 0;
310 
311 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
312 	if (ret) {
313 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
314 		return ret;
315 	}
316 
317 	ret = device_add(minor->kdev);
318 	if (ret)
319 		goto err_debugfs;
320 
321 	/* replace NULL with @minor so lookups will succeed from now on */
322 	spin_lock_irqsave(&drm_minor_lock, flags);
323 	idr_replace(&drm_minors_idr, minor, minor->index);
324 	spin_unlock_irqrestore(&drm_minor_lock, flags);
325 
326 	DRM_DEBUG("new minor registered %d\n", minor->index);
327 	return 0;
328 
329 err_debugfs:
330 	drm_debugfs_cleanup(minor);
331 	return ret;
332 }
333 
334 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
335 {
336 	struct drm_minor *minor;
337 	unsigned long flags;
338 
339 	minor = *drm_minor_get_slot(dev, type);
340 	if (!minor || !device_is_registered(minor->kdev))
341 		return;
342 
343 	/* replace @minor with NULL so lookups will fail from now on */
344 	spin_lock_irqsave(&drm_minor_lock, flags);
345 	idr_replace(&drm_minors_idr, NULL, minor->index);
346 	spin_unlock_irqrestore(&drm_minor_lock, flags);
347 
348 	device_del(minor->kdev);
349 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
350 	drm_debugfs_cleanup(minor);
351 }
352 
353 /**
354  * drm_minor_acquire - Acquire a DRM minor
355  * @minor_id: Minor ID of the DRM-minor
356  *
357  * Looks up the given minor-ID and returns the respective DRM-minor object. The
358  * refence-count of the underlying device is increased so you must release this
359  * object with drm_minor_release().
360  *
361  * As long as you hold this minor, it is guaranteed that the object and the
362  * minor->dev pointer will stay valid! However, the device may get unplugged and
363  * unregistered while you hold the minor.
364  *
365  * Returns:
366  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
367  * failure.
368  */
369 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
370 {
371 	struct drm_minor *minor;
372 	unsigned long flags;
373 
374 	spin_lock_irqsave(&drm_minor_lock, flags);
375 	minor = idr_find(&drm_minors_idr, minor_id);
376 	if (minor)
377 		drm_dev_ref(minor->dev);
378 	spin_unlock_irqrestore(&drm_minor_lock, flags);
379 
380 	if (!minor) {
381 		return ERR_PTR(-ENODEV);
382 	} else if (drm_device_is_unplugged(minor->dev)) {
383 		drm_dev_unref(minor->dev);
384 		return ERR_PTR(-ENODEV);
385 	}
386 
387 	return minor;
388 }
389 
390 /**
391  * drm_minor_release - Release DRM minor
392  * @minor: Pointer to DRM minor object
393  *
394  * Release a minor that was previously acquired via drm_minor_acquire().
395  */
396 void drm_minor_release(struct drm_minor *minor)
397 {
398 	drm_dev_unref(minor->dev);
399 }
400 
401 /**
402  * drm_put_dev - Unregister and release a DRM device
403  * @dev: DRM device
404  *
405  * Called at module unload time or when a PCI device is unplugged.
406  *
407  * Use of this function is discouraged. It will eventually go away completely.
408  * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
409  *
410  * Cleans up all DRM device, calling drm_lastclose().
411  */
412 void drm_put_dev(struct drm_device *dev)
413 {
414 	DRM_DEBUG("\n");
415 
416 	if (!dev) {
417 		DRM_ERROR("cleanup called no dev\n");
418 		return;
419 	}
420 
421 	drm_dev_unregister(dev);
422 	drm_dev_unref(dev);
423 }
424 EXPORT_SYMBOL(drm_put_dev);
425 
426 void drm_unplug_dev(struct drm_device *dev)
427 {
428 	/* for a USB device */
429 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
430 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
431 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
432 
433 	mutex_lock(&drm_global_mutex);
434 
435 	drm_device_set_unplugged(dev);
436 
437 	if (dev->open_count == 0) {
438 		drm_put_dev(dev);
439 	}
440 	mutex_unlock(&drm_global_mutex);
441 }
442 EXPORT_SYMBOL(drm_unplug_dev);
443 
444 /*
445  * DRM internal mount
446  * We want to be able to allocate our own "struct address_space" to control
447  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
448  * stand-alone address_space objects, so we need an underlying inode. As there
449  * is no way to allocate an independent inode easily, we need a fake internal
450  * VFS mount-point.
451  *
452  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
453  * frees it again. You are allowed to use iget() and iput() to get references to
454  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
455  * drm_fs_inode_free() call (which does not have to be the last iput()).
456  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
457  * between multiple inode-users. You could, technically, call
458  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
459  * iput(), but this way you'd end up with a new vfsmount for each inode.
460  */
461 
462 static int drm_fs_cnt;
463 static struct vfsmount *drm_fs_mnt;
464 
465 static const struct dentry_operations drm_fs_dops = {
466 	.d_dname	= simple_dname,
467 };
468 
469 static const struct super_operations drm_fs_sops = {
470 	.statfs		= simple_statfs,
471 };
472 
473 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
474 				   const char *dev_name, void *data)
475 {
476 	return mount_pseudo(fs_type,
477 			    "drm:",
478 			    &drm_fs_sops,
479 			    &drm_fs_dops,
480 			    0x010203ff);
481 }
482 
483 static struct file_system_type drm_fs_type = {
484 	.name		= "drm",
485 	.owner		= THIS_MODULE,
486 	.mount		= drm_fs_mount,
487 	.kill_sb	= kill_anon_super,
488 };
489 
490 static struct inode *drm_fs_inode_new(void)
491 {
492 	struct inode *inode;
493 	int r;
494 
495 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
496 	if (r < 0) {
497 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
498 		return ERR_PTR(r);
499 	}
500 
501 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
502 	if (IS_ERR(inode))
503 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
504 
505 	return inode;
506 }
507 
508 static void drm_fs_inode_free(struct inode *inode)
509 {
510 	if (inode) {
511 		iput(inode);
512 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
513 	}
514 }
515 
516 /**
517  * drm_dev_alloc - Allocate new DRM device
518  * @driver: DRM driver to allocate device for
519  * @parent: Parent device object
520  *
521  * Allocate and initialize a new DRM device. No device registration is done.
522  * Call drm_dev_register() to advertice the device to user space and register it
523  * with other core subsystems.
524  *
525  * The initial ref-count of the object is 1. Use drm_dev_ref() and
526  * drm_dev_unref() to take and drop further ref-counts.
527  *
528  * RETURNS:
529  * Pointer to new DRM device, or NULL if out of memory.
530  */
531 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
532 				 struct device *parent)
533 {
534 	struct drm_device *dev;
535 	int ret;
536 
537 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
538 	if (!dev)
539 		return NULL;
540 
541 	kref_init(&dev->ref);
542 	dev->dev = parent;
543 	dev->driver = driver;
544 
545 	INIT_LIST_HEAD(&dev->filelist);
546 	INIT_LIST_HEAD(&dev->ctxlist);
547 	INIT_LIST_HEAD(&dev->vmalist);
548 	INIT_LIST_HEAD(&dev->maplist);
549 	INIT_LIST_HEAD(&dev->vblank_event_list);
550 
551 	spin_lock_init(&dev->buf_lock);
552 	spin_lock_init(&dev->event_lock);
553 	mutex_init(&dev->struct_mutex);
554 	mutex_init(&dev->ctxlist_mutex);
555 	mutex_init(&dev->master_mutex);
556 
557 	dev->anon_inode = drm_fs_inode_new();
558 	if (IS_ERR(dev->anon_inode)) {
559 		ret = PTR_ERR(dev->anon_inode);
560 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
561 		goto err_free;
562 	}
563 
564 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
565 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
566 		if (ret)
567 			goto err_minors;
568 	}
569 
570 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
571 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
572 		if (ret)
573 			goto err_minors;
574 	}
575 
576 	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
577 	if (ret)
578 		goto err_minors;
579 
580 	if (drm_ht_create(&dev->map_hash, 12))
581 		goto err_minors;
582 
583 	ret = drm_legacy_ctxbitmap_init(dev);
584 	if (ret) {
585 		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
586 		goto err_ht;
587 	}
588 
589 	if (driver->driver_features & DRIVER_GEM) {
590 		ret = drm_gem_init(dev);
591 		if (ret) {
592 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
593 			goto err_ctxbitmap;
594 		}
595 	}
596 
597 	return dev;
598 
599 err_ctxbitmap:
600 	drm_legacy_ctxbitmap_cleanup(dev);
601 err_ht:
602 	drm_ht_remove(&dev->map_hash);
603 err_minors:
604 	drm_minor_free(dev, DRM_MINOR_LEGACY);
605 	drm_minor_free(dev, DRM_MINOR_RENDER);
606 	drm_minor_free(dev, DRM_MINOR_CONTROL);
607 	drm_fs_inode_free(dev->anon_inode);
608 err_free:
609 	mutex_destroy(&dev->master_mutex);
610 	kfree(dev);
611 	return NULL;
612 }
613 EXPORT_SYMBOL(drm_dev_alloc);
614 
615 static void drm_dev_release(struct kref *ref)
616 {
617 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
618 
619 	if (dev->driver->driver_features & DRIVER_GEM)
620 		drm_gem_destroy(dev);
621 
622 	drm_legacy_ctxbitmap_cleanup(dev);
623 	drm_ht_remove(&dev->map_hash);
624 	drm_fs_inode_free(dev->anon_inode);
625 
626 	drm_minor_free(dev, DRM_MINOR_LEGACY);
627 	drm_minor_free(dev, DRM_MINOR_RENDER);
628 	drm_minor_free(dev, DRM_MINOR_CONTROL);
629 
630 	mutex_destroy(&dev->master_mutex);
631 	kfree(dev->unique);
632 	kfree(dev);
633 }
634 
635 /**
636  * drm_dev_ref - Take reference of a DRM device
637  * @dev: device to take reference of or NULL
638  *
639  * This increases the ref-count of @dev by one. You *must* already own a
640  * reference when calling this. Use drm_dev_unref() to drop this reference
641  * again.
642  *
643  * This function never fails. However, this function does not provide *any*
644  * guarantee whether the device is alive or running. It only provides a
645  * reference to the object and the memory associated with it.
646  */
647 void drm_dev_ref(struct drm_device *dev)
648 {
649 	if (dev)
650 		kref_get(&dev->ref);
651 }
652 EXPORT_SYMBOL(drm_dev_ref);
653 
654 /**
655  * drm_dev_unref - Drop reference of a DRM device
656  * @dev: device to drop reference of or NULL
657  *
658  * This decreases the ref-count of @dev by one. The device is destroyed if the
659  * ref-count drops to zero.
660  */
661 void drm_dev_unref(struct drm_device *dev)
662 {
663 	if (dev)
664 		kref_put(&dev->ref, drm_dev_release);
665 }
666 EXPORT_SYMBOL(drm_dev_unref);
667 
668 /**
669  * drm_dev_register - Register DRM device
670  * @dev: Device to register
671  * @flags: Flags passed to the driver's .load() function
672  *
673  * Register the DRM device @dev with the system, advertise device to user-space
674  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
675  * previously.
676  *
677  * Never call this twice on any device!
678  *
679  * RETURNS:
680  * 0 on success, negative error code on failure.
681  */
682 int drm_dev_register(struct drm_device *dev, unsigned long flags)
683 {
684 	int ret;
685 
686 	mutex_lock(&drm_global_mutex);
687 
688 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
689 	if (ret)
690 		goto err_minors;
691 
692 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
693 	if (ret)
694 		goto err_minors;
695 
696 	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
697 	if (ret)
698 		goto err_minors;
699 
700 	if (dev->driver->load) {
701 		ret = dev->driver->load(dev, flags);
702 		if (ret)
703 			goto err_minors;
704 	}
705 
706 	/* setup grouping for legacy outputs */
707 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
708 		ret = drm_mode_group_init_legacy_group(dev,
709 				&dev->primary->mode_group);
710 		if (ret)
711 			goto err_unload;
712 	}
713 
714 	ret = 0;
715 	goto out_unlock;
716 
717 err_unload:
718 	if (dev->driver->unload)
719 		dev->driver->unload(dev);
720 err_minors:
721 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
722 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
723 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
724 out_unlock:
725 	mutex_unlock(&drm_global_mutex);
726 	return ret;
727 }
728 EXPORT_SYMBOL(drm_dev_register);
729 
730 /**
731  * drm_dev_unregister - Unregister DRM device
732  * @dev: Device to unregister
733  *
734  * Unregister the DRM device from the system. This does the reverse of
735  * drm_dev_register() but does not deallocate the device. The caller must call
736  * drm_dev_unref() to drop their final reference.
737  */
738 void drm_dev_unregister(struct drm_device *dev)
739 {
740 	struct drm_map_list *r_list, *list_temp;
741 
742 	drm_lastclose(dev);
743 
744 	if (dev->driver->unload)
745 		dev->driver->unload(dev);
746 
747 	if (dev->agp)
748 		drm_pci_agp_destroy(dev);
749 
750 	drm_vblank_cleanup(dev);
751 
752 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
753 		drm_rmmap(dev, r_list->map);
754 
755 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
756 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
757 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
758 }
759 EXPORT_SYMBOL(drm_dev_unregister);
760 
761 /**
762  * drm_dev_set_unique - Set the unique name of a DRM device
763  * @dev: device of which to set the unique name
764  * @fmt: format string for unique name
765  *
766  * Sets the unique name of a DRM device using the specified format string and
767  * a variable list of arguments. Drivers can use this at driver probe time if
768  * the unique name of the devices they drive is static.
769  *
770  * Return: 0 on success or a negative error code on failure.
771  */
772 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
773 {
774 	va_list ap;
775 
776 	kfree(dev->unique);
777 
778 	va_start(ap, fmt);
779 	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
780 	va_end(ap);
781 
782 	return dev->unique ? 0 : -ENOMEM;
783 }
784 EXPORT_SYMBOL(drm_dev_set_unique);
785 #endif
786 
787 /*
788  * DRM Core
789  * The DRM core module initializes all global DRM objects and makes them
790  * available to drivers. Once setup, drivers can probe their respective
791  * devices.
792  * Currently, core management includes:
793  *  - The "DRM-Global" key/value database
794  *  - Global ID management for connectors
795  *  - DRM major number allocation
796  *  - DRM minor management
797  *  - DRM sysfs class
798  *  - DRM debugfs root
799  *
800  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
801  * interface registered on a DRM device, you can request minor numbers from DRM
802  * core. DRM core takes care of major-number management and char-dev
803  * registration. A stub ->open() callback forwards any open() requests to the
804  * registered minor.
805  */
806 
807 #if 0
808 static int drm_stub_open(struct inode *inode, struct file *filp)
809 {
810 	const struct file_operations *new_fops;
811 	struct drm_minor *minor;
812 	int err;
813 
814 	DRM_DEBUG("\n");
815 
816 	mutex_lock(&drm_global_mutex);
817 	minor = drm_minor_acquire(iminor(inode));
818 	if (IS_ERR(minor)) {
819 		err = PTR_ERR(minor);
820 		goto out_unlock;
821 	}
822 
823 	new_fops = fops_get(minor->dev->driver->fops);
824 	if (!new_fops) {
825 		err = -ENODEV;
826 		goto out_release;
827 	}
828 
829 	replace_fops(filp, new_fops);
830 	if (filp->f_op->open)
831 		err = filp->f_op->open(inode, filp);
832 	else
833 		err = 0;
834 
835 out_release:
836 	drm_minor_release(minor);
837 out_unlock:
838 	mutex_unlock(&drm_global_mutex);
839 	return err;
840 }
841 
842 static const struct file_operations drm_stub_fops = {
843 	.owner = THIS_MODULE,
844 	.open = drm_stub_open,
845 	.llseek = noop_llseek,
846 };
847 
848 static int __init drm_core_init(void)
849 {
850 	int ret = -ENOMEM;
851 
852 	drm_global_init();
853 	drm_connector_ida_init();
854 	idr_init(&drm_minors_idr);
855 
856 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
857 		goto err_p1;
858 
859 	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
860 	if (IS_ERR(drm_class)) {
861 		printk(KERN_ERR "DRM: Error creating drm class.\n");
862 		ret = PTR_ERR(drm_class);
863 		goto err_p2;
864 	}
865 
866 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
867 	if (!drm_debugfs_root) {
868 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
869 		ret = -1;
870 		goto err_p3;
871 	}
872 
873 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
874 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
875 	return 0;
876 err_p3:
877 	drm_sysfs_destroy();
878 err_p2:
879 	unregister_chrdev(DRM_MAJOR, "drm");
880 
881 	idr_destroy(&drm_minors_idr);
882 err_p1:
883 	return ret;
884 }
885 
886 static void __exit drm_core_exit(void)
887 {
888 	debugfs_remove(drm_debugfs_root);
889 	drm_sysfs_destroy();
890 
891 	unregister_chrdev(DRM_MAJOR, "drm");
892 
893 	drm_connector_ida_destroy();
894 	idr_destroy(&drm_minors_idr);
895 }
896 
897 module_init(drm_core_init);
898 module_exit(drm_core_exit);
899 #endif
900 
901 #include <sys/devfs.h>
902 
903 #include <linux/export.h>
904 #include <linux/dmi.h>
905 #include <drm/drmP.h>
906 #include <drm/drm_core.h>
907 
908 #if DRM_DEBUG_DEFAULT_ON == 1
909 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
910     DRM_DEBUGBITS_FAILED_IOCTL)
911 #elif DRM_DEBUG_DEFAULT_ON == 2
912 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
913     DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
914 #else
915 #define DRM_DEBUGBITS_ON (0x0)
916 #endif
917 
918 int drm_notyet_flag = 0;
919 
920 static int drm_load(struct drm_device *dev);
921 drm_pci_id_list_t *drm_find_description(int vendor, int device,
922     drm_pci_id_list_t *idlist);
923 
924 #define DRIVER_SOFTC(unit) \
925 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
926 
927 static int
928 drm_modevent(module_t mod, int type, void *data)
929 {
930 
931 	switch (type) {
932 	case MOD_LOAD:
933 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
934 		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
935 		break;
936 	}
937 	return (0);
938 }
939 
/* Module glue for DECLARE_MODULE below: name, event handler, no priv data. */
static moduledata_t drm_mod = {
	"drm",
	drm_modevent,
	0
};
945 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
946 MODULE_VERSION(drm, 1);
947 MODULE_DEPEND(drm, agp, 1, 1, 1);
948 MODULE_DEPEND(drm, pci, 1, 1, 1);
949 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
950 
/*
 * Character-device entry points for the /dev/dri/card%d nodes created in
 * drm_create_cdevs().  D_TRACKCLOSE makes the kernel call d_close on every
 * close(2) (needed for per-open DRM file state); D_MPSAFE marks the
 * handlers as not requiring the MP lock.
 */
static struct dev_ops drm_cdevsw = {
	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
	.d_open =	drm_open,
	.d_close =	drm_close,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_kqfilter =	drm_kqfilter,
	.d_mmap =	drm_mmap,
	.d_mmap_single = drm_mmap_single,
};
961 
962 static int drm_msi = 0;	/* Disable by default. This is because there are issues with
963 			   freezes using MSI and i915
964 			 */
965 TUNABLE_INT("hw.drm.msi.enable", &drm_msi);
966 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
967 SYSCTL_NODE(_hw_drm, OID_AUTO, msi, CTLFLAG_RW, NULL, "DRM device msi");
968 SYSCTL_INT(_hw_drm_msi, OID_AUTO, enable, CTLFLAG_RD, &drm_msi, 0,
969     "Enable MSI interrupts for drm devices");
970 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
971     "DRM debugging");
972 
973 static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
974 	{0x8086, 0x2772}, /* Intel i945G	*/ \
975 	{0x8086, 0x27A2}, /* Intel i945GM	*/ \
976 	{0x8086, 0x27AE}, /* Intel i945GME	*/ \
977 	{0, 0}
978 };
979 
980 static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
981 {
982 	int i = 0;
983 
984 	if (dev->driver->use_msi != NULL) {
985 		int use_msi;
986 
987 		use_msi = dev->driver->use_msi(dev, flags);
988 
989 		return (!use_msi);
990 	}
991 
992 	/* TODO: Maybe move this to a callback in i915? */
993 	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
994 		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
995 		    (drm_msi_blacklist[i].device == dev->pci_device)) {
996 			return 1;
997 		}
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
1004 {
1005 	drm_pci_id_list_t *id_entry;
1006 	int vendor, device;
1007 
1008 	vendor = pci_get_vendor(kdev);
1009 	device = pci_get_device(kdev);
1010 
1011 	if (pci_get_class(kdev) != PCIC_DISPLAY)
1012 		return ENXIO;
1013 
1014 	id_entry = drm_find_description(vendor, device, idlist);
1015 	if (id_entry != NULL) {
1016 		if (!device_get_desc(kdev)) {
1017 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
1018 			device_set_desc(kdev, id_entry->name);
1019 		}
1020 		return 0;
1021 	}
1022 
1023 	return ENXIO;
1024 }
1025 
1026 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
1027 {
1028 	struct drm_device *dev;
1029 	drm_pci_id_list_t *id_entry;
1030 	int unit, error;
1031 	u_int irq_flags;
1032 	int msi_enable;
1033 
1034 	unit = device_get_unit(kdev);
1035 	dev = device_get_softc(kdev);
1036 
1037 	if (!strcmp(device_get_name(kdev), "drmsub"))
1038 		dev->dev = device_get_parent(kdev);
1039 	else
1040 		dev->dev = kdev;
1041 
1042 	dev->pci_domain = pci_get_domain(dev->dev);
1043 	dev->pci_bus = pci_get_bus(dev->dev);
1044 	dev->pci_slot = pci_get_slot(dev->dev);
1045 	dev->pci_func = pci_get_function(dev->dev);
1046 
1047 	dev->pci_vendor = pci_get_vendor(dev->dev);
1048 	dev->pci_device = pci_get_device(dev->dev);
1049 	dev->pci_subvendor = pci_get_subvendor(dev->dev);
1050 	dev->pci_subdevice = pci_get_subdevice(dev->dev);
1051 
1052 	id_entry = drm_find_description(dev->pci_vendor,
1053 	    dev->pci_device, idlist);
1054 	dev->id_entry = id_entry;
1055 
1056 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1057 		msi_enable = drm_msi;
1058 
1059 		if (drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
1060 			msi_enable = 0;
1061 		}
1062 
1063 		dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
1064 		    &dev->irqrid, &irq_flags);
1065 
1066 		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
1067 		    &dev->irqrid, irq_flags);
1068 
1069 		if (!dev->irqr) {
1070 			return (ENOENT);
1071 		}
1072 
1073 		dev->irq = (int) rman_get_start(dev->irqr);
1074 	}
1075 
1076 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1077 	lwkt_serialize_init(&dev->irq_lock);
1078 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1079 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1080 
1081 	error = drm_load(dev);
1082 	if (error)
1083 		goto error;
1084 
1085 	error = drm_create_cdevs(kdev);
1086 	if (error)
1087 		goto error;
1088 
1089 	return (error);
1090 error:
1091 	if (dev->irqr) {
1092 		bus_release_resource(dev->dev, SYS_RES_IRQ,
1093 		    dev->irqrid, dev->irqr);
1094 	}
1095 	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1096 		pci_release_msi(dev->dev);
1097 	}
1098 	return (error);
1099 }
1100 
1101 int
1102 drm_create_cdevs(device_t kdev)
1103 {
1104 	struct drm_device *dev;
1105 	int error, unit;
1106 
1107 	unit = device_get_unit(kdev);
1108 	dev = device_get_softc(kdev);
1109 
1110 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1111 				DRM_DEV_MODE, "dri/card%d", unit);
1112 	error = 0;
1113 	if (error == 0)
1114 		dev->devnode->si_drv1 = dev;
1115 	return (error);
1116 }
1117 
#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"	/* base name for the DRM character device */
#endif

devclass_t drm_devclass;	/* devclass shared by all DRM device drivers */
1123 
1124 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1125     drm_pci_id_list_t *idlist)
1126 {
1127 	int i = 0;
1128 
1129 	for (i = 0; idlist[i].vendor != 0; i++) {
1130 		if ((idlist[i].vendor == vendor) &&
1131 		    ((idlist[i].device == device) ||
1132 		    (idlist[i].device == 0))) {
1133 			return &idlist[i];
1134 		}
1135 	}
1136 	return NULL;
1137 }
1138 
1139 /**
1140  * Take down the DRM device.
1141  *
1142  * \param dev DRM device structure.
1143  *
1144  * Frees every resource in \p dev.
1145  *
1146  * \sa drm_device
1147  */
int drm_lastclose(struct drm_device * dev)
{
	drm_magic_entry_t *pt, *next;

	DRM_DEBUG("\n");

	/* Let the driver release its own per-device state first. */
	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	/*
	 * Legacy (non-MODESET) drivers install the IRQ on behalf of
	 * userland, so it is torn down on last close; KMS drivers keep
	 * their IRQ installed for the lifetime of the device.
	 */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	/* Drop the unique (busid) string supplied by userland, if any. */
	if (dev->unique) {
		drm_free(dev->unique, M_DRM);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear pid list */
	if (dev->magicfree.next) {
		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
			list_del(&pt->head);
			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
			kfree(pt);
		}
		drm_ht_remove(&dev->magiclist);
	}

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			drm_free(entry, M_DRM);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
	/* Tear down scatter/gather DMA memory, if allocated. */
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	drm_dma_takedown(dev);
	/*
	 * The hardware lock lived in a SHM map that is gone by now;
	 * forget it and wake anyone still sleeping on the lock queue.
	 */
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		wakeup(&dev->lock.lock_queue);
	}
	DRM_UNLOCK(dev);

	return 0;
}
1215 
/*
 * One-time per-device initialization run at attach time: set up the
 * bookkeeping lists, statistics counters, AGP and GEM (when the driver
 * uses them), then call the driver's own load() hook.  Returns 0 on
 * success or an errno; on failure the partial setup is unwound.
 */
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev->maplist);

	drm_mem_init();
	drm_sysctl_init(dev);
	INIT_LIST_HEAD(&dev->filelist);

	/* Statistics slots exported through dev->counts[]. */
	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		/* Map the AGP aperture write-combining when MTRR setup works. */
		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->agp_mtrr = 1;
		}
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		/* Bus-master failure is only logged, not treated as fatal. */
		if (pci_enable_busmaster(dev->dev))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	/*
	 * NOTE(review): a failing driver->load() on a non-GEM driver also
	 * lands here and calls drm_gem_destroy() without a prior successful
	 * drm_gem_init() -- verify that this is safe.
	 */
	drm_gem_destroy(dev);
error:
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);

	return retcode;
}
1305 
1306 /*
1307  * Stub is needed for devfs
1308  */
1309 int drm_close(struct dev_close_args *ap)
1310 {
1311 	return 0;
1312 }
1313 
/*
 * cdevpriv destructor: runs when the last reference to a per-open
 * drm_file goes away.  DragonFly equivalent of Linux drm_release():
 * reclaims the hardware lock and DMA buffers owned by the closing
 * file, notifies the driver, and performs device-wide last-close
 * processing when this was the final open.
 */
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	/* If the dying opener still holds the hardware lock, free it. */
	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		/* Someone else holds it: sleep until it becomes free. */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	/* Drop any SIGIO ownership registered for this device. */
	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);


	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	/* Last opener gone: run device-wide last-close teardown. */
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}
1395 
1396 drm_local_map_t *drm_getsarea(struct drm_device *dev)
1397 {
1398 	struct drm_map_list *entry;
1399 
1400 	list_for_each_entry(entry, &dev->maplist, head) {
1401 		if (entry->map && entry->map->type == _DRM_SHM &&
1402 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1403 			return entry->map;
1404 		}
1405 	}
1406 
1407 	return NULL;
1408 }
1409 
1410 int
1411 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1412     struct sysctl_oid *top)
1413 {
1414 	struct sysctl_oid *oid;
1415 
1416 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1417 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1418 	     dev->pci_slot, dev->pci_func);
1419 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1420 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1421 	if (oid == NULL)
1422 		return (ENOMEM);
1423 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1424 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1425 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1426 	if (oid == NULL)
1427 		return (ENOMEM);
1428 
1429 	return (0);
1430 }
1431 
1432 int
1433 drm_mmap_single(struct dev_mmap_single_args *ap)
1434 {
1435 	struct drm_device *dev;
1436 	struct cdev *kdev = ap->a_head.a_dev;
1437 	vm_ooffset_t *offset = ap->a_offset;
1438 	vm_size_t size = ap->a_size;
1439 	struct vm_object **obj_res = ap->a_object;
1440 	int nprot = ap->a_nprot;
1441 
1442 	dev = drm_get_device_from_kdev(kdev);
1443 	if (dev->drm_ttm_bdev != NULL) {
1444 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1445 		    obj_res, nprot));
1446 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1447 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1448 	} else {
1449 		return (ENODEV);
1450 	}
1451 }
1452 
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

/* Linux DRM ioctl commands all live in the 0x64xx ('d') range. */
#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
/* Handler registered with the Linux emulation layer for that range. */
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN
1469 
1470 static int
1471 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1472 {
1473 	int error;
1474 	int cmd = args->cmd;
1475 
1476 	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1477 	if (cmd & LINUX_IOC_IN)
1478 		args->cmd |= IOC_IN;
1479 	if (cmd & LINUX_IOC_OUT)
1480 		args->cmd |= IOC_OUT;
1481 
1482 	error = ioctl(p, (struct ioctl_args *)args);
1483 
1484 	return error;
1485 }
1486 #endif /* DRM_LINUX */
1487 
1488 static int
1489 drm_core_init(void *arg)
1490 {
1491 
1492 	drm_global_init();
1493 
1494 #if DRM_LINUX
1495 	linux_ioctl_register_handler(&drm_handler);
1496 #endif /* DRM_LINUX */
1497 
1498 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1499 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1500 	return 0;
1501 }
1502 
/*
 * SYSUNINIT hook: unregister the Linux ioctl handler (when compiled
 * in) and release the global DRM state.
 */
static void
drm_core_exit(void *arg)
{
#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}
1513 
/* Run global DRM setup/teardown with the rest of the driver subsystem. */
SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);
1518 
1519 
1520 #include <linux/dmi.h>
1521 
1522 /*
1523  * Check if dmi_system_id structure matches system DMI data
1524  */
1525 static bool
1526 dmi_found(const struct dmi_system_id *dsi)
1527 {
1528 	int i, slot;
1529 	bool found = false;
1530 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1531 
1532 	sys_vendor = kgetenv("smbios.system.maker");
1533 	board_vendor = kgetenv("smbios.planar.maker");
1534 	product_name = kgetenv("smbios.system.product");
1535 	board_name = kgetenv("smbios.planar.product");
1536 
1537 	for (i = 0; i < NELEM(dsi->matches); i++) {
1538 		slot = dsi->matches[i].slot;
1539 		switch (slot) {
1540 		case DMI_NONE:
1541 			break;
1542 		case DMI_SYS_VENDOR:
1543 			if (sys_vendor != NULL &&
1544 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1545 				break;
1546 			else
1547 				goto done;
1548 		case DMI_BOARD_VENDOR:
1549 			if (board_vendor != NULL &&
1550 			    !strcmp(board_vendor, dsi->matches[i].substr))
1551 				break;
1552 			else
1553 				goto done;
1554 		case DMI_PRODUCT_NAME:
1555 			if (product_name != NULL &&
1556 			    !strcmp(product_name, dsi->matches[i].substr))
1557 				break;
1558 			else
1559 				goto done;
1560 		case DMI_BOARD_NAME:
1561 			if (board_name != NULL &&
1562 			    !strcmp(board_name, dsi->matches[i].substr))
1563 				break;
1564 			else
1565 				goto done;
1566 		default:
1567 			goto done;
1568 		}
1569 	}
1570 	found = true;
1571 
1572 done:
1573 	if (sys_vendor != NULL)
1574 		kfreeenv(sys_vendor);
1575 	if (board_vendor != NULL)
1576 		kfreeenv(board_vendor);
1577 	if (product_name != NULL)
1578 		kfreeenv(product_name);
1579 	if (board_name != NULL)
1580 		kfreeenv(board_name);
1581 
1582 	return found;
1583 }
1584 
1585 int dmi_check_system(const struct dmi_system_id *sysid)
1586 {
1587 	const struct dmi_system_id *dsi;
1588 	int num = 0;
1589 
1590 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1591 		if (dmi_found(dsi)) {
1592 			num++;
1593 			if (dsi->callback && dsi->callback(dsi))
1594 				break;
1595 		}
1596 	}
1597 	return (num);
1598 }
1599