xref: /dragonfly/sys/dev/drm/drm_drv.c (revision cad2e385)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
36 unsigned int drm_debug = 0;	/* 1 to enable debug output */
37 EXPORT_SYMBOL(drm_debug);
38 
39 int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
40 
41 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
42 
43 /*
44  * Default to use monotonic timestamps for wait-for-vblank and page-flip
45  * complete events.
46  */
47 unsigned int drm_timestamp_monotonic = 1;
48 
49 MODULE_AUTHOR(CORE_AUTHOR);
50 MODULE_DESCRIPTION(CORE_DESC);
51 MODULE_PARM_DESC(debug, "Enable debug output");
52 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
53 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
54 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
55 
56 module_param_named(debug, drm_debug, int, 0600);
57 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
58 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
59 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
60 
61 #if 0
62 static DEFINE_SPINLOCK(drm_minor_lock);
63 static struct idr drm_minors_idr;
64 #endif
65 
66 struct class *drm_class;
67 #if 0
68 static struct dentry *drm_debugfs_root;
69 #endif
70 
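/*
 * Logging helpers; the Linux printk()-based bodies are compiled out in this
 * port and kept under "#if 0" for reference.
 */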
71 void drm_err(const char *func, const char *format, ...)
72 {
73 #if 0
74 	struct va_format vaf;
75 	va_list args;
76 	int r;
77 
78 	va_start(args, format);
79 
80 	vaf.fmt = format;
81 	vaf.va = &args;
82 
83 	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
84 
85 	va_end(args);
86 
87 	return r;
88 #endif
89 }
90 EXPORT_SYMBOL(drm_err);
91 
92 void drm_ut_debug_printk(const char *function_name, const char *format, ...)
93 {
94 #if 0
95 	struct va_format vaf;
96 	va_list args;
97 
98 	va_start(args, format);
99 	vaf.fmt = format;
100 	vaf.va = &args;
101 
102 	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
103 
104 	va_end(args);
105 #endif
106 }
107 EXPORT_SYMBOL(drm_ut_debug_printk);
108 
109 #if 0
110 struct drm_master *drm_master_create(struct drm_minor *minor)
111 {
112 	struct drm_master *master;
113 
114 	master = kzalloc(sizeof(*master), GFP_KERNEL);
115 	if (!master)
116 		return NULL;
117 
118 	kref_init(&master->refcount);
119 	spin_lock_init(&master->lock.spinlock);
120 	init_waitqueue_head(&master->lock.lock_queue);
121 	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
122 		kfree(master);
123 		return NULL;
124 	}
125 	INIT_LIST_HEAD(&master->magicfree);
126 	master->minor = minor;
127 
128 	return master;
129 }
130 
131 struct drm_master *drm_master_get(struct drm_master *master)
132 {
133 	kref_get(&master->refcount);
134 	return master;
135 }
136 EXPORT_SYMBOL(drm_master_get);
137 
138 static void drm_master_destroy(struct kref *kref)
139 {
140 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
141 	struct drm_device *dev = master->minor->dev;
142 	struct drm_map_list *r_list, *list_temp;
143 
144 	mutex_lock(&dev->struct_mutex);
145 	if (dev->driver->master_destroy)
146 		dev->driver->master_destroy(dev, master);
147 
148 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
149 		if (r_list->master == master) {
150 			drm_legacy_rmmap_locked(dev, r_list->map);
151 			r_list = NULL;
152 		}
153 	}
154 
155 	if (master->unique) {
156 		kfree(master->unique);
157 		master->unique = NULL;
158 		master->unique_len = 0;
159 	}
160 
161 	drm_ht_remove(&master->magiclist);
162 
163 	mutex_unlock(&dev->struct_mutex);
164 	kfree(master);
165 }
166 
167 void drm_master_put(struct drm_master **master)
168 {
169 	kref_put(&(*master)->refcount, drm_master_destroy);
170 	*master = NULL;
171 }
172 EXPORT_SYMBOL(drm_master_put);
173 #endif
174 
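/*
 * Minimal set-/drop-master ioctl handlers: setmaster succeeds only when the
 * caller already holds master status, dropmaster only when it does not.
 */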
175 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
176 			struct drm_file *file_priv)
177 {
178 	DRM_DEBUG("setmaster\n");
179 
180 	if (file_priv->master != 0)
181 		return (0);
182 
183 	return (-EPERM);
184 }
185 
186 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
187 			 struct drm_file *file_priv)
188 {
189 	DRM_DEBUG("dropmaster\n");
190 	if (file_priv->master != 0)
191 		return -EINVAL;
192 	return 0;
193 }
194 
195 #if 0
196 /*
197  * DRM Minors
198  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
199  * of them is represented by a drm_minor object. Depending on the capabilities
200  * of the device-driver, different interfaces are registered.
201  *
202  * Minors can be accessed via dev->$minor_name. This pointer is either
203  * NULL or a valid drm_minor pointer and stays valid as long as the device is
204  * valid. This means DRM minors have the same lifetime as the underlying
205  * device. However, this doesn't mean that the minor is active. Minors are
206  * registered and unregistered dynamically according to device-state.
207  */
208 
209 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
210 					     unsigned int type)
211 {
212 	switch (type) {
213 	case DRM_MINOR_LEGACY:
214 		return &dev->primary;
215 	case DRM_MINOR_RENDER:
216 		return &dev->render;
217 	case DRM_MINOR_CONTROL:
218 		return &dev->control;
219 	default:
220 		return NULL;
221 	}
222 }
223 
224 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
225 {
226 	struct drm_minor *minor;
227 	unsigned long flags;
228 	int r;
229 
230 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
231 	if (!minor)
232 		return -ENOMEM;
233 
234 	minor->type = type;
235 	minor->dev = dev;
236 
237 	idr_preload(GFP_KERNEL);
238 	spin_lock_irqsave(&drm_minor_lock, flags);
239 	r = idr_alloc(&drm_minors_idr,
240 		      NULL,
241 		      64 * type,
242 		      64 * (type + 1),
243 		      GFP_NOWAIT);
244 	spin_unlock_irqrestore(&drm_minor_lock, flags);
245 	idr_preload_end();
246 
247 	if (r < 0)
248 		goto err_free;
249 
250 	minor->index = r;
251 
252 	minor->kdev = drm_sysfs_minor_alloc(minor);
253 	if (IS_ERR(minor->kdev)) {
254 		r = PTR_ERR(minor->kdev);
255 		goto err_index;
256 	}
257 
258 	*drm_minor_get_slot(dev, type) = minor;
259 	return 0;
260 
261 err_index:
262 	spin_lock_irqsave(&drm_minor_lock, flags);
263 	idr_remove(&drm_minors_idr, minor->index);
264 	spin_unlock_irqrestore(&drm_minor_lock, flags);
265 err_free:
266 	kfree(minor);
267 	return r;
268 }
269 
270 static void drm_minor_free(struct drm_device *dev, unsigned int type)
271 {
272 	struct drm_minor **slot, *minor;
273 	unsigned long flags;
274 
275 	slot = drm_minor_get_slot(dev, type);
276 	minor = *slot;
277 	if (!minor)
278 		return;
279 
280 	drm_mode_group_destroy(&minor->mode_group);
281 	put_device(minor->kdev);
282 
283 	spin_lock_irqsave(&drm_minor_lock, flags);
284 	idr_remove(&drm_minors_idr, minor->index);
285 	spin_unlock_irqrestore(&drm_minor_lock, flags);
286 
287 	kfree(minor);
288 	*slot = NULL;
289 }
290 
291 static int drm_minor_register(struct drm_device *dev, unsigned int type)
292 {
293 	struct drm_minor *minor;
294 	unsigned long flags;
295 	int ret;
296 
297 	DRM_DEBUG("\n");
298 
299 	minor = *drm_minor_get_slot(dev, type);
300 	if (!minor)
301 		return 0;
302 
303 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
304 	if (ret) {
305 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
306 		return ret;
307 	}
308 
309 	ret = device_add(minor->kdev);
310 	if (ret)
311 		goto err_debugfs;
312 
313 	/* replace NULL with @minor so lookups will succeed from now on */
314 	spin_lock_irqsave(&drm_minor_lock, flags);
315 	idr_replace(&drm_minors_idr, minor, minor->index);
316 	spin_unlock_irqrestore(&drm_minor_lock, flags);
317 
318 	DRM_DEBUG("new minor registered %d\n", minor->index);
319 	return 0;
320 
321 err_debugfs:
322 	drm_debugfs_cleanup(minor);
323 	return ret;
324 }
325 
326 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
327 {
328 	struct drm_minor *minor;
329 	unsigned long flags;
330 
331 	minor = *drm_minor_get_slot(dev, type);
332 	if (!minor || !device_is_registered(minor->kdev))
333 		return;
334 
335 	/* replace @minor with NULL so lookups will fail from now on */
336 	spin_lock_irqsave(&drm_minor_lock, flags);
337 	idr_replace(&drm_minors_idr, NULL, minor->index);
338 	spin_unlock_irqrestore(&drm_minor_lock, flags);
339 
340 	device_del(minor->kdev);
341 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
342 	drm_debugfs_cleanup(minor);
343 }
344 
345 /**
346  * drm_minor_acquire - Acquire a DRM minor
347  * @minor_id: Minor ID of the DRM-minor
348  *
349  * Looks up the given minor-ID and returns the respective DRM-minor object. The
350  * reference-count of the underlying device is increased, so you must release this
351  * object with drm_minor_release().
352  *
353  * As long as you hold this minor, it is guaranteed that the object and the
354  * minor->dev pointer will stay valid! However, the device may get unplugged and
355  * unregistered while you hold the minor.
356  *
357  * Returns:
358  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
359  * failure.
360  */
361 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
362 {
363 	struct drm_minor *minor;
364 	unsigned long flags;
365 
366 	spin_lock_irqsave(&drm_minor_lock, flags);
367 	minor = idr_find(&drm_minors_idr, minor_id);
368 	if (minor)
369 		drm_dev_ref(minor->dev);
370 	spin_unlock_irqrestore(&drm_minor_lock, flags);
371 
372 	if (!minor) {
373 		return ERR_PTR(-ENODEV);
374 	} else if (drm_device_is_unplugged(minor->dev)) {
375 		drm_dev_unref(minor->dev);
376 		return ERR_PTR(-ENODEV);
377 	}
378 
379 	return minor;
380 }
381 
382 /**
383  * drm_minor_release - Release DRM minor
384  * @minor: Pointer to DRM minor object
385  *
386  * Release a minor that was previously acquired via drm_minor_acquire().
387  */
388 void drm_minor_release(struct drm_minor *minor)
389 {
390 	drm_dev_unref(minor->dev);
391 }
392 
393 /**
394  * drm_put_dev - Unregister and release a DRM device
395  * @dev: DRM device
396  *
397  * Called at module unload time or when a PCI device is unplugged.
398  *
399  * Use of this function is discouraged. It will eventually go away completely.
400  * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
401  *
402  * Cleans up the DRM device, calling drm_lastclose().
403  */
404 void drm_put_dev(struct drm_device *dev)
405 {
406 	DRM_DEBUG("\n");
407 
408 	if (!dev) {
409 		DRM_ERROR("cleanup called no dev\n");
410 		return;
411 	}
412 
413 	drm_dev_unregister(dev);
414 	drm_dev_unref(dev);
415 }
416 EXPORT_SYMBOL(drm_put_dev);
417 
418 void drm_unplug_dev(struct drm_device *dev)
419 {
420 	/* for a USB device */
421 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
422 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
423 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
424 
425 	mutex_lock(&drm_global_mutex);
426 
427 	drm_device_set_unplugged(dev);
428 
429 	if (dev->open_count == 0) {
430 		drm_put_dev(dev);
431 	}
432 	mutex_unlock(&drm_global_mutex);
433 }
434 EXPORT_SYMBOL(drm_unplug_dev);
435 
436 /*
437  * DRM internal mount
438  * We want to be able to allocate our own "struct address_space" to control
439  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
440  * stand-alone address_space objects, so we need an underlying inode. As there
441  * is no way to allocate an independent inode easily, we need a fake internal
442  * VFS mount-point.
443  *
444  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
445  * frees it again. You are allowed to use iget() and iput() to get references to
446  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
447  * drm_fs_inode_free() call (which does not have to be the last iput()).
448  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
449  * between multiple inode-users. You could, technically, call
450  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
451  * iput(), but this way you'd end up with a new vfsmount for each inode.
452  */
453 
454 static int drm_fs_cnt;
455 static struct vfsmount *drm_fs_mnt;
456 
457 static const struct dentry_operations drm_fs_dops = {
458 	.d_dname	= simple_dname,
459 };
460 
461 static const struct super_operations drm_fs_sops = {
462 	.statfs		= simple_statfs,
463 };
464 
465 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
466 				   const char *dev_name, void *data)
467 {
468 	return mount_pseudo(fs_type,
469 			    "drm:",
470 			    &drm_fs_sops,
471 			    &drm_fs_dops,
472 			    0x010203ff);
473 }
474 
475 static struct file_system_type drm_fs_type = {
476 	.name		= "drm",
477 	.owner		= THIS_MODULE,
478 	.mount		= drm_fs_mount,
479 	.kill_sb	= kill_anon_super,
480 };
481 
482 static struct inode *drm_fs_inode_new(void)
483 {
484 	struct inode *inode;
485 	int r;
486 
487 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
488 	if (r < 0) {
489 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
490 		return ERR_PTR(r);
491 	}
492 
493 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
494 	if (IS_ERR(inode))
495 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
496 
497 	return inode;
498 }
499 
500 static void drm_fs_inode_free(struct inode *inode)
501 {
502 	if (inode) {
503 		iput(inode);
504 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
505 	}
506 }
507 
508 /**
509  * drm_dev_alloc - Allocate new DRM device
510  * @driver: DRM driver to allocate device for
511  * @parent: Parent device object
512  *
513  * Allocate and initialize a new DRM device. No device registration is done.
514  * Call drm_dev_register() to advertise the device to user space and register it
515  * with other core subsystems.
516  *
517  * The initial ref-count of the object is 1. Use drm_dev_ref() and
518  * drm_dev_unref() to take and drop further ref-counts.
519  *
520  * RETURNS:
521  * Pointer to new DRM device, or NULL if out of memory.
522  */
523 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
524 				 struct device *parent)
525 {
526 	struct drm_device *dev;
527 	int ret;
528 
529 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
530 	if (!dev)
531 		return NULL;
532 
533 	kref_init(&dev->ref);
534 	dev->dev = parent;
535 	dev->driver = driver;
536 
537 	INIT_LIST_HEAD(&dev->filelist);
538 	INIT_LIST_HEAD(&dev->ctxlist);
539 	INIT_LIST_HEAD(&dev->vmalist);
540 	INIT_LIST_HEAD(&dev->maplist);
541 	INIT_LIST_HEAD(&dev->vblank_event_list);
542 
543 	spin_lock_init(&dev->buf_lock);
544 	spin_lock_init(&dev->event_lock);
545 	mutex_init(&dev->struct_mutex);
546 	mutex_init(&dev->ctxlist_mutex);
547 	mutex_init(&dev->master_mutex);
548 
549 	dev->anon_inode = drm_fs_inode_new();
550 	if (IS_ERR(dev->anon_inode)) {
551 		ret = PTR_ERR(dev->anon_inode);
552 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
553 		goto err_free;
554 	}
555 
556 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
557 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
558 		if (ret)
559 			goto err_minors;
560 	}
561 
562 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
563 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
564 		if (ret)
565 			goto err_minors;
566 	}
567 
568 	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
569 	if (ret)
570 		goto err_minors;
571 
572 	if (drm_ht_create(&dev->map_hash, 12))
573 		goto err_minors;
574 
575 	ret = drm_legacy_ctxbitmap_init(dev);
576 	if (ret) {
577 		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
578 		goto err_ht;
579 	}
580 
581 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
582 		ret = drm_gem_init(dev);
583 		if (ret) {
584 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
585 			goto err_ctxbitmap;
586 		}
587 	}
588 
589 	return dev;
590 
591 err_ctxbitmap:
592 	drm_legacy_ctxbitmap_cleanup(dev);
593 err_ht:
594 	drm_ht_remove(&dev->map_hash);
595 err_minors:
596 	drm_minor_free(dev, DRM_MINOR_LEGACY);
597 	drm_minor_free(dev, DRM_MINOR_RENDER);
598 	drm_minor_free(dev, DRM_MINOR_CONTROL);
599 	drm_fs_inode_free(dev->anon_inode);
600 err_free:
601 	mutex_destroy(&dev->master_mutex);
602 	kfree(dev);
603 	return NULL;
604 }
605 EXPORT_SYMBOL(drm_dev_alloc);
606 
607 static void drm_dev_release(struct kref *ref)
608 {
609 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
610 
611 	if (drm_core_check_feature(dev, DRIVER_GEM))
612 		drm_gem_destroy(dev);
613 
614 	drm_legacy_ctxbitmap_cleanup(dev);
615 	drm_ht_remove(&dev->map_hash);
616 	drm_fs_inode_free(dev->anon_inode);
617 
618 	drm_minor_free(dev, DRM_MINOR_LEGACY);
619 	drm_minor_free(dev, DRM_MINOR_RENDER);
620 	drm_minor_free(dev, DRM_MINOR_CONTROL);
621 
622 	mutex_destroy(&dev->master_mutex);
623 	kfree(dev->unique);
624 	kfree(dev);
625 }
626 
627 /**
628  * drm_dev_ref - Take reference of a DRM device
629  * @dev: device to take reference of or NULL
630  *
631  * This increases the ref-count of @dev by one. You *must* already own a
632  * reference when calling this. Use drm_dev_unref() to drop this reference
633  * again.
634  *
635  * This function never fails. However, this function does not provide *any*
636  * guarantee whether the device is alive or running. It only provides a
637  * reference to the object and the memory associated with it.
638  */
639 void drm_dev_ref(struct drm_device *dev)
640 {
641 	if (dev)
642 		kref_get(&dev->ref);
643 }
644 EXPORT_SYMBOL(drm_dev_ref);
645 
646 /**
647  * drm_dev_unref - Drop reference of a DRM device
648  * @dev: device to drop reference of or NULL
649  *
650  * This decreases the ref-count of @dev by one. The device is destroyed if the
651  * ref-count drops to zero.
652  */
653 void drm_dev_unref(struct drm_device *dev)
654 {
655 	if (dev)
656 		kref_put(&dev->ref, drm_dev_release);
657 }
658 EXPORT_SYMBOL(drm_dev_unref);
659 
660 /**
661  * drm_dev_register - Register DRM device
662  * @dev: Device to register
663  * @flags: Flags passed to the driver's .load() function
664  *
665  * Register the DRM device @dev with the system, advertise device to user-space
666  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
667  * previously.
668  *
669  * Never call this twice on any device!
670  *
671  * RETURNS:
672  * 0 on success, negative error code on failure.
673  */
674 int drm_dev_register(struct drm_device *dev, unsigned long flags)
675 {
676 	int ret;
677 
678 	mutex_lock(&drm_global_mutex);
679 
680 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
681 	if (ret)
682 		goto err_minors;
683 
684 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
685 	if (ret)
686 		goto err_minors;
687 
688 	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
689 	if (ret)
690 		goto err_minors;
691 
692 	if (dev->driver->load) {
693 		ret = dev->driver->load(dev, flags);
694 		if (ret)
695 			goto err_minors;
696 	}
697 
698 	/* setup grouping for legacy outputs */
699 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
700 		ret = drm_mode_group_init_legacy_group(dev,
701 				&dev->primary->mode_group);
702 		if (ret)
703 			goto err_unload;
704 	}
705 
706 	ret = 0;
707 	goto out_unlock;
708 
709 err_unload:
710 	if (dev->driver->unload)
711 		dev->driver->unload(dev);
712 err_minors:
713 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
714 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
715 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
716 out_unlock:
717 	mutex_unlock(&drm_global_mutex);
718 	return ret;
719 }
720 EXPORT_SYMBOL(drm_dev_register);
721 
722 /**
723  * drm_dev_unregister - Unregister DRM device
724  * @dev: Device to unregister
725  *
726  * Unregister the DRM device from the system. This does the reverse of
727  * drm_dev_register() but does not deallocate the device. The caller must call
728  * drm_dev_unref() to drop their final reference.
729  */
730 void drm_dev_unregister(struct drm_device *dev)
731 {
732 	struct drm_map_list *r_list, *list_temp;
733 
734 	drm_lastclose(dev);
735 
736 	if (dev->driver->unload)
737 		dev->driver->unload(dev);
738 
739 	if (dev->agp)
740 		drm_pci_agp_destroy(dev);
741 
742 	drm_vblank_cleanup(dev);
743 
744 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
745 		drm_legacy_rmmap(dev, r_list->map);
746 
747 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
748 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
749 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
750 }
751 EXPORT_SYMBOL(drm_dev_unregister);
752 
753 /**
754  * drm_dev_set_unique - Set the unique name of a DRM device
755  * @dev: device of which to set the unique name
756  * @fmt: format string for unique name
757  *
758  * Sets the unique name of a DRM device using the specified format string and
759  * a variable list of arguments. Drivers can use this at driver probe time if
760  * the unique name of the devices they drive is static.
761  *
762  * Return: 0 on success or a negative error code on failure.
763  */
764 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
765 {
766 	va_list ap;
767 
768 	kfree(dev->unique);
769 
770 	va_start(ap, fmt);
771 	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
772 	va_end(ap);
773 
774 	return dev->unique ? 0 : -ENOMEM;
775 }
776 EXPORT_SYMBOL(drm_dev_set_unique);
777 #endif
778 
779 /*
780  * DRM Core
781  * The DRM core module initializes all global DRM objects and makes them
782  * available to drivers. Once setup, drivers can probe their respective
783  * devices.
784  * Currently, core management includes:
785  *  - The "DRM-Global" key/value database
786  *  - Global ID management for connectors
787  *  - DRM major number allocation
788  *  - DRM minor management
789  *  - DRM sysfs class
790  *  - DRM debugfs root
791  *
792  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
793  * interface registered on a DRM device, you can request minor numbers from DRM
794  * core. DRM core takes care of major-number management and char-dev
795  * registration. A stub ->open() callback forwards any open() requests to the
796  * registered minor.
797  */
798 
799 #if 0
800 static int drm_stub_open(struct inode *inode, struct file *filp)
801 {
802 	const struct file_operations *new_fops;
803 	struct drm_minor *minor;
804 	int err;
805 
806 	DRM_DEBUG("\n");
807 
808 	mutex_lock(&drm_global_mutex);
809 	minor = drm_minor_acquire(iminor(inode));
810 	if (IS_ERR(minor)) {
811 		err = PTR_ERR(minor);
812 		goto out_unlock;
813 	}
814 
815 	new_fops = fops_get(minor->dev->driver->fops);
816 	if (!new_fops) {
817 		err = -ENODEV;
818 		goto out_release;
819 	}
820 
821 	replace_fops(filp, new_fops);
822 	if (filp->f_op->open)
823 		err = filp->f_op->open(inode, filp);
824 	else
825 		err = 0;
826 
827 out_release:
828 	drm_minor_release(minor);
829 out_unlock:
830 	mutex_unlock(&drm_global_mutex);
831 	return err;
832 }
833 
834 static const struct file_operations drm_stub_fops = {
835 	.owner = THIS_MODULE,
836 	.open = drm_stub_open,
837 	.llseek = noop_llseek,
838 };
839 
840 static int __init drm_core_init(void)
841 {
842 	int ret = -ENOMEM;
843 
844 	drm_global_init();
845 	drm_connector_ida_init();
846 	idr_init(&drm_minors_idr);
847 
848 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
849 		goto err_p1;
850 
851 	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
852 	if (IS_ERR(drm_class)) {
853 		printk(KERN_ERR "DRM: Error creating drm class.\n");
854 		ret = PTR_ERR(drm_class);
855 		goto err_p2;
856 	}
857 
858 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
859 	if (!drm_debugfs_root) {
860 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
861 		ret = -1;
862 		goto err_p3;
863 	}
864 
865 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
866 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
867 	return 0;
868 err_p3:
869 	drm_sysfs_destroy();
870 err_p2:
871 	unregister_chrdev(DRM_MAJOR, "drm");
872 
873 	idr_destroy(&drm_minors_idr);
874 err_p1:
875 	return ret;
876 }
877 
878 static void __exit drm_core_exit(void)
879 {
880 	debugfs_remove(drm_debugfs_root);
881 	drm_sysfs_destroy();
882 
883 	unregister_chrdev(DRM_MAJOR, "drm");
884 
885 	drm_connector_ida_destroy();
886 	idr_destroy(&drm_minors_idr);
887 }
888 
889 module_init(drm_core_init);
890 module_exit(drm_core_exit);
891 #endif
892 
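/*
 * DragonFly-specific driver glue: module and devclass registration, devfs
 * entry points, sysctl nodes and the PCI probe/attach/load helpers follow.
 */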
893 #include <sys/devfs.h>
894 
895 #include <linux/export.h>
896 #include <linux/dmi.h>
897 #include <drm/drmP.h>
898 #include <drm/drm_core.h>
899 
900 #if DRM_DEBUG_DEFAULT_ON == 1
901 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
902     DRM_DEBUGBITS_FAILED_IOCTL)
903 #elif DRM_DEBUG_DEFAULT_ON == 2
904 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
905     DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
906 #else
907 #define DRM_DEBUGBITS_ON (0x0)
908 #endif
909 
910 int drm_notyet_flag = 0;
911 
912 static int drm_load(struct drm_device *dev);
913 drm_pci_id_list_t *drm_find_description(int vendor, int device,
914     drm_pci_id_list_t *idlist);
915 
916 #define DRIVER_SOFTC(unit) \
917 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
918 
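/* Module event handler: fetch the loader tunables when the module is loaded. */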
919 static int
920 drm_modevent(module_t mod, int type, void *data)
921 {
922 
923 	switch (type) {
924 	case MOD_LOAD:
925 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
926 		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
927 		break;
928 	}
929 	return (0);
930 }
931 
932 static moduledata_t drm_mod = {
933 	"drm",
934 	drm_modevent,
935 	0
936 };
937 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
938 MODULE_VERSION(drm, 1);
939 MODULE_DEPEND(drm, agp, 1, 1, 1);
940 MODULE_DEPEND(drm, pci, 1, 1, 1);
941 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
942 
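/* Character-device entry points exposed through devfs as /dev/dri/card%d. */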
943 static struct dev_ops drm_cdevsw = {
944 	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
945 	.d_open =	drm_open,
946 	.d_close =	drm_close,
947 	.d_read =	drm_read,
948 	.d_ioctl =	drm_ioctl,
949 	.d_kqfilter =	drm_kqfilter,
950 	.d_mmap =	drm_mmap,
951 	.d_mmap_single = drm_mmap_single,
952 };
953 
954 static int drm_msi = 0;	/* Disabled by default because MSI has been
955 			   reported to cause freezes with i915
956 			 */
957 TUNABLE_INT("hw.drm.msi.enable", &drm_msi);
958 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
959 SYSCTL_NODE(_hw_drm, OID_AUTO, msi, CTLFLAG_RW, NULL, "DRM device msi");
960 SYSCTL_INT(_hw_drm_msi, OID_AUTO, enable, CTLFLAG_RD, &drm_msi, 0,
961     "Enable MSI interrupts for drm devices");
962 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
963     "DRM debugging");
964 
965 static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
966 	{0x8086, 0x2772}, /* Intel i945G	*/
967 	{0x8086, 0x27A2}, /* Intel i945GM	*/
968 	{0x8086, 0x27AE}, /* Intel i945GME	*/
969 	{0, 0}
970 };
971 
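/*
 * Decide whether MSI should be avoided for this device: prefer the driver's
 * use_msi() callback when it is set, otherwise consult the static blacklist.
 */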
972 static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
973 {
974 	int i = 0;
975 
976 	if (dev->driver->use_msi != NULL) {
977 		int use_msi;
978 
979 		use_msi = dev->driver->use_msi(dev, flags);
980 
981 		return (!use_msi);
982 	}
983 
984 	/* TODO: Maybe move this to a callback in i915? */
985 	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
986 		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
987 		    (drm_msi_blacklist[i].device == dev->pci_device)) {
988 			return 1;
989 		}
990 	}
991 
992 	return 0;
993 }
994 
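/*
 * Generic PCI probe: match the vendor/device pair against the driver's ID
 * list and set the device description on success; return ENXIO otherwise.
 */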
995 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
996 {
997 	drm_pci_id_list_t *id_entry;
998 	int vendor, device;
999 
1000 	vendor = pci_get_vendor(kdev);
1001 	device = pci_get_device(kdev);
1002 
1003 	if (pci_get_class(kdev) != PCIC_DISPLAY)
1004 		return ENXIO;
1005 
1006 	id_entry = drm_find_description(vendor, device, idlist);
1007 	if (id_entry != NULL) {
1008 		if (!device_get_desc(kdev)) {
1009 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
1010 			device_set_desc(kdev, id_entry->name);
1011 		}
1012 		return 0;
1013 	}
1014 
1015 	return ENXIO;
1016 }
1017 
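/*
 * Generic PCI attach: record the PCI coordinates and IDs, allocate the IRQ
 * (MSI if enabled and not blacklisted), initialize the device locks, then
 * run drm_load() and create the devfs node.
 */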
1018 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
1019 {
1020 	struct drm_device *dev;
1021 	drm_pci_id_list_t *id_entry;
1022 	int unit, error;
1023 	u_int irq_flags;
1024 	int msi_enable;
1025 
1026 	unit = device_get_unit(kdev);
1027 	dev = device_get_softc(kdev);
1028 
1029 	if (!strcmp(device_get_name(kdev), "drmsub"))
1030 		dev->dev = device_get_parent(kdev);
1031 	else
1032 		dev->dev = kdev;
1033 
1034 	dev->pci_domain = pci_get_domain(dev->dev);
1035 	dev->pci_bus = pci_get_bus(dev->dev);
1036 	dev->pci_slot = pci_get_slot(dev->dev);
1037 	dev->pci_func = pci_get_function(dev->dev);
1038 
1039 	dev->pci_vendor = pci_get_vendor(dev->dev);
1040 	dev->pci_device = pci_get_device(dev->dev);
1041 	dev->pci_subvendor = pci_get_subvendor(dev->dev);
1042 	dev->pci_subdevice = pci_get_subdevice(dev->dev);
1043 
1044 	id_entry = drm_find_description(dev->pci_vendor,
1045 	    dev->pci_device, idlist);
1046 	dev->id_entry = id_entry;
1047 
1048 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1049 		msi_enable = drm_msi;
1050 
1051 		if (drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
1052 			msi_enable = 0;
1053 		}
1054 
1055 		dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
1056 		    &dev->irqrid, &irq_flags);
1057 
1058 		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
1059 		    &dev->irqrid, irq_flags);
1060 
1061 		if (!dev->irqr) {
1062 			return (ENOENT);
1063 		}
1064 
1065 		dev->irq = (int) rman_get_start(dev->irqr);
1066 	}
1067 
1068 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1069 	lwkt_serialize_init(&dev->irq_lock);
1070 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1071 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1072 
1073 	error = drm_load(dev);
1074 	if (error)
1075 		goto error;
1076 
1077 	error = drm_create_cdevs(kdev);
1078 	if (error)
1079 		goto error;
1080 
1081 	return (error);
1082 error:
1083 	if (dev->irqr) {
1084 		bus_release_resource(dev->dev, SYS_RES_IRQ,
1085 		    dev->irqrid, dev->irqr);
1086 	}
1087 	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1088 		pci_release_msi(dev->dev);
1089 	}
1090 	return (error);
1091 }
1092 
1093 int
1094 drm_create_cdevs(device_t kdev)
1095 {
1096 	struct drm_device *dev;
1097 	int error, unit;
1098 
1099 	unit = device_get_unit(kdev);
1100 	dev = device_get_softc(kdev);
1101 
1102 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1103 				DRM_DEV_MODE, "dri/card%d", unit);
1104 	error = 0;
1105 	if (error == 0)
1106 		dev->devnode->si_drv1 = dev;
1107 	return (error);
1108 }
1109 
1110 #ifndef DRM_DEV_NAME
1111 #define DRM_DEV_NAME "drm"
1112 #endif
1113 
1114 devclass_t drm_devclass;
1115 
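/*
 * Look up a vendor/device pair in a PCI ID list; an entry with device == 0
 * matches any device from that vendor.
 */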
1116 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1117     drm_pci_id_list_t *idlist)
1118 {
1119 	int i = 0;
1120 
1121 	for (i = 0; idlist[i].vendor != 0; i++) {
1122 		if ((idlist[i].vendor == vendor) &&
1123 		    ((idlist[i].device == device) ||
1124 		    (idlist[i].device == 0))) {
1125 			return &idlist[i];
1126 		}
1127 	}
1128 	return NULL;
1129 }
1130 
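/*
 * Per-device initialization: sysctl tree, statistics counters, AGP and GEM
 * setup, and the driver's load() callback.
 */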
1131 static int drm_load(struct drm_device *dev)
1132 {
1133 	int i, retcode;
1134 
1135 	DRM_DEBUG("\n");
1136 
1137 	INIT_LIST_HEAD(&dev->maplist);
1138 
1139 	drm_mem_init();
1140 	drm_sysctl_init(dev);
1141 	INIT_LIST_HEAD(&dev->filelist);
1142 
1143 	dev->counters  = 6;
1144 	dev->types[0]  = _DRM_STAT_LOCK;
1145 	dev->types[1]  = _DRM_STAT_OPENS;
1146 	dev->types[2]  = _DRM_STAT_CLOSES;
1147 	dev->types[3]  = _DRM_STAT_IOCTLS;
1148 	dev->types[4]  = _DRM_STAT_LOCKS;
1149 	dev->types[5]  = _DRM_STAT_UNLOCKS;
1150 
1151 	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
1152 		atomic_set(&dev->counts[i], 0);
1153 
1154 	INIT_LIST_HEAD(&dev->vblank_event_list);
1155 
1156 	if (drm_core_has_AGP(dev)) {
1157 		if (drm_device_is_agp(dev))
1158 			dev->agp = drm_agp_init();
1159 		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
1160 		    dev->agp == NULL) {
1161 			DRM_ERROR("Card isn't AGP, or couldn't initialize "
1162 			    "AGP.\n");
1163 			retcode = ENOMEM;
1164 			goto error;
1165 		}
1166 		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
1167 			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
1168 			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
1169 				dev->agp->agp_mtrr = 1;
1170 		}
1171 	}
1172 
1173 	if (dev->driver->driver_features & DRIVER_GEM) {
1174 		retcode = drm_gem_init(dev);
1175 		if (retcode != 0) {
1176 			DRM_ERROR("Cannot initialize graphics execution "
1177 				  "manager (GEM)\n");
1178 			goto error1;
1179 		}
1180 	}
1181 
1182 	if (dev->driver->load != NULL) {
1183 		DRM_LOCK(dev);
1184 		/* Shared code returns -errno. */
1185 		retcode = -dev->driver->load(dev,
1186 		    dev->id_entry->driver_private);
1187 		if (pci_enable_busmaster(dev->dev))
1188 			DRM_ERROR("Request to enable bus-master failed.\n");
1189 		DRM_UNLOCK(dev);
1190 		if (retcode != 0)
1191 			goto error1;
1192 	}
1193 
1194 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1195 	    dev->driver->name,
1196 	    dev->driver->major,
1197 	    dev->driver->minor,
1198 	    dev->driver->patchlevel,
1199 	    dev->driver->date);
1200 
1201 	return 0;
1202 
1203 error1:
1204 	drm_gem_destroy(dev);
1205 error:
1206 	drm_sysctl_cleanup(dev);
1207 	DRM_LOCK(dev);
1208 	drm_lastclose(dev);
1209 	DRM_UNLOCK(dev);
1210 	if (dev->devnode != NULL)
1211 		destroy_dev(dev->devnode);
1212 
1213 	lockuninit(&dev->vbl_lock);
1214 	lockuninit(&dev->dev_lock);
1215 	lockuninit(&dev->event_lock);
1216 	lockuninit(&dev->struct_mutex);
1217 
1218 	return retcode;
1219 }
1220 
1221 /*
1222  * Close stub needed by devfs; the per-open teardown is done in drm_cdevpriv_dtor().
1223  */
1224 int drm_close(struct dev_close_args *ap)
1225 {
1226 	return 0;
1227 }
1228 
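/*
 * Per-open destructor; tears down the state of one struct drm_file, inlining
 * the equivalent of the Linux drm_release() path (see the markers below).
 */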
1229 void drm_cdevpriv_dtor(void *cd)
1230 {
1231 	struct drm_file *file_priv = cd;
1232 	struct drm_device *dev = file_priv->dev;
1233 	int retcode = 0;
1234 
1235 	DRM_DEBUG("open_count = %d\n", dev->open_count);
1236 
1237 	DRM_LOCK(dev);
1238 
1239 	if (dev->driver->preclose != NULL)
1240 		dev->driver->preclose(dev, file_priv);
1241 
1242 	/* ========================================================
1243 	 * Begin inline drm_release
1244 	 */
1245 
1246 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1247 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1248 
1249 	if (dev->driver->driver_features & DRIVER_GEM)
1250 		drm_gem_release(dev, file_priv);
1251 
1252 	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
1253 	    && dev->lock.file_priv == file_priv) {
1254 		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
1255 			  DRM_CURRENTPID,
1256 			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
1257 		if (dev->driver->reclaim_buffers_locked != NULL)
1258 			dev->driver->reclaim_buffers_locked(dev, file_priv);
1259 
1260 		drm_lock_free(&dev->lock,
1261 		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
1262 
1263 		/* FIXME: may require heavy-handed reset of hardware at
1264 		 * this point, possibly processed via a callback to the
1265 		 * X server.
1266 		 */
1267 	} else if (dev->driver->reclaim_buffers_locked != NULL &&
1268 	    dev->lock.hw_lock != NULL) {
1269 		/* The lock is required to reclaim buffers */
1270 		for (;;) {
1271 			if (!dev->lock.hw_lock) {
1272 				/* Device has been unregistered */
1273 				retcode = EINTR;
1274 				break;
1275 			}
1276 			/* Contention */
1277 			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
1278 			    PCATCH, "drmlk2", 0);
1279 			if (retcode)
1280 				break;
1281 		}
1282 		if (retcode == 0) {
1283 			dev->driver->reclaim_buffers_locked(dev, file_priv);
1284 		}
1285 	}
1286 
1287 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
1288 	    !dev->driver->reclaim_buffers_locked)
1289 		drm_legacy_reclaim_buffers(dev, file_priv);
1290 
1291 	funsetown(&dev->buf_sigio);
1292 
1293 	if (dev->driver->postclose != NULL)
1294 		dev->driver->postclose(dev, file_priv);
1295 	list_del(&file_priv->lhead);
1296 
1297 
1298 	/* ========================================================
1299 	 * End inline drm_release
1300 	 */
1301 
1302 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
1303 	device_unbusy(dev->dev);
1304 	if (--dev->open_count == 0) {
1305 		retcode = drm_lastclose(dev);
1306 	}
1307 
1308 	DRM_UNLOCK(dev);
1309 }
1310 
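/* Export the device's PCI bus ID and modesetting capability via sysctl. */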
1311 int
1312 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1313     struct sysctl_oid *top)
1314 {
1315 	struct sysctl_oid *oid;
1316 
1317 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1318 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1319 	     dev->pci_slot, dev->pci_func);
1320 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1321 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1322 	if (oid == NULL)
1323 		return (ENOMEM);
1324 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1325 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1326 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1327 	if (oid == NULL)
1328 		return (ENOMEM);
1329 
1330 	return (0);
1331 }
1332 
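/*
 * Single-object mmap entry point: forward the request to TTM when a TTM
 * buffer-object device is present, otherwise to GEM, else fail with ENODEV.
 */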
1333 int
1334 drm_mmap_single(struct dev_mmap_single_args *ap)
1335 {
1336 	struct drm_device *dev;
1337 	struct cdev *kdev = ap->a_head.a_dev;
1338 	vm_ooffset_t *offset = ap->a_offset;
1339 	vm_size_t size = ap->a_size;
1340 	struct vm_object **obj_res = ap->a_object;
1341 	int nprot = ap->a_nprot;
1342 
1343 	dev = drm_get_device_from_kdev(kdev);
1344 	if (dev->drm_ttm_bdev != NULL) {
1345 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1346 		    obj_res, nprot));
1347 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1348 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1349 	} else {
1350 		return (ENODEV);
1351 	}
1352 }
1353 
1354 #if DRM_LINUX
1355 
1356 #include <sys/sysproto.h>
1357 
1358 MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
1359 
1360 #define LINUX_IOCTL_DRM_MIN		0x6400
1361 #define LINUX_IOCTL_DRM_MAX		0x64ff
1362 
1363 static linux_ioctl_function_t drm_linux_ioctl;
1364 static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
1365     LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
1366 
1367 /* The bits for in/out are switched on Linux */
1368 #define LINUX_IOC_IN	IOC_OUT
1369 #define LINUX_IOC_OUT	IOC_IN
1370 
1371 static int
1372 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1373 {
1374 	int error;
1375 	int cmd = args->cmd;
1376 
1377 	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1378 	if (cmd & LINUX_IOC_IN)
1379 		args->cmd |= IOC_IN;
1380 	if (cmd & LINUX_IOC_OUT)
1381 		args->cmd |= IOC_OUT;
1382 
1383 	error = ioctl(p, (struct ioctl_args *)args);
1384 
1385 	return error;
1386 }
1387 #endif /* DRM_LINUX */
1388 
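/*
 * Core module init/exit, run via SYSINIT/SYSUNINIT below: set up the DRM
 * global state and, when enabled, the Linux ioctl translation handler.
 */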
1389 static int
1390 drm_core_init(void *arg)
1391 {
1392 
1393 	drm_global_init();
1394 
1395 #if DRM_LINUX
1396 	linux_ioctl_register_handler(&drm_handler);
1397 #endif /* DRM_LINUX */
1398 
1399 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1400 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1401 	return 0;
1402 }
1403 
1404 static void
1405 drm_core_exit(void *arg)
1406 {
1407 
1408 #if DRM_LINUX
1409 	linux_ioctl_unregister_handler(&drm_handler);
1410 #endif /* DRM_LINUX */
1411 
1412 	drm_global_release();
1413 }
1414 
1415 SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1416     drm_core_init, NULL);
1417 SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1418     drm_core_exit, NULL);
1419 
1420 
1421 #include <linux/dmi.h>
1422 
1423 /*
1424  * Check if dmi_system_id structure matches system DMI data
1425  */
1426 static bool
1427 dmi_found(const struct dmi_system_id *dsi)
1428 {
1429 	int i, slot;
1430 	bool found = false;
1431 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1432 
1433 	sys_vendor = kgetenv("smbios.system.maker");
1434 	board_vendor = kgetenv("smbios.planar.maker");
1435 	product_name = kgetenv("smbios.system.product");
1436 	board_name = kgetenv("smbios.planar.product");
1437 
1438 	for (i = 0; i < NELEM(dsi->matches); i++) {
1439 		slot = dsi->matches[i].slot;
1440 		switch (slot) {
1441 		case DMI_NONE:
1442 			break;
1443 		case DMI_SYS_VENDOR:
1444 			if (sys_vendor != NULL &&
1445 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1446 				break;
1447 			else
1448 				goto done;
1449 		case DMI_BOARD_VENDOR:
1450 			if (board_vendor != NULL &&
1451 			    !strcmp(board_vendor, dsi->matches[i].substr))
1452 				break;
1453 			else
1454 				goto done;
1455 		case DMI_PRODUCT_NAME:
1456 			if (product_name != NULL &&
1457 			    !strcmp(product_name, dsi->matches[i].substr))
1458 				break;
1459 			else
1460 				goto done;
1461 		case DMI_BOARD_NAME:
1462 			if (board_name != NULL &&
1463 			    !strcmp(board_name, dsi->matches[i].substr))
1464 				break;
1465 			else
1466 				goto done;
1467 		default:
1468 			goto done;
1469 		}
1470 	}
1471 	found = true;
1472 
1473 done:
1474 	if (sys_vendor != NULL)
1475 		kfreeenv(sys_vendor);
1476 	if (board_vendor != NULL)
1477 		kfreeenv(board_vendor);
1478 	if (product_name != NULL)
1479 		kfreeenv(product_name);
1480 	if (board_name != NULL)
1481 		kfreeenv(board_name);
1482 
1483 	return found;
1484 }
1485 
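/*
 * Walk a dmi_system_id table, counting entries that match the running
 * system and invoking their callbacks; stop early when a callback returns
 * nonzero. Returns the number of matches.
 */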
1486 int dmi_check_system(const struct dmi_system_id *sysid)
1487 {
1488 	const struct dmi_system_id *dsi;
1489 	int num = 0;
1490 
1491 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1492 		if (dmi_found(dsi)) {
1493 			num++;
1494 			if (dsi->callback && dsi->callback(dsi))
1495 				break;
1496 		}
1497 	}
1498 	return (num);
1499 }
1500