xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 1dedbd3b)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include "drm_crtc_internal.h"
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
36 /*
37  * drm_debug: Enable debug output.
38  * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
39  */
40 #ifdef __DragonFly__
41 /* Provides three levels of debug: off, minimal, verbose */
42 #if DRM_DEBUG_DEFAULT_ON == 1
43 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
44 			  DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL)
45 #elif DRM_DEBUG_DEFAULT_ON == 2
46 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
47 			  DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL |	\
48 			  DRM_UT_PID  | DRM_UT_IOCTL  | DRM_UT_VBLANK)
49 #else
50 #define DRM_DEBUGBITS_ON (0x0)
51 #endif
52 unsigned int drm_debug = DRM_DEBUGBITS_ON;	/* defaults to 0 */
53 #else
54 unsigned int drm_debug = 0;
55 #endif /* __DragonFly__ */
56 EXPORT_SYMBOL(drm_debug);
57 
58 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
59 MODULE_DESCRIPTION("DRM shared core routines");
60 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
61 "\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
62 "\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
63 "\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
64 "\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
65 "\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
66 "\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
67 module_param_named(debug, drm_debug, int, 0600);
68 
69 static DEFINE_MUTEX(drm_minor_lock);
70 static struct idr drm_minors_idr;
71 
72 #if 0
73 static struct dentry *drm_debugfs_root;
74 #endif
75 
76 void drm_ut_debug_printk(const char *function_name, const char *format, ...)
77 {
78 	va_list args;
79 
80 	if (unlikely(drm_debug & DRM_UT_PID)) {
81 		kprintf("[" DRM_NAME ":pid%d:%s] ",
82 		    DRM_CURRENTPID, function_name);
83 	} else {
84 		kprintf("[" DRM_NAME ":%s] ", function_name);
85 	}
86 
87 	va_start(args, format);
88 	kvprintf(format, args);
89 	va_end(args);
90 }
91 
92 #define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
93 
94 void drm_dev_printk(const struct device *dev, const char *level,
95 		    unsigned int category, const char *function_name,
96 		    const char *prefix, const char *format, ...)
97 {
98 	struct va_format vaf;
99 	va_list args;
100 
101 	if (category != DRM_UT_NONE && !(drm_debug & category))
102 		return;
103 
104 	va_start(args, format);
105 	vaf.fmt = format;
106 	vaf.va = &args;
107 
108 	if (dev)
109 		dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
110 			   &vaf);
111 	else
112 		printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
113 
114 	va_end(args);
115 }
116 EXPORT_SYMBOL(drm_dev_printk);
117 
118 void drm_printk(const char *level, unsigned int category,
119 		const char *format, ...)
120 {
121 	struct va_format vaf;
122 	va_list args;
123 
124 	if (category != DRM_UT_NONE && !(drm_debug & category))
125 		return;
126 
127 	va_start(args, format);
128 	vaf.fmt = format;
129 	vaf.va = &args;
130 
131 	printk("%s" "[" DRM_NAME ":%ps]%s %pV",
132 	       level, __builtin_return_address(0),
133 	       strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);
134 
135 	va_end(args);
136 }
137 EXPORT_SYMBOL(drm_printk);
138 
139 /*
140  * DRM Minors
141  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
142  * of them is represented by a drm_minor object. Depending on the capabilities
143  * of the device-driver, different interfaces are registered.
144  *
145  * Minors can be accessed via dev->$minor_name. This pointer is either
146  * NULL or a valid drm_minor pointer and stays valid as long as the device is
147  * valid. This means, DRM minors have the same life-time as the underlying
148  * device. However, this doesn't mean that the minor is active. Minors are
149  * registered and unregistered dynamically according to device-state.
150  */
151 
152 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
153 					     unsigned int type)
154 {
155 	switch (type) {
156 	case DRM_MINOR_PRIMARY:
157 		return &dev->primary;
158 	case DRM_MINOR_RENDER:
159 		return &dev->render;
160 	case DRM_MINOR_CONTROL:
161 		return &dev->control;
162 	default:
163 		return NULL;
164 	}
165 }
166 
167 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
168 {
169 	struct drm_minor *minor;
170 	unsigned long flags;
171 	int r;
172 
173 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
174 	if (!minor)
175 		return -ENOMEM;
176 
177 	minor->type = type;
178 	minor->dev = dev;
179 
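	/*
	 * Each minor type owns a 64-entry window of minor numbers, so the
	 * ID allocated below always falls within [64 * type, 64 * (type + 1)).
	 */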
180 	idr_preload(GFP_KERNEL);
181 	spin_lock_irqsave(&drm_minor_lock, flags);
182 	r = idr_alloc(&drm_minors_idr,
183 		      NULL,
184 		      64 * type,
185 		      64 * (type + 1),
186 		      GFP_NOWAIT);
187 	spin_unlock_irqrestore(&drm_minor_lock, flags);
188 	idr_preload_end();
189 
190 	if (r < 0)
191 		goto err_free;
192 
193 	minor->index = r;
194 
195 #if 0
196 	minor->kdev = drm_sysfs_minor_alloc(minor);
197 	if (IS_ERR(minor->kdev)) {
198 		r = PTR_ERR(minor->kdev);
199 		goto err_index;
200 	}
201 #endif
202 
203 	*drm_minor_get_slot(dev, type) = minor;
204 	return 0;
205 
206 #if 0
207 err_index:
208 	spin_lock_irqsave(&drm_minor_lock, flags);
209 	idr_remove(&drm_minors_idr, minor->index);
210 	spin_unlock_irqrestore(&drm_minor_lock, flags);
211 #endif
212 err_free:
213 	kfree(minor);
214 	return r;
215 }
216 
217 static void drm_minor_free(struct drm_device *dev, unsigned int type)
218 {
219 	struct drm_minor **slot, *minor;
220 	unsigned long flags;
221 
222 	slot = drm_minor_get_slot(dev, type);
223 	minor = *slot;
224 	if (!minor)
225 		return;
226 
227 #if 0
228 	put_device(minor->kdev);
229 #endif
230 
231 	spin_lock_irqsave(&drm_minor_lock, flags);
232 	idr_remove(&drm_minors_idr, minor->index);
233 	spin_unlock_irqrestore(&drm_minor_lock, flags);
234 
235 	kfree(minor);
236 	*slot = NULL;
237 }
238 
239 static int drm_minor_register(struct drm_device *dev, unsigned int type)
240 {
241 	struct drm_minor *minor;
242 	unsigned long flags;
243 #if 0
244 	int ret;
245 #endif
246 
247 	DRM_DEBUG("\n");
248 
249 	minor = *drm_minor_get_slot(dev, type);
250 	if (!minor)
251 		return 0;
252 
253 #if 0
254 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
255 	if (ret) {
256 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
257 		return ret;
258 	}
259 
260 	ret = device_add(minor->kdev);
261 	if (ret)
262 		goto err_debugfs;
263 #endif
264 
265 	/* replace NULL with @minor so lookups will succeed from now on */
266 	spin_lock_irqsave(&drm_minor_lock, flags);
267 	idr_replace(&drm_minors_idr, minor, minor->index);
268 	spin_unlock_irqrestore(&drm_minor_lock, flags);
269 
270 	DRM_DEBUG("new minor registered %d\n", minor->index);
271 	return 0;
272 
273 #if 0
274 err_debugfs:
275 	drm_debugfs_cleanup(minor);
276 	return ret;
277 #endif
278 }
279 
280 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
281 {
282 	struct drm_minor *minor;
283 	unsigned long flags;
284 
285 	minor = *drm_minor_get_slot(dev, type);
286 #if 0
287 	if (!minor || !device_is_registered(minor->kdev))
288 #else
289 	if (!minor)
290 #endif
291 		return;
292 
293 	/* replace @minor with NULL so lookups will fail from now on */
294 	spin_lock_irqsave(&drm_minor_lock, flags);
295 	idr_replace(&drm_minors_idr, NULL, minor->index);
296 	spin_unlock_irqrestore(&drm_minor_lock, flags);
297 
298 #if 0
299 	device_del(minor->kdev);
300 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
301 #endif
302 	drm_debugfs_cleanup(minor);
303 }
304 
305 /**
306  * drm_minor_acquire - Acquire a DRM minor
307  * @minor_id: Minor ID of the DRM-minor
308  *
309  * Looks up the given minor-ID and returns the respective DRM-minor object. The
310  * reference-count of the underlying device is increased, so you must release this
311  * object with drm_minor_release().
312  *
313  * As long as you hold this minor, it is guaranteed that the object and the
314  * minor->dev pointer will stay valid! However, the device may get unplugged and
315  * unregistered while you hold the minor.
316  *
317  * Returns:
318  * Pointer to minor-object with increased device-refcount, or an ERR_PTR on
319  * failure.
320  */
321 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
322 {
323 	struct drm_minor *minor;
324 	unsigned long flags;
325 
326 	spin_lock_irqsave(&drm_minor_lock, flags);
327 	minor = idr_find(&drm_minors_idr, minor_id);
328 	if (minor)
329 		drm_dev_ref(minor->dev);
330 	spin_unlock_irqrestore(&drm_minor_lock, flags);
331 
332 	if (!minor) {
333 		return ERR_PTR(-ENODEV);
334 	} else if (drm_device_is_unplugged(minor->dev)) {
335 		drm_dev_unref(minor->dev);
336 		return ERR_PTR(-ENODEV);
337 	}
338 
339 	return minor;
340 }
341 
342 /**
343  * drm_minor_release - Release DRM minor
344  * @minor: Pointer to DRM minor object
345  *
346  * Release a minor that was previously acquired via drm_minor_acquire().
347  */
348 void drm_minor_release(struct drm_minor *minor)
349 {
350 	drm_dev_unref(minor->dev);
351 }
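
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the expected pairing of the two helpers above when resolving a minor ID,
 * mirroring what drm_stub_open() further below does on Linux.
 *
 *	struct drm_minor *minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor->dev while the reference is held ...
 *	drm_minor_release(minor);
 */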
352 
353 #if 0
354 /**
355  * DOC: driver instance overview
356  *
357  * A device instance for a drm driver is represented by struct &drm_device. This
358  * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
359  * callbacks implemented by the driver. The driver then needs to initialize all
360  * the various subsystems for the drm device like memory management, vblank
361  * handling, modesetting support and initial output configuration, plus obviously
362  * initialize all the corresponding hardware bits. Finally when everything is up
363  * and running and ready for userspace the device instance can be published
364  * using drm_dev_register().
365  *
366  * There is also deprecated support for initializing device instances using
367  * bus-specific helpers and the ->load() callback. But due to
368  * backwards-compatibility needs, the device instance has to be published too
369  * early, which requires unpretty global locking to make safe and is therefore
370  * only supported for existing drivers not yet converted to the new scheme.
371  *
372  * When cleaning up a device instance everything needs to be done in reverse:
373  * First unpublish the device instance with drm_dev_unregister(). Then clean up
374  * any other resources allocated at device initialization and drop the driver's
375  * reference to &drm_device using drm_dev_unref().
376  *
377  * Note that the lifetime rules for a &drm_device instance still carry a lot of
378  * historical baggage. Hence use the reference counting provided by
379  * drm_dev_ref() and drm_dev_unref() only carefully.
380  *
381  * Also note that embedding of &drm_device is currently not (yet) supported (but
382  * it would be easy to add). Drivers can store driver-private data in the
383  * dev_priv field of &drm_device.
384  */
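
/*
 * Illustrative sketch (editor's addition; foo_drm_driver is a hypothetical
 * name): the lifecycle described above, as seen from a bus-specific
 * ->probe() callback and the matching teardown path.
 *
 *	struct drm_device *ddev;
 *	int ret;
 *
 *	ddev = drm_dev_alloc(&foo_drm_driver, parent);
 *	if (IS_ERR(ddev))
 *		return PTR_ERR(ddev);
 *	... set up memory management, vblank handling, modesetting ...
 *	ret = drm_dev_register(ddev, 0);
 *	if (ret) {
 *		drm_dev_unref(ddev);
 *		return ret;
 *	}
 *
 *	Teardown happens in reverse:
 *	drm_dev_unregister(ddev);
 *	drm_dev_unref(ddev);
 */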
385 
386 static int drm_dev_set_unique(struct drm_device *dev, const char *name)
387 {
388 	if (!name)
389 		return -EINVAL;
390 
391 	kfree(dev->unique);
392 	dev->unique = kstrdup(name, GFP_KERNEL);
393 
394 	return dev->unique ? 0 : -ENOMEM;
395 }
396 
397 /**
398  * drm_put_dev - Unregister and release a DRM device
399  * @dev: DRM device
400  *
401  * Called at module unload time or when a PCI device is unplugged.
402  *
403  * Cleans up the whole DRM device, calling drm_lastclose().
404  *
405  * Note: Use of this function is deprecated. It will eventually go away
406  * completely.  Please use drm_dev_unregister() and drm_dev_unref() explicitly
407  * instead to make sure that the device is no longer accessible to userspace
408  * while teardown is in progress, ensuring that userspace can't access an
409  * inconsistent state.
410  */
411 void drm_put_dev(struct drm_device *dev)
412 {
413 	DRM_DEBUG("\n");
414 
415 	if (!dev) {
416 		DRM_ERROR("cleanup called no dev\n");
417 		return;
418 	}
419 
420 	drm_dev_unregister(dev);
421 	drm_dev_unref(dev);
422 }
423 EXPORT_SYMBOL(drm_put_dev);
424 
425 void drm_unplug_dev(struct drm_device *dev)
426 {
427 	/* for a USB device */
428 	drm_dev_unregister(dev);
429 
430 	mutex_lock(&drm_global_mutex);
431 
432 	drm_device_set_unplugged(dev);
433 
434 	if (dev->open_count == 0) {
435 		drm_put_dev(dev);
436 	}
437 	mutex_unlock(&drm_global_mutex);
438 }
439 EXPORT_SYMBOL(drm_unplug_dev);
440 
441 /*
442  * DRM internal mount
443  * We want to be able to allocate our own "struct address_space" to control
444  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
445  * stand-alone address_space objects, so we need an underlying inode. As there
446  * is no way to allocate an independent inode easily, we need a fake internal
447  * VFS mount-point.
448  *
449  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
450  * frees it again. You are allowed to use iget() and iput() to get references to
451  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
452  * drm_fs_inode_free() call (which does not have to be the last iput()).
453  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
454  * between multiple inode-users. You could, technically, call
455  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
456  * iput(), but this way you'd end up with a new vfsmount for each inode.
457  */
458 
459 static int drm_fs_cnt;
460 static struct vfsmount *drm_fs_mnt;
461 
462 static const struct dentry_operations drm_fs_dops = {
463 	.d_dname	= simple_dname,
464 };
465 
466 static const struct super_operations drm_fs_sops = {
467 	.statfs		= simple_statfs,
468 };
469 
470 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
471 				   const char *dev_name, void *data)
472 {
473 	return mount_pseudo(fs_type,
474 			    "drm:",
475 			    &drm_fs_sops,
476 			    &drm_fs_dops,
477 			    0x010203ff);
478 }
479 
480 static struct file_system_type drm_fs_type = {
481 	.name		= "drm",
482 	.owner		= THIS_MODULE,
483 	.mount		= drm_fs_mount,
484 	.kill_sb	= kill_anon_super,
485 };
486 
487 static struct inode *drm_fs_inode_new(void)
488 {
489 	struct inode *inode;
490 	int r;
491 
492 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
493 	if (r < 0) {
494 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
495 		return ERR_PTR(r);
496 	}
497 
498 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
499 	if (IS_ERR(inode))
500 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
501 
502 	return inode;
503 }
504 
505 static void drm_fs_inode_free(struct inode *inode)
506 {
507 	if (inode) {
508 		iput(inode);
509 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
510 	}
511 }
512 #endif
513 
514 /**
515  * drm_dev_init - Initialise new DRM device
516  * @dev: DRM device
517  * @driver: DRM driver
518  * @parent: Parent device object
519  *
520  * Initialize a new DRM device. No device registration is done.
521  * Call drm_dev_register() to advertise the device to user space and register it
522  * with other core subsystems. This should be done last in the device
523  * initialization sequence to make sure userspace can't access an inconsistent
524  * state.
525  *
526  * The initial ref-count of the object is 1. Use drm_dev_ref() and
527  * drm_dev_unref() to take and drop further ref-counts.
528  *
529  * Note that for purely virtual devices @parent can be NULL.
530  *
531  * Drivers that do not want to allocate their own device struct
532  * embedding struct &drm_device can call drm_dev_alloc() instead.
533  *
534  * RETURNS:
535  * 0 on success, or error code on failure.
536  */
537 int drm_dev_init(struct drm_device *dev,
538 		 struct drm_driver *driver,
539 		 struct device *parent)
540 {
541 	int ret;
542 
543 	kref_init(&dev->ref);
544 	dev->dev = parent;
545 	dev->driver = driver;
546 
547 	INIT_LIST_HEAD(&dev->filelist);
548 	INIT_LIST_HEAD(&dev->ctxlist);
549 	INIT_LIST_HEAD(&dev->vmalist);
550 	INIT_LIST_HEAD(&dev->maplist);
551 	INIT_LIST_HEAD(&dev->vblank_event_list);
552 
553 	spin_init(&dev->buf_lock, "drmdbl");
554 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
555 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
556 	lockinit(&dev->filelist_mutex, "drmflm", 0, LK_CANRECURSE);
557 	lockinit(&dev->ctxlist_mutex, "drmclm", 0, LK_CANRECURSE);
558 	lockinit(&dev->master_mutex, "drmmm", 0, LK_CANRECURSE);
559 
560 #ifndef __DragonFly__
561 	dev->anon_inode = drm_fs_inode_new();
562 	if (IS_ERR(dev->anon_inode)) {
563 		ret = PTR_ERR(dev->anon_inode);
564 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
565 		goto err_free;
566 	}
567 #else
568 	dev->anon_inode = NULL;
569 #endif
570 
571 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
572 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
573 		if (ret)
574 			goto err_minors;
575 	}
576 
577 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
578 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
579 		if (ret)
580 			goto err_minors;
581 	}
582 
583 	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
584 	if (ret)
585 		goto err_minors;
586 
587 	ret = drm_ht_create(&dev->map_hash, 12);
588 	if (ret)
589 		goto err_minors;
590 
591 	drm_legacy_ctxbitmap_init(dev);
592 
593 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
594 		ret = drm_gem_init(dev);
595 		if (ret) {
596 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
597 			goto err_ctxbitmap;
598 		}
599 	}
600 
601 #if 0
602 	if (parent) {
603 		ret = drm_dev_set_unique(dev, dev_name(parent));
604 		if (ret)
605 			goto err_setunique;
606 	}
607 #endif
608 
609 	return 0;
610 
611 #if 0
612 err_setunique:
613 	if (drm_core_check_feature(dev, DRIVER_GEM))
614 		drm_gem_destroy(dev);
615 #endif
616 err_ctxbitmap:
617 	drm_legacy_ctxbitmap_cleanup(dev);
618 	drm_ht_remove(&dev->map_hash);
619 err_minors:
620 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
621 	drm_minor_free(dev, DRM_MINOR_RENDER);
622 	drm_minor_free(dev, DRM_MINOR_CONTROL);
623 #ifndef __DragonFly__
624 	drm_fs_inode_free(dev->anon_inode);
625 err_free:
626 #endif
627 	mutex_destroy(&dev->master_mutex);
628 	return ret;
629 }
630 EXPORT_SYMBOL(drm_dev_init);
631 
632 /**
633  * drm_dev_alloc - Allocate new DRM device
634  * @driver: DRM driver to allocate device for
635  * @parent: Parent device object
636  *
637  * Allocate and initialize a new DRM device. No device registration is done.
638  * Call drm_dev_register() to advertise the device to user space and register it
639  * with other core subsystems. This should be done last in the device
640  * initialization sequence to make sure userspace can't access an inconsistent
641  * state.
642  *
643  * The initial ref-count of the object is 1. Use drm_dev_ref() and
644  * drm_dev_unref() to take and drop further ref-counts.
645  *
646  * Note that for purely virtual devices @parent can be NULL.
647  *
648  * Drivers that wish to subclass or embed struct &drm_device into their
649  * own struct should look at using drm_dev_init() instead.
650  *
651  * RETURNS:
652  * Pointer to new DRM device, or ERR_PTR on failure.
653  */
654 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
655 				 struct device *parent)
656 {
657 	struct drm_device *dev;
658 	int ret;
659 
660 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
661 	if (!dev)
662 		return ERR_PTR(-ENOMEM);
663 
664 	ret = drm_dev_init(dev, driver, parent);
665 	if (ret) {
666 		kfree(dev);
667 		return ERR_PTR(ret);
668 	}
669 
670 	return dev;
671 }
672 EXPORT_SYMBOL(drm_dev_alloc);
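
/*
 * Illustrative sketch (editor's addition; foo_device and foo_drm_driver are
 * hypothetical names): a driver that embeds struct drm_device in its own
 * device structure, as suggested above, would call drm_dev_init() on the
 * embedded member instead of using drm_dev_alloc().
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		... driver-private state ...
 *	};
 *
 *	struct foo_device *fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
 *	if (!fdev)
 *		return -ENOMEM;
 *	ret = drm_dev_init(&fdev->drm, &foo_drm_driver, parent);
 *	if (ret) {
 *		kfree(fdev);
 *		return ret;
 *	}
 */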
673 
674 #if 0
675 static void drm_dev_release(struct kref *ref)
676 {
677 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
678 
679 	if (drm_core_check_feature(dev, DRIVER_GEM))
680 		drm_gem_destroy(dev);
681 
682 	drm_legacy_ctxbitmap_cleanup(dev);
683 	drm_ht_remove(&dev->map_hash);
684 	drm_fs_inode_free(dev->anon_inode);
685 
686 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
687 	drm_minor_free(dev, DRM_MINOR_RENDER);
688 	drm_minor_free(dev, DRM_MINOR_CONTROL);
689 
690 	mutex_destroy(&dev->master_mutex);
691 	kfree(dev->unique);
692 	kfree(dev);
693 }
694 #endif
695 
696 /**
697  * drm_dev_ref - Take reference of a DRM device
698  * @dev: device to take reference of or NULL
699  *
700  * This increases the ref-count of @dev by one. You *must* already own a
701  * reference when calling this. Use drm_dev_unref() to drop this reference
702  * again.
703  *
704  * This function never fails. However, this function does not provide *any*
705  * guarantee that the device is alive or running. It only provides a
706  * reference to the object and the memory associated with it.
707  */
708 void drm_dev_ref(struct drm_device *dev)
709 {
710 	if (dev)
711 		kref_get(&dev->ref);
712 }
713 EXPORT_SYMBOL(drm_dev_ref);
714 
715 /**
716  * drm_dev_unref - Drop reference of a DRM device
717  * @dev: device to drop reference of or NULL
718  *
719  * This decreases the ref-count of @dev by one. The device is destroyed if the
720  * ref-count drops to zero.
721  */
722 void drm_dev_unref(struct drm_device *dev)
723 {
724 #if 0
725 	if (dev)
726 		kref_put(&dev->ref, drm_dev_release);
727 #endif
728 }
729 EXPORT_SYMBOL(drm_dev_unref);
730 
731 /**
732  * drm_dev_register - Register DRM device
733  * @dev: Device to register
734  * @flags: Flags passed to the driver's .load() function
735  *
736  * Register the DRM device @dev with the system, advertise device to user-space
737  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
738  * previously.
739  *
740  * Never call this twice on any device!
741  *
742  * NOTE: To ensure backward compatibility with existing drivers, this
743  * function calls the ->load() method after registering the device nodes,
744  * creating race conditions. Usage of the ->load() method is therefore
745  * deprecated; drivers must perform all initialization before calling
746  * drm_dev_register().
747  *
748  * RETURNS:
749  * 0 on success, negative error code on failure.
750  */
751 int drm_dev_register(struct drm_device *dev, unsigned long flags)
752 {
753 	int ret;
754 
755 	mutex_lock(&drm_global_mutex);
756 
757 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
758 	if (ret)
759 		goto err_minors;
760 
761 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
762 	if (ret)
763 		goto err_minors;
764 
765 	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
766 	if (ret)
767 		goto err_minors;
768 
769 	if (dev->driver->load) {
770 		ret = dev->driver->load(dev, flags);
771 		if (ret)
772 			goto err_minors;
773 	}
774 
775 	if (drm_core_check_feature(dev, DRIVER_MODESET))
776 		drm_modeset_register_all(dev);
777 
778 	ret = 0;
779 	goto out_unlock;
780 
781 err_minors:
782 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
783 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
784 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
785 out_unlock:
786 	mutex_unlock(&drm_global_mutex);
787 	return ret;
788 }
789 EXPORT_SYMBOL(drm_dev_register);
790 
791 /**
792  * drm_dev_unregister - Unregister DRM device
793  * @dev: Device to unregister
794  *
795  * Unregister the DRM device from the system. This does the reverse of
796  * drm_dev_register() but does not deallocate the device. The caller must call
797  * drm_dev_unref() to drop their final reference.
798  *
799  * This should be called first in the device teardown code to make sure
800  * userspace can't access the device instance any more.
801  */
802 void drm_dev_unregister(struct drm_device *dev)
803 {
804 	struct drm_map_list *r_list, *list_temp;
805 
806 	drm_lastclose(dev);
807 
808 	if (drm_core_check_feature(dev, DRIVER_MODESET))
809 		drm_modeset_unregister_all(dev);
810 
811 	if (dev->driver->unload)
812 		dev->driver->unload(dev);
813 
814 #if 0
815 	if (dev->agp)
816 		drm_pci_agp_destroy(dev);
817 #endif
818 
819 	drm_vblank_cleanup(dev);
820 
821 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
822 		drm_legacy_rmmap(dev, r_list->map);
823 
824 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
825 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
826 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
827 }
828 EXPORT_SYMBOL(drm_dev_unregister);
829 
830 /*
831  * DRM Core
832  * The DRM core module initializes all global DRM objects and makes them
833  * available to drivers. Once setup, drivers can probe their respective
834  * devices.
835  * Currently, core management includes:
836  *  - The "DRM-Global" key/value database
837  *  - Global ID management for connectors
838  *  - DRM major number allocation
839  *  - DRM minor management
840  *  - DRM sysfs class
841  *  - DRM debugfs root
842  *
843  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
844  * interface registered on a DRM device, you can request minor numbers from DRM
845  * core. DRM core takes care of major-number management and char-dev
846  * registration. A stub ->open() callback forwards any open() requests to the
847  * registered minor.
848  */
849 
850 #if 0
851 static int drm_stub_open(struct inode *inode, struct file *filp)
852 {
853 	const struct file_operations *new_fops;
854 	struct drm_minor *minor;
855 	int err;
856 
857 	DRM_DEBUG("\n");
858 
859 	mutex_lock(&drm_global_mutex);
860 	minor = drm_minor_acquire(iminor(inode));
861 	if (IS_ERR(minor)) {
862 		err = PTR_ERR(minor);
863 		goto out_unlock;
864 	}
865 
866 	new_fops = fops_get(minor->dev->driver->fops);
867 	if (!new_fops) {
868 		err = -ENODEV;
869 		goto out_release;
870 	}
871 
872 	replace_fops(filp, new_fops);
873 	if (filp->f_op->open)
874 		err = filp->f_op->open(inode, filp);
875 	else
876 		err = 0;
877 
878 out_release:
879 	drm_minor_release(minor);
880 out_unlock:
881 	mutex_unlock(&drm_global_mutex);
882 	return err;
883 }
884 
885 static const struct file_operations drm_stub_fops = {
886 	.owner = THIS_MODULE,
887 	.open = drm_stub_open,
888 	.llseek = noop_llseek,
889 };
890 #endif
891 
892 static void drm_core_exit(void)
893 {
894 #if 0
895 	unregister_chrdev(DRM_MAJOR, "drm");
896 	debugfs_remove(drm_debugfs_root);
897 	drm_sysfs_destroy();
898 #endif
899 	idr_destroy(&drm_minors_idr);
900 	drm_connector_ida_destroy();
901 	drm_global_release();
902 }
903 
904 static int __init drm_core_init(void)
905 {
906 #if 0
907 	int ret;
908 #endif
909 
910 	drm_global_init();
911 	drm_connector_ida_init();
912 	idr_init(&drm_minors_idr);
913 
914 #if 0
915 	ret = drm_sysfs_init();
916 	if (ret < 0) {
917 		DRM_ERROR("Cannot create DRM class: %d\n", ret);
918 		goto error;
919 	}
920 
921 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
922 	if (!drm_debugfs_root) {
923 		ret = -ENOMEM;
924 		DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
925 		goto error;
926 	}
927 
928 	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
929 	if (ret < 0)
930 		goto error;
931 #endif
932 
933 	DRM_INFO("Initialized\n");
934 	return 0;
935 
936 #if 0
937 error:
938 	drm_core_exit();
939 	return ret;
940 #endif
941 }
942 
943 module_init(drm_core_init);
944 module_exit(drm_core_exit);
945 
946 #include <sys/devfs.h>
947 
948 #include <linux/export.h>
949 #include <linux/dmi.h>
950 #include <drm/drmP.h>
951 
952 static int drm_load(struct drm_device *dev);
953 drm_pci_id_list_t *drm_find_description(int vendor, int device,
954     drm_pci_id_list_t *idlist);
955 
956 #define DRIVER_SOFTC(unit) \
957 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
958 
959 static int
960 drm_modevent(module_t mod, int type, void *data)
961 {
962 
963 	switch (type) {
964 	case MOD_LOAD:
965 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
966 		linux_task_drop_callback = linux_task_drop;
967 		linux_proc_drop_callback = linux_proc_drop;
968 		break;
969 	case MOD_UNLOAD:
970 		linux_task_drop_callback = NULL;
971 		linux_proc_drop_callback = NULL;
972 		break;
973 	}
974 	return (0);
975 }
976 
977 static moduledata_t drm_mod = {
978 	"drm",
979 	drm_modevent,
980 	0
981 };
982 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
983 MODULE_VERSION(drm, 1);
984 MODULE_DEPEND(drm, agp, 1, 1, 1);
985 MODULE_DEPEND(drm, pci, 1, 1, 1);
986 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
987 
988 static struct dev_ops drm_cdevsw = {
989 	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
990 	.d_open =	drm_open,
991 	.d_close =	drm_close,
992 	.d_read =	drm_read,
993 	.d_ioctl =	drm_ioctl,
994 	.d_kqfilter =	drm_kqfilter,
995 	.d_mmap =	drm_mmap,
996 	.d_mmap_single = drm_mmap_single,
997 };
998 
999 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
1000 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
1001     "DRM debugging");
1002 
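/*
 * Illustrative usage (editor's addition): on DragonFly the debug bitmask can
 * be set at boot through the "drm.debug" loader tunable fetched in
 * drm_modevent() above, or at run time through the hw.drm.debug sysctl
 * declared just above, for example
 *
 *	sysctl hw.drm.debug=6
 *
 * which enables the DRIVER (0x02) and KMS (0x04) categories listed in the
 * module parameter description near the top of this file.
 */
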
1003 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
1004 {
1005 	drm_pci_id_list_t *id_entry;
1006 	int vendor, device;
1007 
1008 	vendor = pci_get_vendor(kdev);
1009 	device = pci_get_device(kdev);
1010 
1011 	if (pci_get_class(kdev) != PCIC_DISPLAY)
1012 		return ENXIO;
1013 
1014 	id_entry = drm_find_description(vendor, device, idlist);
1015 	if (id_entry != NULL) {
1016 		if (!device_get_desc(kdev)) {
1017 			device_set_desc(kdev, id_entry->name);
1018 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
1019 		}
1020 		return 0;
1021 	}
1022 
1023 	return ENXIO;
1024 }
1025 
1026 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
1027 {
1028 	struct drm_device *dev;
1029 	drm_pci_id_list_t *id_entry;
1030 	int unit, error;
1031 
1032 	unit = device_get_unit(kdev);
1033 	dev = device_get_softc(kdev);
1034 
1035 	/* Initialize Linux struct device */
1036 	dev->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1037 
1038 	if (!strcmp(device_get_name(kdev), "drmsub"))
1039 		dev->dev->bsddev = device_get_parent(kdev);
1040 	else
1041 		dev->dev->bsddev = kdev;
1042 
1043 	dev->pci_domain = pci_get_domain(dev->dev->bsddev);
1044 	dev->pci_bus = pci_get_bus(dev->dev->bsddev);
1045 	dev->pci_slot = pci_get_slot(dev->dev->bsddev);
1046 	dev->pci_func = pci_get_function(dev->dev->bsddev);
1047 	drm_init_pdev(dev->dev->bsddev, &dev->pdev);
1048 
1049 	id_entry = drm_find_description(dev->pdev->vendor,
1050 	    dev->pdev->device, idlist);
1051 	dev->id_entry = id_entry;
1052 
1053 	/* Print the contents of the pdev struct. */
1054 	drm_print_pdev(dev->pdev);
1055 
1056 	lwkt_serialize_init(&dev->irq_lock);
1057 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1058 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1059 
1060 	error = drm_load(dev);
1061 	if (error)
1062 		goto error;
1063 
1064 	error = drm_create_cdevs(kdev);
1065 
1066 error:
1067 	return (error);
1068 }
1069 
1070 int
1071 drm_create_cdevs(device_t kdev)
1072 {
1073 	struct drm_device *dev;
1074 	int error, unit;
1075 
1076 	unit = device_get_unit(kdev);
1077 	dev = device_get_softc(kdev);
1078 
1079 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1080 				DRM_DEV_MODE, "dri/card%d", unit);
1081 	error = 0;
1082 	if (error == 0)
1083 		dev->devnode->si_drv1 = dev;
1084 	return (error);
1085 }
1086 
1087 #ifndef DRM_DEV_NAME
1088 #define DRM_DEV_NAME "drm"
1089 #endif
1090 
1091 devclass_t drm_devclass;
1092 
1093 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1094     drm_pci_id_list_t *idlist)
1095 {
1096 	int i = 0;
1097 
1098 	for (i = 0; idlist[i].vendor != 0; i++) {
1099 		if ((idlist[i].vendor == vendor) &&
1100 		    ((idlist[i].device == device) ||
1101 		    (idlist[i].device == 0))) {
1102 			return &idlist[i];
1103 		}
1104 	}
1105 	return NULL;
1106 }
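
/*
 * Illustrative sketch (editor's addition; foo_pciidlist is a hypothetical
 * table): drm_probe() and drm_attach() expect idlist to be terminated by a
 * zeroed entry, and a .device value of 0 acts as a wildcard for the given
 * vendor.
 *
 *	static drm_pci_id_list_t foo_pciidlist[] = {
 *		{ .vendor = 0x8086, .device = 0x1234, .name = "Example GPU" },
 *		{ 0 }
 *	};
 */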
1107 
1108 static int drm_load(struct drm_device *dev)
1109 {
1110 	int retcode;
1111 
1112 	DRM_DEBUG("\n");
1113 
1114 	INIT_LIST_HEAD(&dev->maplist);
1115 
1116 	drm_sysctl_init(dev);
1117 	INIT_LIST_HEAD(&dev->filelist);
1118 
1119 	INIT_LIST_HEAD(&dev->vblank_event_list);
1120 
1121 	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
1122 		if (drm_pci_device_is_agp(dev))
1123 			dev->agp = drm_agp_init(dev);
1124 	}
1125 
1126 	if (dev->driver->driver_features & DRIVER_GEM) {
1127 		retcode = drm_gem_init(dev);
1128 		if (retcode != 0) {
1129 			DRM_ERROR("Cannot initialize graphics execution "
1130 				  "manager (GEM)\n");
1131 			goto error1;
1132 		}
1133 	}
1134 
1135 	if (dev->driver->load != NULL) {
1136 		DRM_LOCK(dev);
1137 		/* Shared code returns -errno. */
1138 		retcode = -dev->driver->load(dev,
1139 		    dev->id_entry->driver_private);
1140 		if (pci_enable_busmaster(dev->dev->bsddev))
1141 			DRM_ERROR("Request to enable bus-master failed.\n");
1142 		DRM_UNLOCK(dev);
1143 		if (retcode != 0)
1144 			goto error1;
1145 	}
1146 
1147 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1148 	    dev->driver->name,
1149 	    dev->driver->major,
1150 	    dev->driver->minor,
1151 	    dev->driver->patchlevel,
1152 	    dev->driver->date);
1153 
1154 	return 0;
1155 
1156 error1:
1157 	drm_gem_destroy(dev);
1158 	drm_sysctl_cleanup(dev);
1159 	DRM_LOCK(dev);
1160 	drm_lastclose(dev);
1161 	DRM_UNLOCK(dev);
1162 	if (dev->devnode != NULL)
1163 		destroy_dev(dev->devnode);
1164 
1165 	lockuninit(&dev->vbl_lock);
1166 	lockuninit(&dev->event_lock);
1167 	lockuninit(&dev->struct_mutex);
1168 
1169 	return retcode;
1170 }
1171 
1172 /*
1173  * Stub is needed for devfs
1174  */
1175 int drm_close(struct dev_close_args *ap)
1176 {
1177 	return 0;
1178 }
1179 
1180 /* XXX: this is supposed to be drm_release() */
1181 void drm_cdevpriv_dtor(void *cd)
1182 {
1183 	struct drm_file *file_priv = cd;
1184 	struct drm_device *dev = file_priv->dev;
1185 
1186 	DRM_DEBUG("open_count = %d\n", dev->open_count);
1187 
1188 	DRM_LOCK(dev);
1189 
1190 	if (dev->driver->preclose != NULL)
1191 		dev->driver->preclose(dev, file_priv);
1192 
1193 	/* ========================================================
1194 	 * Begin inline drm_release
1195 	 */
1196 
1197 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1198 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1199 
1200 	if (dev->driver->driver_features & DRIVER_GEM)
1201 		drm_gem_release(dev, file_priv);
1202 
1203 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1204 		drm_legacy_reclaim_buffers(dev, file_priv);
1205 
1206 	funsetown(&dev->buf_sigio);
1207 
1208 	if (dev->driver->postclose != NULL)
1209 		dev->driver->postclose(dev, file_priv);
1210 	list_del(&file_priv->lhead);
1211 
1212 
1213 	/* ========================================================
1214 	 * End inline drm_release
1215 	 */
1216 
1217 	device_unbusy(dev->dev->bsddev);
1218 	if (--dev->open_count == 0) {
1219 		drm_lastclose(dev);
1220 	}
1221 
1222 	DRM_UNLOCK(dev);
1223 }
1224 
1225 int
1226 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1227     struct sysctl_oid *top)
1228 {
1229 	struct sysctl_oid *oid;
1230 
1231 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1232 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1233 	     dev->pci_slot, dev->pci_func);
1234 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1235 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1236 	if (oid == NULL)
1237 		return (ENOMEM);
1238 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1239 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1240 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1241 	if (oid == NULL)
1242 		return (ENOMEM);
1243 
1244 	return (0);
1245 }
1246 
1247 int
1248 drm_mmap_single(struct dev_mmap_single_args *ap)
1249 {
1250 	struct drm_device *dev;
1251 	struct cdev *kdev = ap->a_head.a_dev;
1252 	vm_ooffset_t *offset = ap->a_offset;
1253 	vm_size_t size = ap->a_size;
1254 	struct vm_object **obj_res = ap->a_object;
1255 	int nprot = ap->a_nprot;
1256 
1257 	dev = drm_get_device_from_kdev(kdev);
1258 	if (dev->drm_ttm_bdev != NULL) {
1259 		return (ttm_bo_mmap_single(dev, offset, size, obj_res, nprot));
1260 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1261 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1262 	} else {
1263 		return (ENODEV);
1264 	}
1265 }
1266 
1267 #include <linux/dmi.h>
1268 
1269 /*
1270  * Check if dmi_system_id structure matches system DMI data
1271  */
1272 static bool
1273 dmi_found(const struct dmi_system_id *dsi)
1274 {
1275 	int i, slot;
1276 	bool found = false;
1277 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1278 
1279 	sys_vendor = kgetenv("smbios.system.maker");
1280 	board_vendor = kgetenv("smbios.planar.maker");
1281 	product_name = kgetenv("smbios.system.product");
1282 	board_name = kgetenv("smbios.planar.product");
1283 
1284 	for (i = 0; i < NELEM(dsi->matches); i++) {
1285 		slot = dsi->matches[i].slot;
1286 		switch (slot) {
1287 		case DMI_NONE:
1288 			break;
1289 		case DMI_SYS_VENDOR:
1290 			if (sys_vendor != NULL &&
1291 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1292 				break;
1293 			else
1294 				goto done;
1295 		case DMI_BOARD_VENDOR:
1296 			if (board_vendor != NULL &&
1297 			    !strcmp(board_vendor, dsi->matches[i].substr))
1298 				break;
1299 			else
1300 				goto done;
1301 		case DMI_PRODUCT_NAME:
1302 			if (product_name != NULL &&
1303 			    !strcmp(product_name, dsi->matches[i].substr))
1304 				break;
1305 			else
1306 				goto done;
1307 		case DMI_BOARD_NAME:
1308 			if (board_name != NULL &&
1309 			    !strcmp(board_name, dsi->matches[i].substr))
1310 				break;
1311 			else
1312 				goto done;
1313 		default:
1314 			goto done;
1315 		}
1316 	}
1317 	found = true;
1318 
1319 done:
1320 	if (sys_vendor != NULL)
1321 		kfreeenv(sys_vendor);
1322 	if (board_vendor != NULL)
1323 		kfreeenv(board_vendor);
1324 	if (product_name != NULL)
1325 		kfreeenv(product_name);
1326 	if (board_name != NULL)
1327 		kfreeenv(board_name);
1328 
1329 	return found;
1330 }
1331 
1332 int dmi_check_system(const struct dmi_system_id *sysid)
1333 {
1334 	const struct dmi_system_id *dsi;
1335 	int num = 0;
1336 
1337 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1338 		if (dmi_found(dsi)) {
1339 			num++;
1340 			if (dsi->callback && dsi->callback(dsi))
1341 				break;
1342 		}
1343 	}
1344 	return (num);
1345 }
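
/*
 * Illustrative sketch (editor's addition; the table and callback names are
 * hypothetical): callers pass a table terminated by an empty entry. An entry
 * matches only if every populated .matches slot compares equal (strcmp) to
 * the corresponding smbios value, and an optional .callback can stop further
 * matching.
 *
 *	static const struct dmi_system_id foo_dmi_table[] = {
 *		{
 *			.callback = foo_dmi_callback,
 *			.matches = {
 *				{ .slot = DMI_SYS_VENDOR, .substr = "Example Inc." },
 *				{ .slot = DMI_PRODUCT_NAME, .substr = "Example Model" },
 *			},
 *		},
 *		{ }
 *	};
 *
 *	if (dmi_check_system(foo_dmi_table))
 *		... apply the quirk ...
 */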
1346