xref: /dragonfly/sys/dev/drm/drm_drv.c (revision 954fc4d3)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include "drm_crtc_internal.h"
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
35 
36 /*
37  * drm_debug: Enable debug output.
38  * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
39  */
40 #ifdef __DragonFly__
41 /* Provides three levels of debug: off, minimal, verbose */
42 #if DRM_DEBUG_DEFAULT_ON == 1
43 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
44 			  DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL)
45 #elif DRM_DEBUG_DEFAULT_ON == 2
46 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS |	\
47 			  DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL |	\
48 			  DRM_UT_PID   | DRM_UT_IOCTL  | DRM_UT_VBLANK)
49 #else
50 #define DRM_DEBUGBITS_ON (0x0)
51 #endif
52 unsigned int drm_debug = DRM_DEBUGBITS_ON;	/* 0 unless DRM_DEBUG_DEFAULT_ON is set */
53 #else
54 unsigned int drm_debug = 0;
55 #endif /* __DragonFly__ */
56 EXPORT_SYMBOL(drm_debug);
57 
58 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
59 MODULE_DESCRIPTION("DRM shared core routines");
60 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
61 "\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
62 "\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
63 "\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
64 "\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
65 "\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
66 "\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
67 module_param_named(debug, drm_debug, int, 0600);
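
/*
 * Usage note (DragonFly): drm_debug can be seeded at boot through the
 * "drm.debug" loader tunable and changed at runtime through the
 * "hw.drm.debug" sysctl, both of which are wired up near the end of this
 * file.  For example, "sysctl hw.drm.debug=0x06" enables the DRIVER and
 * KMS categories listed above.
 */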
68 
69 static DEFINE_MUTEX(drm_minor_lock);
70 static struct idr drm_minors_idr;
71 
72 #if 0
73 static struct dentry *drm_debugfs_root;
74 #endif
75 
76 void drm_err(const char *func, const char *format, ...)
77 {
78 	va_list args;
79 
80 	kprintf("error: [" DRM_NAME ":pid%d:%s] *ERROR* ", DRM_CURRENTPID, func);
81 
82 	va_start(args, format);
83 	kvprintf(format, args);
84 	va_end(args);
85 }
86 
87 void drm_ut_debug_printk(const char *function_name, const char *format, ...)
88 {
89 	va_list args;
90 
91 	if (unlikely(drm_debug & DRM_UT_PID)) {
92 		kprintf("[" DRM_NAME ":pid%d:%s] ",
93 		    DRM_CURRENTPID, function_name);
94 	} else {
95 		kprintf("[" DRM_NAME ":%s] ", function_name);
96 	}
97 
98 	va_start(args, format);
99 	kvprintf(format, args);
100 	va_end(args);
101 }
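
/*
 * Illustrative sketch (not compiled here; the real DRM_DEBUG* macros live
 * in drmP.h): a per-category debug macro tests the drm_debug bitmask
 * before forwarding to drm_ut_debug_printk(), so a disabled category only
 * costs a bit test.
 */
#if 0
#define EXAMPLE_DEBUG_KMS(fmt, ...) do {				\
	if (unlikely(drm_debug & DRM_UT_KMS))				\
		drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__);	\
} while (0)
#endif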
102 
103 #define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
104 
105 void drm_dev_printk(const struct device *dev, const char *level,
106 		    unsigned int category, const char *function_name,
107 		    const char *prefix, const char *format, ...)
108 {
109 	struct va_format vaf;
110 	va_list args;
111 
112 	if (category != DRM_UT_NONE && !(drm_debug & category))
113 		return;
114 
115 	va_start(args, format);
116 	vaf.fmt = format;
117 	vaf.va = &args;
118 
119 	if (dev)
120 		dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
121 			   &vaf);
122 	else
123 		printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
124 
125 	va_end(args);
126 }
127 EXPORT_SYMBOL(drm_dev_printk);
128 
129 void drm_printk(const char *level, unsigned int category,
130 		const char *format, ...)
131 {
132 	struct va_format vaf;
133 	va_list args;
134 
135 	if (category != DRM_UT_NONE && !(drm_debug & category))
136 		return;
137 
138 	va_start(args, format);
139 	vaf.fmt = format;
140 	vaf.va = &args;
141 
142 	printk("%s" "[" DRM_NAME ":%ps]%s %pV",
143 	       level, __builtin_return_address(0),
144 	       strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);
145 
146 	va_end(args);
147 }
148 EXPORT_SYMBOL(drm_printk);
149 
150 /*
151  * DRM Minors
152  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
153  * of them is represented by a drm_minor object. Depending on the capabilities
154  * of the device-driver, different interfaces are registered.
155  *
156  * Minors can be accessed via dev->$minor_name. This pointer is either
157  * NULL or a valid drm_minor pointer and stays valid as long as the device is
158  * valid. This means, DRM minors have the same life-time as the underlying
159  * device. However, this doesn't mean that the minor is active. Minors are
160  * registered and unregistered dynamically according to device-state.
161  */
162 
163 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
164 					     unsigned int type)
165 {
166 	switch (type) {
167 	case DRM_MINOR_PRIMARY:
168 		return &dev->primary;
169 	case DRM_MINOR_RENDER:
170 		return &dev->render;
171 	case DRM_MINOR_CONTROL:
172 		return &dev->control;
173 	default:
174 		return NULL;
175 	}
176 }
177 
178 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
179 {
180 	struct drm_minor *minor;
181 	unsigned long flags;
182 	int r;
183 
184 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
185 	if (!minor)
186 		return -ENOMEM;
187 
188 	minor->type = type;
189 	minor->dev = dev;
190 
191 	idr_preload(GFP_KERNEL);
192 	spin_lock_irqsave(&drm_minor_lock, flags);
193 	r = idr_alloc(&drm_minors_idr,
194 		      NULL,
195 		      64 * type,
196 		      64 * (type + 1),
197 		      GFP_NOWAIT);
198 	spin_unlock_irqrestore(&drm_minor_lock, flags);
199 	idr_preload_end();
200 
201 	if (r < 0)
202 		goto err_free;
203 
204 	minor->index = r;
205 
206 #if 0
207 	minor->kdev = drm_sysfs_minor_alloc(minor);
208 	if (IS_ERR(minor->kdev)) {
209 		r = PTR_ERR(minor->kdev);
210 		goto err_index;
211 	}
212 #endif
213 
214 	*drm_minor_get_slot(dev, type) = minor;
215 	return 0;
216 
217 #if 0
218 err_index:
219 	spin_lock_irqsave(&drm_minor_lock, flags);
220 	idr_remove(&drm_minors_idr, minor->index);
221 	spin_unlock_irqrestore(&drm_minor_lock, flags);
222 #endif
223 err_free:
224 	kfree(minor);
225 	return r;
226 }
227 
228 static void drm_minor_free(struct drm_device *dev, unsigned int type)
229 {
230 	struct drm_minor **slot, *minor;
231 	unsigned long flags;
232 
233 	slot = drm_minor_get_slot(dev, type);
234 	minor = *slot;
235 	if (!minor)
236 		return;
237 
238 #if 0
239 	put_device(minor->kdev);
240 #endif
241 
242 	spin_lock_irqsave(&drm_minor_lock, flags);
243 	idr_remove(&drm_minors_idr, minor->index);
244 	spin_unlock_irqrestore(&drm_minor_lock, flags);
245 
246 	kfree(minor);
247 	*slot = NULL;
248 }
249 
250 static int drm_minor_register(struct drm_device *dev, unsigned int type)
251 {
252 	struct drm_minor *minor;
253 	unsigned long flags;
254 #if 0
255 	int ret;
256 #endif
257 
258 	DRM_DEBUG("\n");
259 
260 	minor = *drm_minor_get_slot(dev, type);
261 	if (!minor)
262 		return 0;
263 
264 #if 0
265 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
266 	if (ret) {
267 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
268 		return ret;
269 	}
270 #endif
271 
272 #ifdef __DragonFly__
273 	/* XXX /dev entries should be created here with make_dev */
274 #else
275 	ret = device_add(minor->kdev);
276 	if (ret)
277 		goto err_debugfs;
278 #endif
279 
280 	/* replace NULL with @minor so lookups will succeed from now on */
281 	spin_lock_irqsave(&drm_minor_lock, flags);
282 	idr_replace(&drm_minors_idr, minor, minor->index);
283 	spin_unlock_irqrestore(&drm_minor_lock, flags);
284 
285 	DRM_DEBUG("new minor registered %d\n", minor->index);
286 	return 0;
287 
288 #if 0
289 err_debugfs:
290 	drm_debugfs_cleanup(minor);
291 	return ret;
292 #endif
293 }
294 
295 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
296 {
297 	struct drm_minor *minor;
298 	unsigned long flags;
299 
300 	minor = *drm_minor_get_slot(dev, type);
301 #if 0
302 	if (!minor || !device_is_registered(minor->kdev))
303 #else
304 	if (!minor)
305 #endif
306 		return;
307 
308 	/* replace @minor with NULL so lookups will fail from now on */
309 	spin_lock_irqsave(&drm_minor_lock, flags);
310 	idr_replace(&drm_minors_idr, NULL, minor->index);
311 	spin_unlock_irqrestore(&drm_minor_lock, flags);
312 
313 #if 0
314 	device_del(minor->kdev);
315 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
316 #endif
317 	drm_debugfs_cleanup(minor);
318 }
319 
320 /**
321  * drm_minor_acquire - Acquire a DRM minor
322  * @minor_id: Minor ID of the DRM-minor
323  *
324  * Looks up the given minor-ID and returns the respective DRM-minor object. The
325  * reference-count of the underlying device is increased so you must release this
326  * object with drm_minor_release().
327  *
328  * As long as you hold this minor, it is guaranteed that the object and the
329  * minor->dev pointer will stay valid! However, the device may get unplugged and
330  * unregistered while you hold the minor.
331  *
332  * Returns:
333  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
334  * failure.
335  */
336 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
337 {
338 	struct drm_minor *minor;
339 	unsigned long flags;
340 
341 	spin_lock_irqsave(&drm_minor_lock, flags);
342 	minor = idr_find(&drm_minors_idr, minor_id);
343 	if (minor)
344 		drm_dev_ref(minor->dev);
345 	spin_unlock_irqrestore(&drm_minor_lock, flags);
346 
347 	if (!minor) {
348 		return ERR_PTR(-ENODEV);
349 	} else if (drm_device_is_unplugged(minor->dev)) {
350 		drm_dev_unref(minor->dev);
351 		return ERR_PTR(-ENODEV);
352 	}
353 
354 	return minor;
355 }
356 
357 /**
358  * drm_minor_release - Release DRM minor
359  * @minor: Pointer to DRM minor object
360  *
361  * Release a minor that was previously acquired via drm_minor_acquire().
362  */
363 void drm_minor_release(struct drm_minor *minor)
364 {
365 	drm_dev_unref(minor->dev);
366 }
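
/*
 * Illustrative sketch (not compiled; "example_open_minor" is hypothetical):
 * the usual pattern is acquire, use, release, with lookup failures reported
 * through the ERR_PTR convention.
 */
#if 0
static int example_open_minor(unsigned int minor_id)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* minor->dev stays valid while the minor is held */

	drm_minor_release(minor);
	return 0;
}
#endif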
367 
368 #if 0
369 /**
370  * DOC: driver instance overview
371  *
372  * A device instance for a drm driver is represented by struct &drm_device. This
373  * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
374  * callbacks implemented by the driver. The driver then needs to initialize all
375  * the various subsystems for the drm device like memory management, vblank
376  * handling, modesetting support and initial output configuration plus obviously
377  * initialize all the corresponding hardware bits. Finally when everything is up
378  * and running and ready for userspace the device instance can be published
379  * using drm_dev_register().
380  *
381  * There is also deprecated support for initializing device instances using
382  * bus-specific helpers and the ->load() callback. But due to
383  * backwards-compatibility needs the device instance has to be published too
384  * early, which requires unpretty global locking to make safe and is therefore
385  * only supported for existing drivers not yet converted to the new scheme.
386  *
387  * When cleaning up a device instance everything needs to be done in reverse:
388  * First unpublish the device instance with drm_dev_unregister(). Then clean up
389  * any other resources allocated at device initialization and drop the driver's
390  * reference to &drm_device using drm_dev_unref().
391  *
392  * Note that the lifetime rules for &drm_device instances still have a lot of
393  * historical baggage. Hence use the reference counting provided by
394  * drm_dev_ref() and drm_dev_unref() only carefully.
395  *
396  * Also note that embedding of &drm_device is currently not (yet) supported (but
397  * it would be easy to add). Drivers can store driver-private data in the
398  * dev_priv field of &drm_device.
399  */
400 
401 static int drm_dev_set_unique(struct drm_device *dev, const char *name)
402 {
403 	if (!name)
404 		return -EINVAL;
405 
406 	kfree(dev->unique);
407 	dev->unique = kstrdup(name, GFP_KERNEL);
408 
409 	return dev->unique ? 0 : -ENOMEM;
410 }
411 
412 /**
413  * drm_put_dev - Unregister and release a DRM device
414  * @dev: DRM device
415  *
416  * Called at module unload time or when a PCI device is unplugged.
417  *
418  * Cleans up the DRM device, calling drm_lastclose().
419  *
420  * Note: Use of this function is deprecated. It will eventually go away
421  * completely.  Please use drm_dev_unregister() and drm_dev_unref() explicitly
422  * instead to make sure that the device isn't userspace accessible any more
423  * while teardown is in progress, ensuring that userspace can't access an
424  * inconsistent state.
425  */
426 void drm_put_dev(struct drm_device *dev)
427 {
428 	DRM_DEBUG("\n");
429 
430 	if (!dev) {
431 		DRM_ERROR("cleanup called no dev\n");
432 		return;
433 	}
434 
435 	drm_dev_unregister(dev);
436 	drm_dev_unref(dev);
437 }
438 EXPORT_SYMBOL(drm_put_dev);
439 
440 void drm_unplug_dev(struct drm_device *dev)
441 {
442 	/* for a USB device */
443 	drm_dev_unregister(dev);
444 
445 	mutex_lock(&drm_global_mutex);
446 
447 	drm_device_set_unplugged(dev);
448 
449 	if (dev->open_count == 0) {
450 		drm_put_dev(dev);
451 	}
452 	mutex_unlock(&drm_global_mutex);
453 }
454 EXPORT_SYMBOL(drm_unplug_dev);
455 
456 /*
457  * DRM internal mount
458  * We want to be able to allocate our own "struct address_space" to control
459  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
460  * stand-alone address_space objects, so we need an underlying inode. As there
461  * is no way to allocate an independent inode easily, we need a fake internal
462  * VFS mount-point.
463  *
464  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
465  * frees it again. You are allowed to use iget() and iput() to get references to
466  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
467  * drm_fs_inode_free() call (which does not have to be the last iput()).
468  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
469  * between multiple inode-users. You could, technically, call
470  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
471  * iput(), but this way you'd end up with a new vfsmount for each inode.
472  */
473 
474 static int drm_fs_cnt;
475 static struct vfsmount *drm_fs_mnt;
476 
477 static const struct dentry_operations drm_fs_dops = {
478 	.d_dname	= simple_dname,
479 };
480 
481 static const struct super_operations drm_fs_sops = {
482 	.statfs		= simple_statfs,
483 };
484 
485 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
486 				   const char *dev_name, void *data)
487 {
488 	return mount_pseudo(fs_type,
489 			    "drm:",
490 			    &drm_fs_sops,
491 			    &drm_fs_dops,
492 			    0x010203ff);
493 }
494 
495 static struct file_system_type drm_fs_type = {
496 	.name		= "drm",
497 	.owner		= THIS_MODULE,
498 	.mount		= drm_fs_mount,
499 	.kill_sb	= kill_anon_super,
500 };
501 
502 static struct inode *drm_fs_inode_new(void)
503 {
504 	struct inode *inode;
505 	int r;
506 
507 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
508 	if (r < 0) {
509 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
510 		return ERR_PTR(r);
511 	}
512 
513 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
514 	if (IS_ERR(inode))
515 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
516 
517 	return inode;
518 }
519 
520 static void drm_fs_inode_free(struct inode *inode)
521 {
522 	if (inode) {
523 		iput(inode);
524 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
525 	}
526 }
527 #endif
528 
529 /**
530  * drm_dev_init - Initialise new DRM device
531  * @dev: DRM device
532  * @driver: DRM driver
533  * @parent: Parent device object
534  *
535  * Initialize a new DRM device. No device registration is done.
536  * Call drm_dev_register() to advertise the device to user space and register it
537  * with other core subsystems. This should be done last in the device
538  * initialization sequence to make sure userspace can't access an inconsistent
539  * state.
540  *
541  * The initial ref-count of the object is 1. Use drm_dev_ref() and
542  * drm_dev_unref() to take and drop further ref-counts.
543  *
544  * Note that for purely virtual devices @parent can be NULL.
545  *
546  * Drivers that do not want to allocate their own device struct
547  * embedding struct &drm_device can call drm_dev_alloc() instead.
548  *
549  * RETURNS:
550  * 0 on success, or error code on failure.
551  */
552 int drm_dev_init(struct drm_device *dev,
553 		 struct drm_driver *driver,
554 		 struct device *parent)
555 {
556 	int ret;
557 #ifdef __DragonFly__
558 	struct drm_softc *softc = device_get_softc(parent->bsddev);
559 
560 	softc->drm_driver_data = dev;
561 #endif
562 
563 	kref_init(&dev->ref);
564 	dev->dev = parent;
565 	dev->driver = driver;
566 
567 	INIT_LIST_HEAD(&dev->filelist);
568 	INIT_LIST_HEAD(&dev->ctxlist);
569 	INIT_LIST_HEAD(&dev->vmalist);
570 	INIT_LIST_HEAD(&dev->maplist);
571 	INIT_LIST_HEAD(&dev->vblank_event_list);
572 
573 	spin_init(&dev->buf_lock, "drmdbl");
574 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
575 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
576 	lockinit(&dev->filelist_mutex, "drmflm", 0, LK_CANRECURSE);
577 	lockinit(&dev->ctxlist_mutex, "drmclm", 0, LK_CANRECURSE);
578 	lockinit(&dev->master_mutex, "drmmm", 0, LK_CANRECURSE);
579 
580 #ifndef __DragonFly__
581 	dev->anon_inode = drm_fs_inode_new();
582 	if (IS_ERR(dev->anon_inode)) {
583 		ret = PTR_ERR(dev->anon_inode);
584 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
585 		goto err_free;
586 	}
587 #else
588 	dev->anon_inode = NULL;
589 	dev->pci_domain = pci_get_domain(dev->dev->bsddev);
590 	dev->pci_bus = pci_get_bus(dev->dev->bsddev);
591 	dev->pci_slot = pci_get_slot(dev->dev->bsddev);
592 	dev->pci_func = pci_get_function(dev->dev->bsddev);
593 	lwkt_serialize_init(&dev->irq_lock);
594 	drm_sysctl_init(dev);
595 #endif
596 
597 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
598 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
599 		if (ret)
600 			goto err_minors;
601 	}
602 
603 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
604 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
605 		if (ret)
606 			goto err_minors;
607 	}
608 
609 	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
610 	if (ret)
611 		goto err_minors;
612 
613 	ret = drm_ht_create(&dev->map_hash, 12);
614 	if (ret)
615 		goto err_minors;
616 
617 	drm_legacy_ctxbitmap_init(dev);
618 
619 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
620 		ret = drm_gem_init(dev);
621 		if (ret) {
622 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
623 			goto err_ctxbitmap;
624 		}
625 	}
626 
627 #if 0
628 	if (parent) {
629 		ret = drm_dev_set_unique(dev, dev_name(parent));
630 		if (ret)
631 			goto err_setunique;
632 	}
633 #endif
634 
635 	return 0;
636 
637 #if 0
638 err_setunique:
639 	if (drm_core_check_feature(dev, DRIVER_GEM))
640 		drm_gem_destroy(dev);
641 #endif
642 err_ctxbitmap:
643 	drm_legacy_ctxbitmap_cleanup(dev);
644 	drm_ht_remove(&dev->map_hash);
645 err_minors:
646 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
647 	drm_minor_free(dev, DRM_MINOR_RENDER);
648 	drm_minor_free(dev, DRM_MINOR_CONTROL);
649 #ifndef __DragonFly__
650 	drm_fs_inode_free(dev->anon_inode);
651 err_free:
652 #endif
653 	mutex_destroy(&dev->master_mutex);
654 #ifdef __DragonFly__
655 	drm_sysctl_cleanup(dev);
656 #endif
657 	return ret;
658 }
659 EXPORT_SYMBOL(drm_dev_init);
660 
661 /**
662  * drm_dev_alloc - Allocate new DRM device
663  * @driver: DRM driver to allocate device for
664  * @parent: Parent device object
665  *
666  * Allocate and initialize a new DRM device. No device registration is done.
667  * Call drm_dev_register() to advertise the device to user space and register it
668  * with other core subsystems. This should be done last in the device
669  * initialization sequence to make sure userspace can't access an inconsistent
670  * state.
671  *
672  * The initial ref-count of the object is 1. Use drm_dev_ref() and
673  * drm_dev_unref() to take and drop further ref-counts.
674  *
675  * Note that for purely virtual devices @parent can be NULL.
676  *
677  * Drivers that wish to subclass or embed struct &drm_device into their
678  * own struct should look at using drm_dev_init() instead.
679  *
680  * RETURNS:
681  * Pointer to new DRM device, or ERR_PTR on failure.
682  */
683 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
684 				 struct device *parent)
685 {
686 	struct drm_device *dev;
687 	int ret;
688 
689 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
690 	if (!dev)
691 		return ERR_PTR(-ENOMEM);
692 
693 	ret = drm_dev_init(dev, driver, parent);
694 	if (ret) {
695 		kfree(dev);
696 		return ERR_PTR(ret);
697 	}
698 
699 	return dev;
700 }
701 EXPORT_SYMBOL(drm_dev_alloc);
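
/*
 * Illustrative sketch (not compiled; "example_driver" and "example_attach"
 * are hypothetical): drm_dev_alloc() follows the ERR_PTR convention, so
 * callers test the result with IS_ERR() rather than comparing against NULL.
 */
#if 0
static int example_attach(struct device *parent)
{
	struct drm_device *ddev;

	ddev = drm_dev_alloc(&example_driver, parent);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	/* ... initialize the hardware, then call drm_dev_register() ... */
	return 0;
}
#endif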
702 
703 #if 0
704 static void drm_dev_release(struct kref *ref)
705 {
706 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
707 
708 	if (drm_core_check_feature(dev, DRIVER_GEM))
709 		drm_gem_destroy(dev);
710 
711 	drm_legacy_ctxbitmap_cleanup(dev);
712 	drm_ht_remove(&dev->map_hash);
713 	drm_fs_inode_free(dev->anon_inode);
714 
715 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
716 	drm_minor_free(dev, DRM_MINOR_RENDER);
717 	drm_minor_free(dev, DRM_MINOR_CONTROL);
718 
719 	mutex_destroy(&dev->master_mutex);
720 	kfree(dev->unique);
721 	kfree(dev);
722 }
723 #endif
724 
725 /**
726  * drm_dev_ref - Take reference of a DRM device
727  * @dev: device to take reference of or NULL
728  *
729  * This increases the ref-count of @dev by one. You *must* already own a
730  * reference when calling this. Use drm_dev_unref() to drop this reference
731  * again.
732  *
733  * This function never fails. However, this function does not provide *any*
734  * guarantee whether the device is alive or running. It only provides a
735  * reference to the object and the memory associated with it.
736  */
737 void drm_dev_ref(struct drm_device *dev)
738 {
739 	if (dev)
740 		kref_get(&dev->ref);
741 }
742 EXPORT_SYMBOL(drm_dev_ref);
743 
744 /**
745  * drm_dev_unref - Drop reference of a DRM device
746  * @dev: device to drop reference of or NULL
747  *
748  * This decreases the ref-count of @dev by one. The device is destroyed if the
749  * ref-count drops to zero.
750  */
751 void drm_dev_unref(struct drm_device *dev)
752 {
753 #if 0
754 	if (dev)
755 		kref_put(&dev->ref, drm_dev_release);
756 #endif
757 }
758 EXPORT_SYMBOL(drm_dev_unref);
759 
760 /**
761  * drm_dev_register - Register DRM device
762  * @dev: Device to register
763  * @flags: Flags passed to the driver's .load() function
764  *
765  * Register the DRM device @dev with the system, advertise the device to user-space
766  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
767  * previously.
768  *
769  * Never call this twice on any device!
770  *
771  * NOTE: To ensure backward compatibility with existing drivers, this
772  * function calls the ->load() method after registering the device nodes,
773  * creating race conditions. Usage of the ->load() method is therefore
774  * deprecated, drivers must perform all initialization before calling
775  * drm_dev_register().
776  *
777  * RETURNS:
778  * 0 on success, negative error code on failure.
779  */
780 int drm_dev_register(struct drm_device *dev, unsigned long flags)
781 {
782 	int ret;
783 
784 	mutex_lock(&drm_global_mutex);
785 
786 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
787 	if (ret)
788 		goto err_minors;
789 
790 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
791 	if (ret)
792 		goto err_minors;
793 
794 	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
795 	if (ret)
796 		goto err_minors;
797 
798 	if (dev->driver->load) {
799 		ret = dev->driver->load(dev, flags);
800 		if (ret)
801 			goto err_minors;
802 	}
803 
804 	if (drm_core_check_feature(dev, DRIVER_MODESET))
805 		drm_modeset_register_all(dev);
806 
807 #ifdef __DragonFly__
808 	ret = drm_create_cdevs(dev->dev->bsddev);
809 	if (ret)
810 		goto err_minors;
811 #endif
812 
813 	ret = 0;
814 	goto out_unlock;
815 
816 err_minors:
817 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
818 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
819 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
820 out_unlock:
821 	mutex_unlock(&drm_global_mutex);
822 	return ret;
823 }
824 EXPORT_SYMBOL(drm_dev_register);
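
/*
 * Illustrative sketch (not compiled; the "example_*" names are hypothetical):
 * the registration flow described above.  All driver initialization happens
 * before drm_dev_register(), and the error path drops the reference taken by
 * drm_dev_alloc().
 */
#if 0
static int example_probe(struct drm_driver *drv, struct device *parent)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(drv, parent);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ret = example_hw_init(ddev);	/* modeset, vblank, outputs, ... */
	if (ret)
		goto err_unref;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	example_hw_fini(ddev);
err_unref:
	drm_dev_unref(ddev);
	return ret;
}
#endif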
825 
826 /**
827  * drm_dev_unregister - Unregister DRM device
828  * @dev: Device to unregister
829  *
830  * Unregister the DRM device from the system. This does the reverse of
831  * drm_dev_register() but does not deallocate the device. The caller must call
832  * drm_dev_unref() to drop their final reference.
833  *
834  * This should be called first in the device teardown code to make sure
835  * userspace can't access the device instance any more.
836  */
837 void drm_dev_unregister(struct drm_device *dev)
838 {
839 	struct drm_map_list *r_list, *list_temp;
840 
841 	drm_lastclose(dev);
842 
843 	if (drm_core_check_feature(dev, DRIVER_MODESET))
844 		drm_modeset_unregister_all(dev);
845 
846 	if (dev->driver->unload)
847 		dev->driver->unload(dev);
848 
849 #if 0
850 	if (dev->agp)
851 		drm_pci_agp_destroy(dev);
852 #endif
853 
854 	drm_vblank_cleanup(dev);
855 
856 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
857 		drm_legacy_rmmap(dev, r_list->map);
858 
859 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
860 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
861 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
862 }
863 EXPORT_SYMBOL(drm_dev_unregister);
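
/*
 * Illustrative sketch (not compiled; "example_remove" and "example_hw_fini"
 * are hypothetical): teardown runs in reverse order, unpublishing the device
 * first so userspace can no longer reach it, and dropping the final
 * reference last.
 */
#if 0
static void example_remove(struct drm_device *ddev)
{
	drm_dev_unregister(ddev);
	example_hw_fini(ddev);
	drm_dev_unref(ddev);	/* frees the device once the last ref drops */
}
#endif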
864 
865 /*
866  * DRM Core
867  * The DRM core module initializes all global DRM objects and makes them
868  * available to drivers. Once setup, drivers can probe their respective
869  * devices.
870  * Currently, core management includes:
871  *  - The "DRM-Global" key/value database
872  *  - Global ID management for connectors
873  *  - DRM major number allocation
874  *  - DRM minor management
875  *  - DRM sysfs class
876  *  - DRM debugfs root
877  *
878  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
879  * interface registered on a DRM device, you can request minor numbers from DRM
880  * core. DRM core takes care of major-number management and char-dev
881  * registration. A stub ->open() callback forwards any open() requests to the
882  * registered minor.
883  */
884 
885 #if 0
886 static int drm_stub_open(struct inode *inode, struct file *filp)
887 {
888 	const struct file_operations *new_fops;
889 	struct drm_minor *minor;
890 	int err;
891 
892 	DRM_DEBUG("\n");
893 
894 	mutex_lock(&drm_global_mutex);
895 	minor = drm_minor_acquire(iminor(inode));
896 	if (IS_ERR(minor)) {
897 		err = PTR_ERR(minor);
898 		goto out_unlock;
899 	}
900 
901 	new_fops = fops_get(minor->dev->driver->fops);
902 	if (!new_fops) {
903 		err = -ENODEV;
904 		goto out_release;
905 	}
906 
907 	replace_fops(filp, new_fops);
908 	if (filp->f_op->open)
909 		err = filp->f_op->open(inode, filp);
910 	else
911 		err = 0;
912 
913 out_release:
914 	drm_minor_release(minor);
915 out_unlock:
916 	mutex_unlock(&drm_global_mutex);
917 	return err;
918 }
919 
920 static const struct file_operations drm_stub_fops = {
921 	.owner = THIS_MODULE,
922 	.open = drm_stub_open,
923 	.llseek = noop_llseek,
924 };
925 #endif
926 
927 static void drm_core_exit(void)
928 {
929 #if 0
930 	unregister_chrdev(DRM_MAJOR, "drm");
931 	debugfs_remove(drm_debugfs_root);
932 	drm_sysfs_destroy();
933 #endif
934 	idr_destroy(&drm_minors_idr);
935 	drm_connector_ida_destroy();
936 	drm_global_release();
937 }
938 
939 static int __init drm_core_init(void)
940 {
941 #if 0
942 	int ret;
943 #endif
944 
945 	drm_global_init();
946 	drm_connector_ida_init();
947 	idr_init(&drm_minors_idr);
948 
949 #if 0
950 	ret = drm_sysfs_init();
951 	if (ret < 0) {
952 		DRM_ERROR("Cannot create DRM class: %d\n", ret);
953 		goto error;
954 	}
955 
956 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
957 	if (!drm_debugfs_root) {
958 		ret = -ENOMEM;
959 		DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
960 		goto error;
961 	}
962 
963 	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
964 	if (ret < 0)
965 		goto error;
966 #endif
967 
968 	DRM_INFO("Initialized\n");
969 	return 0;
970 
971 #if 0
972 error:
973 	drm_core_exit();
974 	return ret;
975 #endif
976 }
977 
978 module_init(drm_core_init);
979 module_exit(drm_core_exit);
980 
981 #include <sys/devfs.h>
982 
983 #include <linux/export.h>
984 #include <linux/dmi.h>
985 #include <drm/drmP.h>
986 
987 static int
988 drm_modevent(module_t mod, int type, void *data)
989 {
990 
991 	switch (type) {
992 	case MOD_LOAD:
993 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
994 		linux_task_drop_callback = linux_task_drop;
995 		linux_proc_drop_callback = linux_proc_drop;
996 		break;
997 	case MOD_UNLOAD:
998 		linux_task_drop_callback = NULL;
999 		linux_proc_drop_callback = NULL;
1000 		break;
1001 	}
1002 	return (0);
1003 }
1004 
1005 static moduledata_t drm_mod = {
1006 	"drm",
1007 	drm_modevent,
1008 	0
1009 };
1010 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
1011 MODULE_VERSION(drm, 1);
1012 MODULE_DEPEND(drm, agp, 1, 1, 1);
1013 MODULE_DEPEND(drm, pci, 1, 1, 1);
1014 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
1015 
1016 static struct dev_ops drm_cdevsw = {
1017 	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
1018 	.d_open =	drm_open,
1019 	.d_close =	drm_close,
1020 	.d_read =	drm_read,
1021 	.d_ioctl =	drm_ioctl,
1022 	.d_kqfilter =	drm_kqfilter,
1023 	.d_mmap =	drm_mmap,
1024 	.d_mmap_single = drm_mmap_single,
1025 };
1026 
1027 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
1028 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
1029     "DRM debugging");
1030 
1031 int
1032 drm_create_cdevs(device_t kdev)
1033 {
1034 	struct drm_device *dev;
1035 	int unit;
1036 #ifdef __DragonFly__
1037 	struct drm_softc *softc = device_get_softc(kdev);
1038 
1039 	dev = softc->drm_driver_data;
1040 #endif
1041 	unit = device_get_unit(kdev);
1042 
1043 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1044 				DRM_DEV_MODE, "dri/card%d", unit);
1045 	dev->devnode->si_drv1 = dev;
1046 
1047 	return (0);
1048 }
1050 
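/*
 * Usage note: the node created above appears as /dev/dri/card<unit>, and
 * si_drv1 lets the cdev entry points recover the drm_device.  Illustrative
 * sketch (not compiled; hypothetical handler, assuming the standard
 * DragonFly dev_open_args layout):
 */
#if 0
static int example_dev_open(struct dev_open_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	struct drm_device *dev = kdev->si_drv1;

	return (dev != NULL ? 0 : ENXIO);
}
#endif
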
1051 #ifndef DRM_DEV_NAME
1052 #define DRM_DEV_NAME "drm"
1053 #endif
1054 
1055 devclass_t drm_devclass;
1056 
1057 /*
1058  * Stub is needed for devfs
1059  */
1060 int drm_close(struct dev_close_args *ap)
1061 {
1062 	return 0;
1063 }
1064 
1065 /* XXX: this is supposed to be drm_release() */
1066 void drm_cdevpriv_dtor(void *cd)
1067 {
1068 	struct drm_file *file_priv = cd;
1069 	struct drm_device *dev = file_priv->dev;
1070 
1071 	DRM_DEBUG("open_count = %d\n", dev->open_count);
1072 
1073 	DRM_LOCK(dev);
1074 
1075 	if (dev->driver->preclose != NULL)
1076 		dev->driver->preclose(dev, file_priv);
1077 
1078 	/* ========================================================
1079 	 * Begin inline drm_release
1080 	 */
1081 
1082 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1083 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1084 
1085 	if (dev->driver->driver_features & DRIVER_GEM)
1086 		drm_gem_release(dev, file_priv);
1087 
1088 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1089 		drm_legacy_reclaim_buffers(dev, file_priv);
1090 
1091 	funsetown(&dev->buf_sigio);
1092 
1093 	if (dev->driver->postclose != NULL)
1094 		dev->driver->postclose(dev, file_priv);
1095 	list_del(&file_priv->lhead);
1096 
1097 
1098 	/* ========================================================
1099 	 * End inline drm_release
1100 	 */
1101 
1102 	device_unbusy(dev->dev->bsddev);
1103 	if (--dev->open_count == 0) {
1104 		drm_lastclose(dev);
1105 	}
1106 
1107 	DRM_UNLOCK(dev);
1108 }
1109 
1110 int
1111 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1112     struct sysctl_oid *top)
1113 {
1114 	struct sysctl_oid *oid;
1115 
1116 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1117 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1118 	     dev->pci_slot, dev->pci_func);
1119 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1120 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1121 	if (oid == NULL)
1122 		return (ENOMEM);
1123 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1124 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1125 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1126 	if (oid == NULL)
1127 		return (ENOMEM);
1128 
1129 	return (0);
1130 }
1131 
1132 int
1133 drm_mmap_single(struct dev_mmap_single_args *ap)
1134 {
1135 	struct drm_device *dev;
1136 	struct cdev *kdev = ap->a_head.a_dev;
1137 	vm_ooffset_t *offset = ap->a_offset;
1138 	vm_size_t size = ap->a_size;
1139 	struct vm_object **obj_res = ap->a_object;
1140 	int nprot = ap->a_nprot;
1141 
1142 	dev = drm_get_device_from_kdev(kdev);
1143 	if (dev->drm_ttm_bdev != NULL) {
1144 		return (ttm_bo_mmap_single(dev, offset, size, obj_res, nprot));
1145 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1146 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1147 	} else {
1148 		return (ENODEV);
1149 	}
1150 }
1151 
1152 #include <linux/dmi.h>
1153 
1154 /*
1155  * Check if a dmi_system_id structure matches the system DMI data
1156  */
1157 static bool
1158 dmi_found(const struct dmi_system_id *dsi)
1159 {
1160 	int i, slot;
1161 	bool found = false;
1162 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1163 
1164 	sys_vendor = kgetenv("smbios.system.maker");
1165 	board_vendor = kgetenv("smbios.planar.maker");
1166 	product_name = kgetenv("smbios.system.product");
1167 	board_name = kgetenv("smbios.planar.product");
1168 
1169 	for (i = 0; i < NELEM(dsi->matches); i++) {
1170 		slot = dsi->matches[i].slot;
1171 		switch (slot) {
1172 		case DMI_NONE:
1173 			break;
1174 		case DMI_SYS_VENDOR:
1175 			if (sys_vendor != NULL &&
1176 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1177 				break;
1178 			else
1179 				goto done;
1180 		case DMI_BOARD_VENDOR:
1181 			if (board_vendor != NULL &&
1182 			    !strcmp(board_vendor, dsi->matches[i].substr))
1183 				break;
1184 			else
1185 				goto done;
1186 		case DMI_PRODUCT_NAME:
1187 			if (product_name != NULL &&
1188 			    !strcmp(product_name, dsi->matches[i].substr))
1189 				break;
1190 			else
1191 				goto done;
1192 		case DMI_BOARD_NAME:
1193 			if (board_name != NULL &&
1194 			    !strcmp(board_name, dsi->matches[i].substr))
1195 				break;
1196 			else
1197 				goto done;
1198 		default:
1199 			goto done;
1200 		}
1201 	}
1202 	found = true;
1203 
1204 done:
1205 	if (sys_vendor != NULL)
1206 		kfreeenv(sys_vendor);
1207 	if (board_vendor != NULL)
1208 		kfreeenv(board_vendor);
1209 	if (product_name != NULL)
1210 		kfreeenv(product_name);
1211 	if (board_name != NULL)
1212 		kfreeenv(board_name);
1213 
1214 	return found;
1215 }
1216 
1217 int dmi_check_system(const struct dmi_system_id *sysid)
1218 {
1219 	const struct dmi_system_id *dsi;
1220 	int num = 0;
1221 
1222 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1223 		if (dmi_found(dsi)) {
1224 			num++;
1225 			if (dsi->callback && dsi->callback(dsi))
1226 				break;
1227 		}
1228 	}
1229 	return (num);
1230 }
1231
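
/*
 * Illustrative sketch (not compiled; the table, callback and the DMI_MATCH()
 * initializer from <linux/dmi.h> are used as in the Linux quirk tables):
 * callers pass a zero-terminated dmi_system_id array.  dmi_check_system()
 * returns the number of matching entries and stops early once a callback
 * returns non-zero.
 */
#if 0
static int example_quirk_cb(const struct dmi_system_id *id)
{
	DRM_INFO("example DMI quirk matched\n");
	return 1;	/* non-zero: stop scanning further entries */
}

static const struct dmi_system_id example_quirk_table[] = {
	{
		.callback = example_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
		},
	},
	{ }	/* terminator: matches[0].slot == 0 ends the scan */
};

static void example_apply_quirks(void)
{
	dmi_check_system(example_quirk_table);
}
#endif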