/**
 * \file drm_stub.h
 * Stub support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 */

/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>

unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

/* 1 to allow user space to request universal planes (experimental) */
unsigned int drm_universal_planes = 0;
EXPORT_SYMBOL(drm_universal_planes);

unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(universal_planes, drm_universal_planes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
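
/*
 * On Linux, the module_param_named() calls above expose these knobs under
 * /sys/module/drm/parameters/ (mode 0600) and as load-time options; the
 * NetBSD build may handle them differently.  A hedged sketch of typical
 * usage from a root shell, for illustration only:
 *
 *	# modprobe drm debug=1 vblankoffdelay=0
 *	# echo 1 > /sys/module/drm/parameters/debug
 */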

#ifdef __NetBSD__
spinlock_t drm_minor_lock;
#else
static DEFINE_SPINLOCK(drm_minor_lock);
#endif
struct idr drm_minors_idr;

#ifndef __NetBSD__
struct class *drm_class;
struct dentry *drm_debugfs_root;
#endif

int drm_err(const char *func, const char *format, ...)
{
#ifdef __NetBSD__
	va_list args;

	va_start(args, format);
	printf("DRM error in %s: ", func);
	vprintf(format, args);
	va_end(args);

	return 0;
#else
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

	va_end(args);

	return r;
#endif
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
#ifdef __NetBSD__
	va_list args;

	va_start(args, format);
	printf("DRM debug in %s: ", function_name);
	vprintf(format, args);
	va_end(args);
#else
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
#endif
}
EXPORT_SYMBOL(drm_ut_debug_printk);
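
/*
 * Drivers normally do not call drm_err() or drm_ut_debug_printk() directly;
 * they use the DRM_ERROR()/DRM_DEBUG() macros from drmP.h, which supply
 * __func__ and, for debug output, consult the drm_debug knob first.  A
 * hedged sketch of a typical call site, for illustration only
 * (drm_do_something() is a made-up placeholder):
 *
 *	ret = drm_do_something(dev);
 *	if (ret)
 *		DRM_ERROR("something failed: %d\n", ret);
 *	DRM_DEBUG("minor %d opened\n", minor->index);
 */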

struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
#ifdef __NetBSD__
	DRM_INIT_WAITQUEUE(&master->lock.lock_queue, "drmlockq");
#else
	init_waitqueue_head(&master->lock.lock_queue);
#endif
	drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&master->magicfree);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_magic_entry *pt, *next;
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	kfree(dev->devname);
	dev->devname = NULL;

	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
		list_del(&pt->head);
		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
		kfree(pt);
	}

	drm_ht_remove(&master->magiclist);

#ifdef __NetBSD__
	spin_lock_destroy(&master->lock.spinlock);
	DRM_DESTROY_WAITQUEUE(&master->lock.lock_queue);
#endif

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}

void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
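
/*
 * drm_master_get() and drm_master_put() follow the usual kref pattern:
 * every get is balanced by exactly one put, and drm_master_put() clears
 * the caller's pointer so it cannot be reused after the reference is
 * dropped.  A minimal, hypothetical sketch, not taken from this file:
 *
 *	struct drm_master *master;
 *
 *	master = drm_master_get(file_priv->master);
 *	... use master ...
 *	drm_master_put(&master);
 */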

int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same lifetime as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot;

	slot = drm_minor_get_slot(dev, type);
	if (*slot) {
		kfree(*slot);
		*slot = NULL;
	}
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *new_minor;
	unsigned long flags;
#ifndef __NetBSD__
	int ret;
#endif
	int minor_id;

	DRM_DEBUG("\n");

	new_minor = *drm_minor_get_slot(dev, type);
	if (!new_minor)
		return 0;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor_id = idr_alloc(&drm_minors_idr,
			     NULL,
			     64 * type,
			     64 * (type + 1),
			     GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (minor_id < 0)
		return minor_id;

	new_minor->index = minor_id;

#ifndef __NetBSD__
	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_id;
	}

	ret = drm_sysfs_device_add(new_minor);
	if (ret) {
		DRM_ERROR("DRM: Error sysfs_device_add.\n");
		goto err_debugfs;
	}
#endif
	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, new_minor, new_minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;

#ifndef __NetBSD__
err_debugfs:
	drm_debugfs_cleanup(new_minor);
err_id:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor_id);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	new_minor->index = 0;
	return ret;
#endif
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !minor->kdev)
		return;

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	minor->index = 0;

	drm_debugfs_cleanup(minor);
	drm_sysfs_device_remove(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference count of the underlying device is increased, so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}
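
/*
 * A hedged sketch of how a char-dev open path is expected to use the pair
 * above: look up the minor by id, bail out on ERR_PTR() results, and drop
 * the device reference with drm_minor_release() when done.  Hypothetical
 * caller, not taken from this file:
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... operate on minor->dev ...
 *	drm_minor_release(minor);
 */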

/**
 * Called via drm_exit() at module unload time or when a PCI device is
 * unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);
	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

#ifdef __NetBSD__

struct inode;

static struct inode *
drm_fs_inode_new(void)
{
	return NULL;
}

static void
drm_fs_inode_free(struct inode *inode)
{
	KASSERT(inode == NULL);
}

#else

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

#endif

/**
 * drm_dev_alloc - Allocate new drm device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register
 * it with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->count_lock);
	spin_lock_init(&dev->event_lock);
#ifdef __NetBSD__
	linux_mutex_init(&dev->struct_mutex);
	linux_mutex_init(&dev->ctxlist_mutex);
	linux_mutex_init(&dev->master_mutex);
#else
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);
#endif

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
#ifdef __NetBSD__
	linux_mutex_destroy(&dev->struct_mutex);
	linux_mutex_destroy(&dev->ctxlist_mutex);
	linux_mutex_destroy(&dev->master_mutex);
	spin_lock_destroy(&dev->event_lock);
	spin_lock_destroy(&dev->count_lock);
#else
	mutex_destroy(&dev->master_mutex);
#endif
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	kfree(dev->devname);

#ifdef __NetBSD__
	linux_mutex_destroy(&dev->struct_mutex);
	linux_mutex_destroy(&dev->ctxlist_mutex);
	linux_mutex_destroy(&dev->master_mutex);
	spin_lock_destroy(&dev->event_lock);
	spin_lock_destroy(&dev->count_lock);
#else
	mutex_destroy(&dev->master_mutex);
#endif
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

#ifndef __NetBSD__
	mutex_lock(&drm_global_mutex);
#endif

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
#ifndef __NetBSD__
	mutex_unlock(&drm_global_mutex);
#endif
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

#ifndef __NetBSD__		/* Moved to drm_pci.  */
	if (dev->agp)
		drm_pci_agp_destroy(dev);
#endif

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
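
/*
 * Taken together, a bus driver is expected to drive the device life-cycle
 * roughly as sketched below (hypothetical probe/teardown code, not from
 * this file; example_driver is a made-up name): allocate, register, and on
 * teardown either call drm_put_dev() or the explicit
 * drm_dev_unregister() + drm_dev_unref() pair it wraps.
 *
 *	struct drm_device *ddev;
 *	int ret;
 *
 *	ddev = drm_dev_alloc(&example_driver, parent);
 *	if (!ddev)
 *		return -ENOMEM;
 *	ret = drm_dev_register(ddev, 0);
 *	if (ret) {
 *		drm_dev_unref(ddev);
 *		return ret;
 *	}
 *	...
 *	drm_put_dev(ddev);
 */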