xref: /openbsd/sys/dev/pci/drm/drm_drv.c (revision 3836e7c7)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/specdev.h>
32 #include <sys/vnode.h>
33 
34 #include <machine/bus.h>
35 
36 #ifdef __HAVE_ACPI
37 #include <dev/acpi/acpidev.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/dsdt.h>
40 #endif
41 
42 #include <linux/debugfs.h>
43 #include <linux/fs.h>
44 #include <linux/module.h>
45 #include <linux/moduleparam.h>
46 #include <linux/mount.h>
47 #include <linux/pseudo_fs.h>
48 #include <linux/slab.h>
49 #include <linux/srcu.h>
50 #include <linux/xarray.h>
51 #include <linux/suspend.h>
52 
53 #include <drm/drm_accel.h>
54 #include <drm/drm_cache.h>
55 #include <drm/drm_client.h>
56 #include <drm/drm_color_mgmt.h>
57 #include <drm/drm_drv.h>
58 #include <drm/drm_file.h>
59 #include <drm/drm_managed.h>
60 #include <drm/drm_mode_object.h>
61 #include <drm/drm_print.h>
62 #include <drm/drm_privacy_screen_machine.h>
63 
64 #include <drm/drm_gem.h>
65 
66 #include "drm_crtc_internal.h"
67 #include "drm_internal.h"
68 #include "drm_legacy.h"
69 
70 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
71 MODULE_DESCRIPTION("DRM shared core routines");
72 MODULE_LICENSE("GPL and additional rights");
73 
74 DEFINE_XARRAY_ALLOC(drm_minors_xa);
75 
76 /*
77  * If the drm core fails to init for whatever reason,
78  * we should prevent any drivers from registering with it.
79  * It's best to check this at drm_dev_init(), as some drivers
80  * prefer to embed struct drm_device into their own device
81  * structure and call drm_dev_init() themselves.
82  */
83 static bool drm_core_init_complete;
84 
85 static struct dentry *drm_debugfs_root;
86 
87 #ifdef notyet
88 DEFINE_STATIC_SRCU(drm_unplug_srcu);
89 #endif
90 
91 /*
92  * Some functions are only called once on init regardless of how many times
93  * drm attaches.  In Linux this is handled via module_init()/module_exit().
94  */
95 int drm_refcnt;
96 
97 struct drm_softc {
98 	struct device		sc_dev;
99 	struct drm_device 	*sc_drm;
100 	int			sc_allocated;
101 };
102 
103 struct drm_attach_args {
104 	struct drm_device		*drm;
105 	const struct drm_driver		*driver;
106 	char				*busid;
107 	bus_dma_tag_t			 dmat;
108 	bus_space_tag_t			 bst;
109 	size_t				 busid_len;
110 	int				 is_agp;
111 	struct pci_attach_args		*pa;
112 	int				 primary;
113 };
114 
115 void	drm_linux_init(void);
116 void	drm_linux_exit(void);
117 int	drm_linux_acpi_notify(struct aml_node *, int, void *);
118 
119 int	drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
120 	    struct drm_pending_event **);
121 
122 int	drmprint(void *, const char *);
123 int	drmsubmatch(struct device *, void *, void *);
124 const struct pci_device_id *
125 	drm_find_description(int, int, const struct pci_device_id *);
126 
127 int	drm_file_cmp(struct drm_file *, struct drm_file *);
128 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
129 
130 #define DRMDEVCF_PRIMARY	0
131 #define drmdevcf_primary	cf_loc[DRMDEVCF_PRIMARY]	/* spec'd as primary? */
132 #define DRMDEVCF_PRIMARY_UNK	-1
133 
134 /*
135  * DRM Minors
136  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
137  * of them is represented by a drm_minor object. Depending on the capabilities
138  * of the device-driver, different interfaces are registered.
139  *
140  * Minors can be accessed via dev->$minor_name. This pointer is either
141  * NULL or a valid drm_minor pointer and stays valid as long as the device is
142  * valid. This means DRM minors have the same life-time as the underlying
143  * device. However, this doesn't mean that the minor is active. Minors are
144  * registered and unregistered dynamically according to device-state.
145  */
146 
147 static struct xarray *drm_minor_get_xa(enum drm_minor_type type)
148 {
149 	if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)
150 		return &drm_minors_xa;
151 #if IS_ENABLED(CONFIG_DRM_ACCEL)
152 	else if (type == DRM_MINOR_ACCEL)
153 		return &accel_minors_xa;
154 #endif
155 	else
156 		return ERR_PTR(-EOPNOTSUPP);
157 }
158 
159 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
160 					     enum drm_minor_type type)
161 {
162 	switch (type) {
163 	case DRM_MINOR_PRIMARY:
164 		return &dev->primary;
165 	case DRM_MINOR_RENDER:
166 		return &dev->render;
167 	case DRM_MINOR_ACCEL:
168 		return &dev->accel;
169 	default:
170 		BUG();
171 	}
172 }
173 
174 static void drm_minor_alloc_release(struct drm_device *dev, void *data)
175 {
176 	struct drm_minor *minor = data;
177 
178 	WARN_ON(dev != minor->dev);
179 
180 #ifdef __linux__
181 	put_device(minor->kdev);
182 #endif
183 
184 	xa_erase(drm_minor_get_xa(minor->type), minor->index);
185 }
186 
187 /*
188  * DRM used to support 64 devices; for backwards compatibility we need to maintain the
189  * minor allocation scheme where minors 0-63 are primary nodes, 64-127 are control nodes,
190  * and 128-191 are render nodes.
191  * After reaching the limit, minors are allocated dynamically - first-come, first-served.
192  * Accel nodes use a distinct major, so their minors are allocated in a continuous 0-MAX
193  * range.
194  */
195 #define DRM_MINOR_LIMIT(t) ({ \
196 	typeof(t) _t = (t); \
197 	_t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \
198 })
199 #define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1)
200 
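/*
 * A worked example of the limits above (a sketch, assuming the upstream
 * enum values DRM_MINOR_PRIMARY == 0 and DRM_MINOR_RENDER == 2):
 *
 *	DRM_MINOR_LIMIT(DRM_MINOR_PRIMARY) -> XA_LIMIT(0, 63)
 *	DRM_MINOR_LIMIT(DRM_MINOR_RENDER)  -> XA_LIMIT(128, 191)
 *	DRM_MINOR_LIMIT(DRM_MINOR_ACCEL)   -> XA_LIMIT(0, ACCEL_MAX_MINORS)
 *
 * Only when the per-type window is exhausted does drm_minor_alloc() fall
 * back to DRM_EXTENDED_MINOR_LIMIT, i.e. minors 192 and up.
 */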
201 static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
202 {
203 	struct drm_minor *minor;
204 	int r;
205 
206 	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
207 	if (!minor)
208 		return -ENOMEM;
209 
210 	minor->type = type;
211 	minor->dev = dev;
212 
213 	r = xa_alloc(drm_minor_get_xa(type), &minor->index,
214 		     NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
215 	if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER))
216 		r = xa_alloc(&drm_minors_xa, &minor->index,
217 			     NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL);
218 	if (r < 0)
219 		return r;
220 
221 	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
222 	if (r)
223 		return r;
224 
225 #ifdef __linux__
226 	minor->kdev = drm_sysfs_minor_alloc(minor);
227 	if (IS_ERR(minor->kdev))
228 		return PTR_ERR(minor->kdev);
229 #endif
230 
231 	*drm_minor_get_slot(dev, type) = minor;
232 	return 0;
233 }
234 
235 static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
236 {
237 	struct drm_minor *minor;
238 	void *entry;
239 	int ret;
240 
241 	DRM_DEBUG("\n");
242 
243 	minor = *drm_minor_get_slot(dev, type);
244 	if (!minor)
245 		return 0;
246 
247 #ifdef __linux__
248 	if (minor->type == DRM_MINOR_ACCEL) {
249 		accel_debugfs_init(minor, minor->index);
250 	} else {
251 		ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
252 		if (ret) {
253 			DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
254 			goto err_debugfs;
255 		}
256 	}
257 
258 	ret = device_add(minor->kdev);
259 	if (ret)
260 		goto err_debugfs;
261 #else
262 	drm_debugfs_root = NULL;
263 #endif
264 
265 	/* replace NULL with @minor so lookups will succeed from now on */
266 	entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
267 	if (xa_is_err(entry)) {
268 		ret = xa_err(entry);
269 		goto err_debugfs;
270 	}
271 	WARN_ON(entry);
272 
273 	DRM_DEBUG("new minor registered %d\n", minor->index);
274 	return 0;
275 
276 err_debugfs:
277 #ifdef __linux__
278 	drm_debugfs_cleanup(minor);
279 #endif
280 	return ret;
281 }
282 
283 static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
284 {
285 	struct drm_minor *minor;
286 
287 	minor = *drm_minor_get_slot(dev, type);
288 #ifdef __linux__
289 	if (!minor || !device_is_registered(minor->kdev))
290 #else
291 	if (!minor)
292 #endif
293 		return;
294 
295 	/* replace @minor with NULL so lookups will fail from now on */
296 	xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);
297 
298 #ifdef __linux__
299 	device_del(minor->kdev);
300 #endif
301 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
302 	drm_debugfs_cleanup(minor);
303 }
304 
305 /*
306  * Looks up the given minor-ID and returns the respective DRM-minor object. The
307  * reference-count of the underlying device is increased so you must release this
308  * object with drm_minor_release().
309  *
310  * As long as you hold this minor, it is guaranteed that the object and the
311  * minor->dev pointer will stay valid! However, the device may get unplugged and
312  * unregistered while you hold the minor.
313  */
314 struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id)
315 {
316 	struct drm_minor *minor;
317 
318 	xa_lock(minor_xa);
319 	minor = xa_load(minor_xa, minor_id);
320 	if (minor)
321 		drm_dev_get(minor->dev);
322 	xa_unlock(minor_xa);
323 
324 	if (!minor) {
325 		return ERR_PTR(-ENODEV);
326 	} else if (drm_dev_is_unplugged(minor->dev)) {
327 		drm_dev_put(minor->dev);
328 		return ERR_PTR(-ENODEV);
329 	}
330 
331 	return minor;
332 }
333 
334 void drm_minor_release(struct drm_minor *minor)
335 {
336 	drm_dev_put(minor->dev);
337 }
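
/*
 * Typical pairing of drm_minor_acquire()/drm_minor_release() as used from a
 * char-dev entry point (a minimal sketch; error handling reduced to the
 * essentials):
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(&drm_minors_xa, minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *
 *	// minor and minor->dev are guaranteed to stay valid here
 *
 *	drm_minor_release(minor);
 */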
338 
339 /**
340  * DOC: driver instance overview
341  *
342  * A device instance for a drm driver is represented by &struct drm_device. This
343  * is allocated and initialized with devm_drm_dev_alloc(), usually from
344  * bus-specific ->probe() callbacks implemented by the driver. The driver then
345  * needs to initialize all the various subsystems for the drm device like memory
346  * management, vblank handling, modesetting support and initial output
347  * configuration plus obviously initialize all the corresponding hardware bits.
348  * Finally when everything is up and running and ready for userspace the device
349  * instance can be published using drm_dev_register().
350  *
351  * There is also deprecated support for initializing device instances using
352  * bus-specific helpers and the &drm_driver.load callback. But due to
353  * backwards-compatibility needs the device instance has to be published too
354  * early, which requires unpretty global locking to make safe and is therefore
355  * only supported for existing drivers not yet converted to the new scheme.
356  *
357  * When cleaning up a device instance everything needs to be done in reverse:
358  * First unpublish the device instance with drm_dev_unregister(). Then clean up
359  * any other resources allocated at device initialization and drop the driver's
360  * reference to &drm_device using drm_dev_put().
361  *
362  * Note that any allocation or resource which is visible to userspace must be
363  * released only when the final drm_dev_put() is called, and not when the
364  * driver is unbound from the underlying physical struct &device. Best to use
365  * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
366  * related functions.
367  *
368  * devres managed resources like devm_kmalloc() can only be used for resources
369  * directly related to the underlying hardware device, and only used in code
370  * paths fully protected by drm_dev_enter() and drm_dev_exit().
371  *
372  * Display driver example
373  * ~~~~~~~~~~~~~~~~~~~~~~
374  *
375  * The following example shows a typical structure of a DRM display driver.
376  * The example focuses on the probe() function and the other functions that are
377  * almost always present, and serves as a demonstration of devm_drm_dev_alloc().
378  *
379  * .. code-block:: c
380  *
381  *	struct driver_device {
382  *		struct drm_device drm;
383  *		void *userspace_facing;
384  *		struct clk *pclk;
385  *	};
386  *
387  *	static const struct drm_driver driver_drm_driver = {
388  *		[...]
389  *	};
390  *
391  *	static int driver_probe(struct platform_device *pdev)
392  *	{
393  *		struct driver_device *priv;
394  *		struct drm_device *drm;
395  *		int ret;
396  *
397  *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
398  *					  struct driver_device, drm);
399  *		if (IS_ERR(priv))
400  *			return PTR_ERR(priv);
401  *		drm = &priv->drm;
402  *
403  *		ret = drmm_mode_config_init(drm);
404  *		if (ret)
405  *			return ret;
406  *
407  *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
408  *		if (!priv->userspace_facing)
409  *			return -ENOMEM;
410  *
411  *		priv->pclk = devm_clk_get(dev, "PCLK");
412  *		if (IS_ERR(priv->pclk))
413  *			return PTR_ERR(priv->pclk);
414  *
415  *		// Further setup, display pipeline etc
416  *
417  *		platform_set_drvdata(pdev, drm);
418  *
419  *		drm_mode_config_reset(drm);
420  *
421  *		ret = drm_dev_register(drm);
422  *		if (ret)
423  *			return ret;
424  *
425  *		drm_fbdev_generic_setup(drm, 32);
426  *
427  *		return 0;
428  *	}
429  *
430  *	// This function is called before the devm_ resources are released
431  *	static int driver_remove(struct platform_device *pdev)
432  *	{
433  *		struct drm_device *drm = platform_get_drvdata(pdev);
434  *
435  *		drm_dev_unregister(drm);
436  *		drm_atomic_helper_shutdown(drm);
437  *
438  *		return 0;
439  *	}
440  *
441  *	// This function is called on kernel restart and shutdown
442  *	static void driver_shutdown(struct platform_device *pdev)
443  *	{
444  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
445  *	}
446  *
447  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
448  *	{
449  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
450  *	}
451  *
452  *	static int __maybe_unused driver_pm_resume(struct device *dev)
453  *	{
454  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
455  *
456  *		return 0;
457  *	}
458  *
459  *	static const struct dev_pm_ops driver_pm_ops = {
460  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
461  *	};
462  *
463  *	static struct platform_driver driver_driver = {
464  *		.driver = {
465  *			[...]
466  *			.pm = &driver_pm_ops,
467  *		},
468  *		.probe = driver_probe,
469  *		.remove = driver_remove,
470  *		.shutdown = driver_shutdown,
471  *	};
472  *	module_platform_driver(driver_driver);
473  *
474  * Drivers that want to support device unplugging (USB, DT overlay unload) should
475  * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
476  * regions that are accessing device resources to prevent use after they're
477  * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
478  * shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged before
479  * drm_atomic_helper_shutdown() is called. This means that if the disable code
480  * paths are protected, they will not run on regular driver module unload,
481  * possibly leaving the hardware enabled.
482  */
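
/*
 * The drm_dev_enter()/drm_dev_exit() pattern referenced above looks like
 * this in a driver callback (a minimal sketch; driver_hw_write() is a
 * hypothetical helper that touches device resources):
 *
 * .. code-block:: c
 *
 *	static void driver_some_callback(struct drm_device *drm)
 *	{
 *		int idx;
 *
 *		if (!drm_dev_enter(drm, &idx))
 *			return;	// device is unplugged, skip the hw access
 *
 *		driver_hw_write(drm);
 *
 *		drm_dev_exit(idx);
 *	}
 */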
483 
484 /**
485  * drm_put_dev - Unregister and release a DRM device
486  * @dev: DRM device
487  *
488  * Called at module unload time or when a PCI device is unplugged.
489  *
490  * Cleans up the DRM device, calling drm_lastclose().
491  *
492  * Note: Use of this function is deprecated. It will eventually go away
493  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
494  * instead to make sure that the device isn't userspace accessible any more
495  * while teardown is in progress, ensuring that userspace can't access an
496  * inconsistent state.
497  */
498 void drm_put_dev(struct drm_device *dev)
499 {
500 	DRM_DEBUG("\n");
501 
502 	if (!dev) {
503 		DRM_ERROR("cleanup called no dev\n");
504 		return;
505 	}
506 
507 	drm_dev_unregister(dev);
508 	drm_dev_put(dev);
509 }
510 EXPORT_SYMBOL(drm_put_dev);
511 
512 /**
513  * drm_dev_enter - Enter device critical section
514  * @dev: DRM device
515  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
516  *
517  * This function marks and protects the beginning of a section that should not
518  * be entered after the device has been unplugged. The section end is marked
519  * with drm_dev_exit(). Calls to this function can be nested.
520  *
521  * Returns:
522  * True if it is OK to enter the section, false otherwise.
523  */
524 bool drm_dev_enter(struct drm_device *dev, int *idx)
525 {
526 #ifdef notyet
527 	*idx = srcu_read_lock(&drm_unplug_srcu);
528 
529 	if (dev->unplugged) {
530 		srcu_read_unlock(&drm_unplug_srcu, *idx);
531 		return false;
532 	}
533 #endif
534 
535 	return true;
536 }
537 EXPORT_SYMBOL(drm_dev_enter);
538 
539 /**
540  * drm_dev_exit - Exit device critical section
541  * @idx: index returned from drm_dev_enter()
542  *
543  * This function marks the end of a section that should not be entered after
544  * the device has been unplugged.
545  */
546 void drm_dev_exit(int idx)
547 {
548 #ifdef notyet
549 	srcu_read_unlock(&drm_unplug_srcu, idx);
550 #endif
551 }
552 EXPORT_SYMBOL(drm_dev_exit);
553 
554 /**
555  * drm_dev_unplug - unplug a DRM device
556  * @dev: DRM device
557  *
558  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
559  * userspace operations. Entry-points can use drm_dev_enter() and
560  * drm_dev_exit() to protect device resources in a race free manner. This
561  * essentially unregisters the device like drm_dev_unregister(), but can be
562  * called while there are still open users of @dev.
563  */
564 void drm_dev_unplug(struct drm_device *dev)
565 {
566 	STUB();
567 #ifdef notyet
568 	/*
569 	 * After synchronizing any critical read section is guaranteed to see
570 	 * the new value of ->unplugged, and any critical section which might
571 	 * still have seen the old value of ->unplugged is guaranteed to have
572 	 * finished.
573 	 */
574 	dev->unplugged = true;
575 	synchronize_srcu(&drm_unplug_srcu);
576 
577 	drm_dev_unregister(dev);
578 
579 	/* Clear all CPU mappings pointing to this device */
580 	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
581 #endif
582 }
583 EXPORT_SYMBOL(drm_dev_unplug);
584 
585 #ifdef __linux__
586 /*
587  * DRM internal mount
588  * We want to be able to allocate our own "struct address_space" to control
589  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
590  * stand-alone address_space objects, so we need an underlying inode. As there
591  * is no way to allocate an independent inode easily, we need a fake internal
592  * VFS mount-point.
593  *
594  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
595  * frees it again. You are allowed to use iget() and iput() to get references to
596  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
597  * drm_fs_inode_free() call (which does not have to be the last iput()).
598  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
599  * between multiple inode-users. You could, technically, call
600  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
601  * iput(), but this way you'd end up with a new vfsmount for each inode.
602  */
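
/*
 * The pairing rules above boil down to the following (a sketch):
 *
 *	struct inode *inode = drm_fs_inode_new();	// pins the vfsmount
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	drm_fs_inode_free(inode);	// exactly once per drm_fs_inode_new()
 */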
603 
604 static int drm_fs_cnt;
605 static struct vfsmount *drm_fs_mnt;
606 
607 static int drm_fs_init_fs_context(struct fs_context *fc)
608 {
609 	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
610 }
611 
612 static struct file_system_type drm_fs_type = {
613 	.name		= "drm",
614 	.owner		= THIS_MODULE,
615 	.init_fs_context = drm_fs_init_fs_context,
616 	.kill_sb	= kill_anon_super,
617 };
618 
619 static struct inode *drm_fs_inode_new(void)
620 {
621 	struct inode *inode;
622 	int r;
623 
624 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
625 	if (r < 0) {
626 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
627 		return ERR_PTR(r);
628 	}
629 
630 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
631 	if (IS_ERR(inode))
632 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
633 
634 	return inode;
635 }
636 
637 static void drm_fs_inode_free(struct inode *inode)
638 {
639 	if (inode) {
640 		iput(inode);
641 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
642 	}
643 }
644 
645 #endif /* __linux__ */
646 
647 /**
648  * DOC: component helper usage recommendations
649  *
650  * DRM drivers that drive hardware where a logical device consists of a pile of
651  * independent hardware blocks are recommended to use the :ref:`component helper
652  * library<component>`. For consistency and better options for code reuse the
653  * following guidelines apply:
654  *
655  *  - The entire device initialization procedure should be run from the
656  *    &component_master_ops.master_bind callback, starting with
657  *    devm_drm_dev_alloc(), then binding all components with
658  *    component_bind_all() and finishing with drm_dev_register().
659  *
660  *  - The opaque pointer passed to all components through component_bind_all()
661  *    should point at &struct drm_device of the device instance, not some driver
662  *    specific private structure.
663  *
664  *  - The component helper fills the niche where further standardization of
665  *    interfaces is not practical. When there already is, or will be, a
666  *    standardized interface like &drm_bridge or &drm_panel, providing its own
667  *    functions to find such components at driver load time, like
668  *    drm_of_find_panel_or_bridge(), then the component helper should not be
669  *    used.
670  */
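
/*
 * A &component_master_ops.master_bind callback following the
 * recommendations above could look like this (a hedged sketch;
 * driver_device and driver_drm_driver are hypothetical names, and the
 * error unwinding with component_unbind_all() is omitted for brevity):
 *
 * .. code-block:: c
 *
 *	static int driver_master_bind(struct device *dev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		// pass &struct drm_device as the opaque pointer,
 *		// not a driver-private structure
 *		ret = component_bind_all(dev, drm);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dev_register(drm, 0);
 *	}
 */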
671 
672 static void drm_dev_init_release(struct drm_device *dev, void *res)
673 {
674 	drm_legacy_ctxbitmap_cleanup(dev);
675 	drm_legacy_remove_map_hash(dev);
676 #ifdef __linux__
677 	drm_fs_inode_free(dev->anon_inode);
678 
679 	put_device(dev->dev);
680 #endif
681 	/* Prevent use-after-free in drm_managed_release when debugging is
682 	 * enabled. Slightly awkward, but can't really be helped. */
683 	dev->dev = NULL;
684 	mutex_destroy(&dev->master_mutex);
685 	mutex_destroy(&dev->clientlist_mutex);
686 	mutex_destroy(&dev->filelist_mutex);
687 	mutex_destroy(&dev->struct_mutex);
688 	mutex_destroy(&dev->debugfs_mutex);
689 	drm_legacy_destroy_members(dev);
690 }
691 
692 #ifdef notyet
693 
694 static int drm_dev_init(struct drm_device *dev,
695 			const struct drm_driver *driver,
696 			struct device *parent)
697 {
698 	struct inode *inode;
699 	int ret;
700 
701 	if (!drm_core_init_complete) {
702 		DRM_ERROR("DRM core is not initialized\n");
703 		return -ENODEV;
704 	}
705 
706 	if (WARN_ON(!parent))
707 		return -EINVAL;
708 
709 	kref_init(&dev->ref);
710 	dev->dev = get_device(parent);
711 	dev->driver = driver;
712 
713 	INIT_LIST_HEAD(&dev->managed.resources);
714 	spin_lock_init(&dev->managed.lock);
715 
716 	/* no per-device feature limits by default */
717 	dev->driver_features = ~0u;
718 
719 	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
720 				(drm_core_check_feature(dev, DRIVER_RENDER) ||
721 				drm_core_check_feature(dev, DRIVER_MODESET))) {
722 		DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
723 		return -EINVAL;
724 	}
725 
726 	drm_legacy_init_members(dev);
727 	INIT_LIST_HEAD(&dev->filelist);
728 	INIT_LIST_HEAD(&dev->filelist_internal);
729 	INIT_LIST_HEAD(&dev->clientlist);
730 	INIT_LIST_HEAD(&dev->vblank_event_list);
731 	INIT_LIST_HEAD(&dev->debugfs_list);
732 
733 	spin_lock_init(&dev->event_lock);
734 	mutex_init(&dev->struct_mutex);
735 	mutex_init(&dev->filelist_mutex);
736 	mutex_init(&dev->clientlist_mutex);
737 	mutex_init(&dev->master_mutex);
738 	mutex_init(&dev->debugfs_mutex);
739 
740 	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
741 	if (ret)
742 		return ret;
743 
744 	inode = drm_fs_inode_new();
745 	if (IS_ERR(inode)) {
746 		ret = PTR_ERR(inode);
747 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
748 		goto err;
749 	}
750 
751 	dev->anon_inode = inode;
752 
753 	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
754 		ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
755 		if (ret)
756 			goto err;
757 	} else {
758 		if (drm_core_check_feature(dev, DRIVER_RENDER)) {
759 			ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
760 			if (ret)
761 				goto err;
762 		}
763 
764 		ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
765 		if (ret)
766 			goto err;
767 	}
768 
769 	ret = drm_legacy_create_map_hash(dev);
770 	if (ret)
771 		goto err;
772 
773 	drm_legacy_ctxbitmap_init(dev);
774 
775 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
776 		ret = drm_gem_init(dev);
777 		if (ret) {
778 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
779 			goto err;
780 		}
781 	}
782 
783 	dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
784 	if (!dev->unique) {
785 		ret = -ENOMEM;
786 		goto err;
787 	}
788 
789 	return 0;
790 
791 err:
792 	drm_managed_release(dev);
793 
794 	return ret;
795 }
796 
797 static void devm_drm_dev_init_release(void *data)
798 {
799 	drm_dev_put(data);
800 }
801 
802 static int devm_drm_dev_init(struct device *parent,
803 			     struct drm_device *dev,
804 			     const struct drm_driver *driver)
805 {
806 	int ret;
807 
808 	ret = drm_dev_init(dev, driver, parent);
809 	if (ret)
810 		return ret;
811 
812 	return devm_add_action_or_reset(parent,
813 					devm_drm_dev_init_release, dev);
814 }
815 
816 #endif
817 
818 void *__devm_drm_dev_alloc(struct device *parent,
819 			   const struct drm_driver *driver,
820 			   size_t size, size_t offset)
821 {
822 	void *container;
823 	struct drm_device *drm;
824 #ifdef notyet
825 	int ret;
826 #endif
827 
828 	container = kzalloc(size, GFP_KERNEL);
829 	if (!container)
830 		return ERR_PTR(-ENOMEM);
831 
832 	drm = container + offset;
833 #ifdef notyet
834 	ret = devm_drm_dev_init(parent, drm, driver);
835 	if (ret) {
836 		kfree(container);
837 		return ERR_PTR(ret);
838 	}
839 	drmm_add_final_kfree(drm, container);
840 #endif
841 
842 	return container;
843 }
844 EXPORT_SYMBOL(__devm_drm_dev_alloc);
845 
846 #ifdef notyet
847 
848 /**
849  * drm_dev_alloc - Allocate new DRM device
850  * @driver: DRM driver to allocate device for
851  * @parent: Parent device object
852  *
853  * This is the deprecated version of devm_drm_dev_alloc(), which does not support
854  * subclassing through embedding the struct &drm_device in a driver private
855  * structure, and which does not support automatic cleanup through devres.
856  *
857  * RETURNS:
858  * Pointer to new DRM device, or ERR_PTR on failure.
859  */
860 struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
861 				 struct device *parent)
862 {
863 	struct drm_device *dev;
864 	int ret;
865 
866 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
867 	if (!dev)
868 		return ERR_PTR(-ENOMEM);
869 
870 	ret = drm_dev_init(dev, driver, parent);
871 	if (ret) {
872 		kfree(dev);
873 		return ERR_PTR(ret);
874 	}
875 
876 	drmm_add_final_kfree(dev, dev);
877 
878 	return dev;
879 }
880 EXPORT_SYMBOL(drm_dev_alloc);
881 
882 #endif
883 
884 static void drm_dev_release(struct kref *ref)
885 {
886 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
887 
888 	if (dev->driver->release)
889 		dev->driver->release(dev);
890 
891 	drm_managed_release(dev);
892 
893 	kfree(dev->managed.final_kfree);
894 }
895 
896 /**
897  * drm_dev_get - Take reference of a DRM device
898  * @dev: device to take reference of or NULL
899  *
900  * This increases the ref-count of @dev by one. You *must* already own a
901  * reference when calling this. Use drm_dev_put() to drop this reference
902  * again.
903  *
904  * This function never fails. However, this function does not provide *any*
905  * guarantee whether the device is alive or running. It only provides a
906  * reference to the object and the memory associated with it.
907  */
908 void drm_dev_get(struct drm_device *dev)
909 {
910 	if (dev)
911 		kref_get(&dev->ref);
912 }
913 EXPORT_SYMBOL(drm_dev_get);
914 
915 /**
916  * drm_dev_put - Drop reference of a DRM device
917  * @dev: device to drop reference of or NULL
918  *
919  * This decreases the ref-count of @dev by one. The device is destroyed if the
920  * ref-count drops to zero.
921  */
922 void drm_dev_put(struct drm_device *dev)
923 {
924 	if (dev)
925 		kref_put(&dev->ref, drm_dev_release);
926 }
927 EXPORT_SYMBOL(drm_dev_put);
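
/*
 * drm_dev_get()/drm_dev_put() follow the usual kref conventions; e.g. to
 * keep @dev alive across asynchronous work (a minimal sketch):
 *
 *	drm_dev_get(dev);	// take an extra reference before handing off
 *	...			// the async user runs, and when done calls:
 *	drm_dev_put(dev);
 */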
928 
929 static int create_compat_control_link(struct drm_device *dev)
930 {
931 	struct drm_minor *minor;
932 	char *name;
933 	int ret;
934 
935 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
936 		return 0;
937 
938 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
939 	if (!minor)
940 		return 0;
941 
942 	/*
943 	 * Some existing userspace out there uses the existence of the controlD*
944 	 * sysfs files to figure out whether it's a modeset driver. It only does
945 	 * readdir, hence a symlink is sufficient (and the least confusing
946 	 * option). Otherwise controlD* is entirely unused.
947 	 *
948 	 * Old controlD chardevs have been allocated in the range
949 	 * 64-127.
950 	 */
951 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
952 	if (!name)
953 		return -ENOMEM;
954 
955 	ret = sysfs_create_link(minor->kdev->kobj.parent,
956 				&minor->kdev->kobj,
957 				name);
958 
959 	kfree(name);
960 
961 	return ret;
962 }
963 
964 static void remove_compat_control_link(struct drm_device *dev)
965 {
966 	struct drm_minor *minor;
967 	char *name;
968 
969 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
970 		return;
971 
972 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
973 	if (!minor)
974 		return;
975 
976 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
977 	if (!name)
978 		return;
979 
980 	sysfs_remove_link(minor->kdev->kobj.parent, name);
981 
982 	kfree(name);
983 }
984 
985 /**
986  * drm_dev_register - Register DRM device
987  * @dev: Device to register
988  * @flags: Flags passed to the driver's .load() function
989  *
990  * Register the DRM device @dev with the system, advertise the device to user-space
991  * and start normal device operation. @dev must be initialized via drm_dev_init()
992  * previously.
993  *
994  * Never call this twice on any device!
995  *
996  * NOTE: To ensure backward compatibility with existing drivers, this
997  * function calls the &drm_driver.load method after registering the device
998  * nodes, creating race conditions. Usage of the &drm_driver.load method is
999  * therefore deprecated, drivers must perform all initialization before calling
1000  * drm_dev_register().
1001  *
1002  * RETURNS:
1003  * 0 on success, negative error code on failure.
1004  */
1005 int drm_dev_register(struct drm_device *dev, unsigned long flags)
1006 {
1007 	const struct drm_driver *driver = dev->driver;
1008 	int ret;
1009 
1010 	if (!driver->load)
1011 		drm_mode_config_validate(dev);
1012 
1013 	WARN_ON(!dev->managed.final_kfree);
1014 
1015 	if (drm_dev_needs_global_mutex(dev))
1016 		mutex_lock(&drm_global_mutex);
1017 
1018 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
1019 	if (ret)
1020 		goto err_minors;
1021 
1022 	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
1023 	if (ret)
1024 		goto err_minors;
1025 
1026 	ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
1027 	if (ret)
1028 		goto err_minors;
1029 
1030 	ret = create_compat_control_link(dev);
1031 	if (ret)
1032 		goto err_minors;
1033 
1034 	dev->registered = true;
1035 
1036 	if (driver->load) {
1037 		ret = driver->load(dev, flags);
1038 		if (ret)
1039 			goto err_minors;
1040 	}
1041 
1042 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1043 		ret = drm_modeset_register_all(dev);
1044 		if (ret)
1045 			goto err_unload;
1046 	}
1047 
1048 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
1049 		 driver->name, driver->major, driver->minor,
1050 		 driver->patchlevel, driver->date,
1051 		 dev->dev ? dev_name(dev->dev) : "virtual device",
1052 		 dev->primary ? dev->primary->index : dev->accel->index);
1053 
1054 	goto out_unlock;
1055 
1056 err_unload:
1057 	if (dev->driver->unload)
1058 		dev->driver->unload(dev);
1059 err_minors:
1060 	remove_compat_control_link(dev);
1061 	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
1062 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1063 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
1064 out_unlock:
1065 	if (drm_dev_needs_global_mutex(dev))
1066 		mutex_unlock(&drm_global_mutex);
1067 	return ret;
1068 }
1069 EXPORT_SYMBOL(drm_dev_register);
1070 
1071 /**
1072  * drm_dev_unregister - Unregister DRM device
1073  * @dev: Device to unregister
1074  *
1075  * Unregister the DRM device from the system. This does the reverse of
1076  * drm_dev_register() but does not deallocate the device. The caller must call
1077  * drm_dev_put() to drop their final reference, unless it is managed with devres
1078  * (as devices allocated with devm_drm_dev_alloc() are), in which case there is
1079  * already an unwind action registered.
1080  *
1081  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1082  * which can be called while there are still open users of @dev.
1083  *
1084  * This should be called first in the device teardown code to make sure
1085  * userspace can't access the device instance any more.
1086  */
1087 void drm_dev_unregister(struct drm_device *dev)
1088 {
1089 	if (drm_core_check_feature(dev, DRIVER_LEGACY))
1090 		drm_lastclose(dev);
1091 
1092 	dev->registered = false;
1093 
1094 	drm_client_dev_unregister(dev);
1095 
1096 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1097 		drm_modeset_unregister_all(dev);
1098 
1099 	if (dev->driver->unload)
1100 		dev->driver->unload(dev);
1101 
1102 	drm_legacy_pci_agp_destroy(dev);
1103 	drm_legacy_rmmaps(dev);
1104 
1105 	remove_compat_control_link(dev);
1106 	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
1107 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1108 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
1109 }
1110 EXPORT_SYMBOL(drm_dev_unregister);
1111 
1112 /*
1113  * DRM Core
1114  * The DRM core module initializes all global DRM objects and makes them
1115  * available to drivers. Once setup, drivers can probe their respective
1116  * devices.
1117  * Currently, core management includes:
1118  *  - The "DRM-Global" key/value database
1119  *  - Global ID management for connectors
1120  *  - DRM major number allocation
1121  *  - DRM minor management
1122  *  - DRM sysfs class
1123  *  - DRM debugfs root
1124  *
1125  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1126  * interface registered on a DRM device, you can request minor numbers from DRM
1127  * core. DRM core takes care of major-number management and char-dev
1128  * registration. A stub ->open() callback forwards any open() requests to the
1129  * registered minor.
1130  */
1131 
1132 #ifdef __linux__
1133 static int drm_stub_open(struct inode *inode, struct file *filp)
1134 {
1135 	const struct file_operations *new_fops;
1136 	struct drm_minor *minor;
1137 	int err;
1138 
1139 	DRM_DEBUG("\n");
1140 
1141 	minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
1142 	if (IS_ERR(minor))
1143 		return PTR_ERR(minor);
1144 
1145 	new_fops = fops_get(minor->dev->driver->fops);
1146 	if (!new_fops) {
1147 		err = -ENODEV;
1148 		goto out;
1149 	}
1150 
1151 	replace_fops(filp, new_fops);
1152 	if (filp->f_op->open)
1153 		err = filp->f_op->open(inode, filp);
1154 	else
1155 		err = 0;
1156 
1157 out:
1158 	drm_minor_release(minor);
1159 
1160 	return err;
1161 }
1162 
1163 static const struct file_operations drm_stub_fops = {
1164 	.owner = THIS_MODULE,
1165 	.open = drm_stub_open,
1166 	.llseek = noop_llseek,
1167 };
1168 #endif /* __linux__ */
1169 
1170 static void drm_core_exit(void)
1171 {
1172 	drm_privacy_screen_lookup_exit();
1173 	accel_core_exit();
1174 #ifdef __linux__
1175 	unregister_chrdev(DRM_MAJOR, "drm");
1176 	debugfs_remove(drm_debugfs_root);
1177 	drm_sysfs_destroy();
1178 #endif
1179 	WARN_ON(!xa_empty(&drm_minors_xa));
1180 	drm_connector_ida_destroy();
1181 }
1182 
1183 static int __init drm_core_init(void)
1184 {
1185 #ifdef __linux__
1186 	int ret;
1187 #endif
1188 
1189 	drm_connector_ida_init();
1190 	drm_memcpy_init_early();
1191 
1192 #ifdef __linux__
1193 	ret = drm_sysfs_init();
1194 	if (ret < 0) {
1195 		DRM_ERROR("Cannot create DRM class: %d\n", ret);
1196 		goto error;
1197 	}
1198 
1199 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
1200 
1201 	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
1202 	if (ret < 0)
1203 		goto error;
1204 
1205 	ret = accel_core_init();
1206 	if (ret < 0)
1207 		goto error;
1208 #endif
1209 
1210 	drm_privacy_screen_lookup_init();
1211 
1212 	drm_core_init_complete = true;
1213 
1214 	DRM_DEBUG("Initialized\n");
1215 	return 0;
1216 #ifdef __linux__
1217 error:
1218 	drm_core_exit();
1219 	return ret;
1220 #endif
1221 }
1222 
1223 #ifdef __linux__
1224 module_init(drm_core_init);
1225 module_exit(drm_core_exit);
1226 #endif
1227 
1228 void
1229 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1230     bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1231 {
1232 	struct drm_attach_args arg;
1233 
1234 	memset(&arg, 0, sizeof(arg));
1235 	arg.driver = driver;
1236 	arg.bst = iot;
1237 	arg.dmat = dmat;
1238 	arg.drm = drm;
1239 
1240 	arg.busid = dev->dv_xname;
1241 	arg.busid_len = strlen(dev->dv_xname) + 1;
1242 	config_found_sm(dev, &arg, drmprint, drmsubmatch);
1243 }
1244 
1245 struct drm_device *
1246 drm_attach_pci(const struct drm_driver *driver, struct pci_attach_args *pa,
1247     int is_agp, int primary, struct device *dev, struct drm_device *drm)
1248 {
1249 	struct drm_attach_args arg;
1250 	struct drm_softc *sc;
1251 
1252 	arg.drm = drm;
1253 	arg.driver = driver;
1254 	arg.dmat = pa->pa_dmat;
1255 	arg.bst = pa->pa_memt;
1256 	arg.is_agp = is_agp;
1257 	arg.primary = primary;
1258 	arg.pa = pa;
1259 
1260 	arg.busid_len = 20;
1261 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
1262 	if (arg.busid == NULL) {
1263 		printf("%s: no memory for drm\n", dev->dv_xname);
1264 		return (NULL);
1265 	}
1266 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1267 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1268 
1269 	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1270 	if (sc == NULL)
1271 		return NULL;
1272 
1273 	return sc->sc_drm;
1274 }
1275 
1276 int
1277 drmprint(void *aux, const char *pnp)
1278 {
1279 	if (pnp != NULL)
1280 		printf("drm at %s", pnp);
1281 	return (UNCONF);
1282 }
1283 
1284 int
1285 drmsubmatch(struct device *parent, void *match, void *aux)
1286 {
1287 	extern struct cfdriver drm_cd;
1288 	struct cfdata *cf = match;
1289 
1290 	/* only allow drm to attach */
1291 	if (cf->cf_driver == &drm_cd)
1292 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
1293 	return (0);
1294 }
1295 
1296 int
1297 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1298 {
1299 	const struct pci_device_id *id_entry;
1300 
1301 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1302 	    PCI_PRODUCT(pa->pa_id), idlist);
1303 	if (id_entry != NULL)
1304 		return 1;
1305 
1306 	return 0;
1307 }
1308 
1309 int
1310 drm_probe(struct device *parent, void *match, void *aux)
1311 {
1312 	struct cfdata *cf = match;
1313 	struct drm_attach_args *da = aux;
1314 
1315 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1316 		/*
1317 		 * If primary-ness of device specified, either match
1318 		 * exactly (at high priority), or fail.
1319 		 */
1320 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
1321 			return (10);
1322 		else
1323 			return (0);
1324 	}
1325 
1326 	/* If primary-ness unspecified, it wins. */
1327 	return (1);
1328 }
1329 
1330 int drm_buddy_module_init(void);
1331 void drm_buddy_module_exit(void);
1332 
1333 void
1334 drm_attach(struct device *parent, struct device *self, void *aux)
1335 {
1336 	struct drm_softc *sc = (struct drm_softc *)self;
1337 	struct drm_attach_args *da = aux;
1338 	struct drm_device *dev = da->drm;
1339 	int ret;
1340 
1341 	if (drm_refcnt == 0) {
1342 		drm_linux_init();
1343 		drm_core_init();
1344 		drm_buddy_module_init();
1345 	}
1346 	drm_refcnt++;
1347 
1348 	if (dev == NULL) {
1349 		dev = malloc(sizeof(struct drm_device), M_DRM,
1350 		    M_WAITOK | M_ZERO);
1351 		sc->sc_allocated = 1;
1352 	}
1353 
1354 	sc->sc_drm = dev;
1355 
1356 	kref_init(&dev->ref);
1357 	dev->dev = self;
1358 	dev->dev_private = parent;
1359 	dev->driver = da->driver;
1360 
1361 	INIT_LIST_HEAD(&dev->managed.resources);
1362 	mtx_init(&dev->managed.lock, IPL_TTY);
1363 
1364 	/* no per-device feature limits by default */
1365 	dev->driver_features = ~0u;
1366 
1367 	dev->dmat = da->dmat;
1368 	dev->bst = da->bst;
1369 	dev->unique = da->busid;
1370 
1371 	if (da->pa) {
1372 		struct pci_attach_args *pa = da->pa;
1373 		pcireg_t subsys;
1374 
1375 		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
1376 		    PCI_SUBSYS_ID_REG);
1377 
1378 		dev->pdev = &dev->_pdev;
1379 		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
1380 		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
1381 		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
1382 		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
1383 		dev->pdev->revision = PCI_REVISION(pa->pa_class);
1384 		dev->pdev->class = (PCI_CLASS(pa->pa_class) << 16) |
1385 		    (PCI_SUBCLASS(pa->pa_class) << 8) |
1386 		    PCI_INTERFACE(pa->pa_class);
1387 
1388 		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
1389 		dev->pdev->bus = &dev->pdev->_bus;
1390 		dev->pdev->bus->pc = pa->pa_pc;
1391 		dev->pdev->bus->number = pa->pa_bus;
1392 		dev->pdev->bus->domain_nr = pa->pa_domain;
1393 		dev->pdev->bus->bridgetag = pa->pa_bridgetag;
1394 
1395 		if (pa->pa_bridgetag != NULL) {
1396 			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
1397 			    M_DRM, M_WAITOK | M_ZERO);
1398 			dev->pdev->bus->self->pc = pa->pa_pc;
1399 			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
1400 		}
1401 
1402 		dev->pdev->pc = pa->pa_pc;
1403 		dev->pdev->tag = pa->pa_tag;
1404 		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;
1405 		dev->pdev->_dev = parent;
1406 
1407 #ifdef CONFIG_ACPI
1408 		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
1409 		aml_register_notify(dev->pdev->dev.node, NULL,
1410 		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
1411 #endif
1412 	}
1413 
1414 	mtx_init(&dev->quiesce_mtx, IPL_NONE);
1415 	mtx_init(&dev->event_lock, IPL_TTY);
1416 	rw_init(&dev->struct_mutex, "drmdevlk");
1417 	rw_init(&dev->filelist_mutex, "drmflist");
1418 	rw_init(&dev->clientlist_mutex, "drmclist");
1419 	rw_init(&dev->master_mutex, "drmmast");
1420 
1421 	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
1422 	if (ret)
1423 		goto error;
1424 
1425 	SPLAY_INIT(&dev->files);
1426 	INIT_LIST_HEAD(&dev->filelist_internal);
1427 	INIT_LIST_HEAD(&dev->clientlist);
1428 	INIT_LIST_HEAD(&dev->vblank_event_list);
1429 
1430 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
1431 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
1432 		if (ret)
1433 			goto error;
1434 	}
1435 
1436 	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
1437 	if (ret)
1438 		goto error;
1439 
1440 #ifdef CONFIG_DRM_LEGACY
1441 	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
1442 #if IS_ENABLED(CONFIG_AGP)
1443 		if (da->is_agp)
1444 			dev->agp = drm_agp_init();
1445 #endif
1446 		if (dev->agp != NULL) {
1447 			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
1448 			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
1449 				dev->agp->mtrr = 1;
1450 		}
1451 	}
1452 #endif
1453 
1454 	if (dev->driver->gem_size > 0) {
1455 		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
1456 		/* XXX unique name */
1457 		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
1458 		    "drmobjpl", NULL);
1459 	}
1460 
1461 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
1462 		ret = drm_gem_init(dev);
1463 		if (ret) {
1464 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
1465 			goto error;
1466 		}
1467 	}
1468 
1469 	drmm_add_final_kfree(dev, dev);
1470 
1471 	printf("\n");
1472 	return;
1473 
1474 error:
1475 	drm_managed_release(dev);
1476 	dev->dev_private = NULL;
1477 }
1478 
1479 int
1480 drm_detach(struct device *self, int flags)
1481 {
1482 	struct drm_softc *sc = (struct drm_softc *)self;
1483 	struct drm_device *dev = sc->sc_drm;
1484 
1485 	drm_refcnt--;
1486 	if (drm_refcnt == 0) {
1487 		drm_buddy_module_exit();
1488 		drm_core_exit();
1489 		drm_linux_exit();
1490 	}
1491 
1492 	drm_lastclose(dev);
1493 
1494 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
1495 		if (dev->driver->gem_size > 0)
1496 			pool_destroy(&dev->objpl);
1497 	}
1498 
1499 #ifdef CONFIG_DRM_LEGACY
1500 	if (dev->agp && dev->agp->mtrr) {
1501 		int retcode;
1502 
1503 		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
1504 		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
1505 		DRM_DEBUG("mtrr_del = %d", retcode);
1506 	}
1507 
1508 	free(dev->agp, M_DRM, 0);
1509 #endif
1510 	if (dev->pdev && dev->pdev->bus)
1511 		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));
1512 
1513 	if (sc->sc_allocated)
1514 		free(dev, M_DRM, sizeof(struct drm_device));
1515 
1516 	return 0;
1517 }
1518 
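/*
 * Suspend/resume helpers (OpenBSD-specific): drm_quiesce() raises the
 * quiesce flag and sleeps until all in-flight users counted by
 * quiesce_count have drained; drm_wakeup() clears the flag and wakes any
 * thread blocked on it. Both are driven from drm_activate() below.
 */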
1519 void
1520 drm_quiesce(struct drm_device *dev)
1521 {
1522 	mtx_enter(&dev->quiesce_mtx);
1523 	dev->quiesce = 1;
1524 	while (dev->quiesce_count > 0) {
1525 		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
1526 		    PZERO, "drmqui", INFSLP);
1527 	}
1528 	mtx_leave(&dev->quiesce_mtx);
1529 }
1530 
1531 void
1532 drm_wakeup(struct drm_device *dev)
1533 {
1534 	mtx_enter(&dev->quiesce_mtx);
1535 	dev->quiesce = 0;
1536 	wakeup(&dev->quiesce);
1537 	mtx_leave(&dev->quiesce_mtx);
1538 }
1539 
1540 int
1541 drm_activate(struct device *self, int act)
1542 {
1543 	struct drm_softc *sc = (struct drm_softc *)self;
1544 	struct drm_device *dev = sc->sc_drm;
1545 
1546 	switch (act) {
1547 	case DVACT_QUIESCE:
1548 #ifdef CONFIG_ACPI
1549 		if (acpi_softc) {
1550 			switch (acpi_softc->sc_state) {
1551 			case ACPI_STATE_S0:
1552 				pm_suspend_target_state = PM_SUSPEND_TO_IDLE;
1553 				break;
1554 			case ACPI_STATE_S3:
1555 				pm_suspend_target_state = PM_SUSPEND_MEM;
1556 				break;
1557 			}
1558 		}
1559 #else
1560 		pm_suspend_target_state = PM_SUSPEND_TO_IDLE;
1561 #endif
1562 		drm_quiesce(dev);
1563 		break;
1564 	case DVACT_WAKEUP:
1565 		drm_wakeup(dev);
1566 		pm_suspend_target_state = PM_SUSPEND_ON;
1567 		break;
1568 	}
1569 
1570 	return (0);
1571 }
1572 
1573 const struct cfattach drm_ca = {
1574 	sizeof(struct drm_softc), drm_probe, drm_attach,
1575 	drm_detach, drm_activate
1576 };
1577 
1578 struct cfdriver drm_cd = {
1579 	NULL, "drm", DV_DULL
1580 };
1581 
1582 const struct pci_device_id *
1583 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1584 {
1585 	int i = 0;
1586 
1587 	for (i = 0; idlist[i].vendor != 0; i++) {
1588 		if ((idlist[i].vendor == vendor) &&
1589 		    (idlist[i].device == device ||
1590 		     idlist[i].device == PCI_ANY_ID) &&
1591 		    (idlist[i].subvendor == PCI_ANY_ID) &&
1592 		    (idlist[i].subdevice == PCI_ANY_ID))
1593 			return &idlist[i];
1594 	}
1595 	return NULL;
1596 }
1597 
1598 int
1599 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1600 {
1601 	return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1602 }
1603 
1604 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1605 
1606 struct drm_file *
1607 drm_find_file_by_minor(struct drm_device *dev, int minor)
1608 {
1609 	struct drm_file	key;
1610 
1611 	key.fminor = minor;
1612 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1613 }
1614 
1615 struct drm_device *
1616 drm_get_device_from_kdev(dev_t kdev)
1617 {
1618 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1619 	/* render */
1620 	if (unit >= 128)
1621 		unit -= 128;
1622 	struct drm_softc *sc;
1623 
1624 	if (unit < drm_cd.cd_ndevs) {
1625 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1626 		if (sc)
1627 			return sc->sc_drm;
1628 	}
1629 
1630 	return NULL;
1631 }
1632 
1633 void
1634 filt_drmdetach(struct knote *kn)
1635 {
1636 	struct drm_device *dev = kn->kn_hook;
1637 	int s;
1638 
1639 	s = spltty();
1640 	klist_remove_locked(&dev->note, kn);
1641 	splx(s);
1642 }
1643 
1644 int
1645 filt_drmkms(struct knote *kn, long hint)
1646 {
1647 	if (kn->kn_sfflags & hint)
1648 		kn->kn_fflags |= hint;
1649 	return (kn->kn_fflags != 0);
1650 }
1651 
1652 void
1653 filt_drmreaddetach(struct knote *kn)
1654 {
1655 	struct drm_file		*file_priv = kn->kn_hook;
1656 	int s;
1657 
1658 	s = spltty();
1659 	klist_remove_locked(&file_priv->rsel.si_note, kn);
1660 	splx(s);
1661 }
1662 
1663 int
1664 filt_drmread(struct knote *kn, long hint)
1665 {
1666 	struct drm_file		*file_priv = kn->kn_hook;
1667 	int			 val = 0;
1668 
1669 	if ((hint & NOTE_SUBMIT) == 0)
1670 		mtx_enter(&file_priv->minor->dev->event_lock);
1671 	val = !list_empty(&file_priv->event_list);
1672 	if ((hint & NOTE_SUBMIT) == 0)
1673 		mtx_leave(&file_priv->minor->dev->event_lock);
1674 	return (val);
1675 }
1676 
1677 const struct filterops drm_filtops = {
1678 	.f_flags	= FILTEROP_ISFD,
1679 	.f_attach	= NULL,
1680 	.f_detach	= filt_drmdetach,
1681 	.f_event	= filt_drmkms,
1682 };
1683 
1684 const struct filterops drmread_filtops = {
1685 	.f_flags	= FILTEROP_ISFD,
1686 	.f_attach	= NULL,
1687 	.f_detach	= filt_drmreaddetach,
1688 	.f_event	= filt_drmread,
1689 };
1690 
1691 int
1692 drmkqfilter(dev_t kdev, struct knote *kn)
1693 {
1694 	struct drm_device	*dev = NULL;
1695 	struct drm_file		*file_priv = NULL;
1696 	int			 s;
1697 
1698 	dev = drm_get_device_from_kdev(kdev);
1699 	if (dev == NULL || dev->dev_private == NULL)
1700 		return (ENXIO);
1701 
1702 	switch (kn->kn_filter) {
1703 	case EVFILT_READ:
1704 		mutex_lock(&dev->struct_mutex);
1705 		file_priv = drm_find_file_by_minor(dev, minor(kdev));
1706 		mutex_unlock(&dev->struct_mutex);
1707 		if (file_priv == NULL)
1708 			return (ENXIO);
1709 
1710 		kn->kn_fop = &drmread_filtops;
1711 		kn->kn_hook = file_priv;
1712 
1713 		s = spltty();
1714 		klist_insert_locked(&file_priv->rsel.si_note, kn);
1715 		splx(s);
1716 		break;
1717 	case EVFILT_DEVICE:
1718 		kn->kn_fop = &drm_filtops;
1719 		kn->kn_hook = dev;
1720 
1721 		s = spltty();
1722 		klist_insert_locked(&dev->note, kn);
1723 		splx(s);
1724 		break;
1725 	default:
1726 		return (EINVAL);
1727 	}
1728 
1729 	return (0);
1730 }
1731 
1732 int
1733 drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
1734 {
1735 	struct drm_device	*dev = NULL;
1736 	struct drm_file		*file_priv;
1737 	struct drm_minor	*dm;
1738 	int			 ret = 0;
1739 	int			 dminor, realminor, minor_type;
1740 	int need_setup = 0;
1741 
1742 	dev = drm_get_device_from_kdev(kdev);
1743 	if (dev == NULL || dev->dev_private == NULL)
1744 		return (ENXIO);
1745 
1746 	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
1747 
1748 	if (flags & O_EXCL)
1749 		return (EBUSY); /* No exclusive opens */
1750 
1751 	if (drm_dev_needs_global_mutex(dev))
1752 		mutex_lock(&drm_global_mutex);
1753 
1754 	if (!atomic_fetch_inc(&dev->open_count))
1755 		need_setup = 1;
1756 
1757 	dminor = minor(kdev);
1758 	realminor =  dminor & ((1 << CLONE_SHIFT) - 1);
1759 	if (realminor < 64)
1760 		minor_type = DRM_MINOR_PRIMARY;
1761 	else if (realminor >= 128 && realminor < 192)
1762 		minor_type = DRM_MINOR_RENDER;
1763 	else {
1764 		ret = ENXIO;
1765 		goto err;
1766 	}
1767 
1768 	dm = *drm_minor_get_slot(dev, minor_type);
1769 	if (dm == NULL) {
1770 		ret = ENXIO;
1771 		goto err;
1772 	}
1773 	dm->index = minor(kdev);
1774 
1775 	file_priv = drm_file_alloc(dm);
1776 	if (IS_ERR(file_priv)) {
1777 		ret = ENOMEM;
1778 		goto err;
1779 	}
1780 
1781 	/* first opener automatically becomes master */
1782 	if (drm_is_primary_client(file_priv)) {
1783 		ret = drm_master_open(file_priv);
1784 		if (ret != 0)
1785 			goto out_file_free;
1786 	}
1787 
1788 	file_priv->filp = (void *)file_priv;
1789 	file_priv->fminor = minor(kdev);
1790 
1791 	mutex_lock(&dev->filelist_mutex);
1792 	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
1793 	mutex_unlock(&dev->filelist_mutex);
1794 
1795 	if (need_setup) {
1796 		ret = drm_legacy_setup(dev);
1797 		if (ret)
1798 			goto out_file_free;
1799 	}
1800 
1801 	if (drm_dev_needs_global_mutex(dev))
1802 		mutex_unlock(&drm_global_mutex);
1803 
1804 	return 0;
1805 
1806 out_file_free:
1807 	drm_file_free(file_priv);
1808 err:
1809 	atomic_dec(&dev->open_count);
1810 	if (drm_dev_needs_global_mutex(dev))
1811 		mutex_unlock(&drm_global_mutex);
1812 	return (ret);
1813 }
1814 
1815 int
1816 drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
1817 {
1818 	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
1819 	struct drm_file			*file_priv;
1820 	int				 retcode = 0;
1821 
1822 	if (dev == NULL)
1823 		return (ENXIO);
1824 
1825 	if (drm_dev_needs_global_mutex(dev))
1826 		mutex_lock(&drm_global_mutex);
1827 
1828 	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
1829 
1830 	mutex_lock(&dev->filelist_mutex);
1831 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
1832 	if (file_priv == NULL) {
1833 		DRM_ERROR("can't find authenticator\n");
1834 		retcode = EINVAL;
1835 		mutex_unlock(&dev->filelist_mutex);
1836 		goto done;
1837 	}
1838 
1839 	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
1840 	mutex_unlock(&dev->filelist_mutex);
1841 	drm_file_free(file_priv);
1842 done:
1843 	if (atomic_dec_and_test(&dev->open_count))
1844 		drm_lastclose(dev);
1845 
1846 	if (drm_dev_needs_global_mutex(dev))
1847 		mutex_unlock(&drm_global_mutex);
1848 
1849 	return (retcode);
1850 }
1851 
1852 int
1853 drmread(dev_t kdev, struct uio *uio, int ioflag)
1854 {
1855 	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
1856 	struct drm_file			*file_priv;
1857 	struct drm_pending_event	*ev;
1858 	int		 		 error = 0;
1859 
1860 	if (dev == NULL)
1861 		return (ENXIO);
1862 
1863 	mutex_lock(&dev->filelist_mutex);
1864 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
1865 	mutex_unlock(&dev->filelist_mutex);
1866 	if (file_priv == NULL)
1867 		return (ENXIO);
1868 
1869 	/*
1870 	 * The semantics are a little weird here. We will wait until we
1871 	 * have events to process, but as soon as we have events we will
1872 	 * only deliver as many as we have.
1873 	 * Note that events are atomic: if a whole event will not fit in
1874 	 * the read buffer, we won't read any of it out.
1875 	 */
1876 	mtx_enter(&dev->event_lock);
1877 	while (error == 0 && list_empty(&file_priv->event_list)) {
1878 		if (ioflag & IO_NDELAY) {
1879 			mtx_leave(&dev->event_lock);
1880 			return (EAGAIN);
1881 		}
1882 		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
1883 		    PWAIT | PCATCH, "drmread", INFSLP);
1884 	}
1885 	if (error) {
1886 		mtx_leave(&dev->event_lock);
1887 		return (error);
1888 	}
1889 	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
1890 		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
1891 		/* XXX we always destroy the event on error. */
1892 		error = uiomove(ev->event, ev->event->length, uio);
1893 		kfree(ev);
1894 		if (error)
1895 			break;
1896 		mtx_enter(&dev->event_lock);
1897 	}
1898 	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
1899 
1900 	return (error);
1901 }
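
/*
 * A hedged sketch of the matching userspace read loop.  Events arrive
 * as struct drm_event records (a type/length header) packed back to
 * back; "fd" and handle_vblank() are assumptions for illustration.
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	size_t off = 0;
 *
 *	while (n > 0 && off + sizeof(struct drm_event) <= (size_t)n) {
 *		struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *		if (e->type == DRM_EVENT_VBLANK)
 *			handle_vblank((struct drm_event_vblank *)e);
 *		off += e->length;
 *	}
 */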
1902 
1903 /*
1904  * Dequeue an event from the file priv in question, returning 1 if an
1905  * event was found. We take the resid from the read as a parameter because
1906  * we will only dequeue an event if the read buffer has space to fit the
1907  * entire thing.
1908  *
1909  * We are called locked, but we will *unlock* the queue on return so that
1910  * we may sleep to copyout the event.
1911  */
1912 int
1913 drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
1914     size_t resid, struct drm_pending_event **out)
1915 {
1916 	struct drm_pending_event *e = NULL;
1917 	int gotone = 0;
1918 
1919 	MUTEX_ASSERT_LOCKED(&dev->event_lock);
1920 
1921 	*out = NULL;
1922 	if (list_empty(&file_priv->event_list))
1923 		goto out;
1924 	e = list_first_entry(&file_priv->event_list,
1925 			     struct drm_pending_event, link);
1926 	if (e->event->length > resid)
1927 		goto out;
1928 
1929 	file_priv->event_space += e->event->length;
1930 	list_del(&e->link);
1931 	*out = e;
1932 	gotone = 1;
1933 
1934 out:
1935 	mtx_leave(&dev->event_lock);
1936 
1937 	return (gotone);
1938 }
1939 
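/*
 * Returning -1 from a paddr_t mmap entry point signals failure, so
 * mmap(2) on the drm device node itself is deliberately unsupported
 * here; buffer mappings are expected to be set up through the GEM
 * object paths instead.
 */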
1940 paddr_t
1941 drmmmap(dev_t kdev, off_t offset, int prot)
1942 {
1943 	return -1;
1944 }
1945 
1946 struct drm_dmamem *
1947 drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
1948     int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
1949 {
1950 	struct drm_dmamem	*mem;
1951 	size_t			 strsize;
1952 	/*
1953 	 * segs is the last member of the struct, so we oversize the
1954 	 * allocation to make room for any extra segments allowed.
1955 	 */
1956 	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
1957 	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
1958 	if (mem == NULL)
1959 		return (NULL);
1960 
1961 	mem->size = size;
1962 
1963 	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
1964 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
1965 		goto strfree;
1966 
1967 	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
1968 	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1969 		goto destroy;
1970 
1971 	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
1972 	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
1973 		goto free;
1974 
1975 	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
1976 	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
1977 		goto unmap;
1978 
1979 	return (mem);
1980 
1981 unmap:
1982 	bus_dmamem_unmap(dmat, mem->kva, size);
1983 free:
1984 	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
1985 destroy:
1986 	bus_dmamap_destroy(dmat, mem->map);
1987 strfree:
1988 	free(mem, M_DRM, 0);
1989 
1990 	return (NULL);
1991 }
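
/*
 * A minimal usage sketch, assuming "dev" is an attached drm_device:
 * allocate one page of DMA-safe memory, scribble on the mapping, and
 * release it with the matching free routine below.
 *
 *	struct drm_dmamem *m;
 *
 *	m = drm_dmamem_alloc(dev->dmat, PAGE_SIZE, PAGE_SIZE, 1,
 *	    PAGE_SIZE, 0, 0);
 *	if (m == NULL)
 *		return (ENOMEM);
 *	memset(m->kva, 0, m->size);
 *	drm_dmamem_free(dev->dmat, m);
 *
 * The device-visible address of the buffer is m->segs[0].ds_addr.
 */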
1992 
1993 void
1994 drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
1995 {
1996 	if (mem == NULL)
1997 		return;
1998 
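	/* Tear down in the reverse order of drm_dmamem_alloc() above. */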
1999 	bus_dmamap_unload(dmat, mem->map);
2000 	bus_dmamem_unmap(dmat, mem->kva, mem->size);
2001 	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
2002 	bus_dmamap_destroy(dmat, mem->map);
2003 	free(mem, M_DRM, 0);
2004 }
2005 
2006 struct drm_dma_handle *
2007 drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
2008 {
2009 	struct drm_dma_handle *dmah;
2010 
2011 	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
2012 	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
2013 	    BUS_DMA_NOCACHE, 0);
2014 	if (dmah->mem == NULL) {
2015 		free(dmah, M_DRM, sizeof(*dmah));
2016 		return NULL;
2017 	}
2018 	dmah->busaddr = dmah->mem->segs[0].ds_addr;
2019 	dmah->size = dmah->mem->size;
2020 	dmah->vaddr = dmah->mem->kva;
2021 	return (dmah);
2022 }
2023 
2024 void
2025 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
2026 {
2027 	if (dmah == NULL)
2028 		return;
2029 
2030 	drm_dmamem_free(dev->dmat, dmah->mem);
2031 	free(dmah, M_DRM, sizeof(*dmah));
2032 }
2033 
2034 /*
2035  * Compute order, i.e. ceil(log2(size)).  Can be made faster.
2036  */
2037 int
2038 drm_order(unsigned long size)
2039 {
2040 	int order;
2041 	unsigned long tmp;
2042 
2043 	for (order = 0, tmp = size; tmp >>= 1; ++order)
2044 		;
2045 
2046 	if (size & ~(1 << order))
2047 		++order;
2048 
2049 	return order;
2050 }
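
/*
 * Worked examples: drm_order(4096) == 12 since 4096 == 1 << 12, while
 * drm_order(4097) == 13 because any remainder rounds the order up;
 * drm_order(1) == 0.
 */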
2051 
2052 int
2053 drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
2054 {
2055 	struct drm_pciinfo *info = data;
2056 
2057 	if (dev->pdev == NULL)
2058 		return -ENOTTY;
2059 
2060 	info->domain = dev->pdev->bus->domain_nr;
2061 	info->bus = dev->pdev->bus->number;
2062 	info->dev = PCI_SLOT(dev->pdev->devfn);
2063 	info->func = PCI_FUNC(dev->pdev->devfn);
2064 	info->vendor_id = dev->pdev->vendor;
2065 	info->device_id = dev->pdev->device;
2066 	info->subvendor_id = dev->pdev->subsystem_vendor;
2067 	info->subdevice_id = dev->pdev->subsystem_device;
2068 	info->revision_id = 0;
2069 
2070 	return 0;
2071 }
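
/*
 * A hedged userspace sketch, assuming the OpenBSD-specific
 * DRM_IOCTL_GET_PCIINFO request that pairs with this handler; "fd" is
 * assumed to be an open drm device node.
 *
 *	struct drm_pciinfo pinfo;
 *
 *	if (ioctl(fd, DRM_IOCTL_GET_PCIINFO, &pinfo) == -1)
 *		err(1, "DRM_IOCTL_GET_PCIINFO");
 *	printf("%04x:%02x:%02x.%d\n", pinfo.domain, pinfo.bus,
 *	    pinfo.dev, pinfo.func);
 */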
2072