1 /*
2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3 *
4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
6 *
7 * Author Rickard E. (Rik) Faith <faith@valinux.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 */
28
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/specdev.h>
32 #include <sys/vnode.h>
33
34 #include <machine/bus.h>
35
36 #ifdef __HAVE_ACPI
37 #include <dev/acpi/acpidev.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/dsdt.h>
40 #endif
41
42 #include <linux/debugfs.h>
43 #include <linux/fs.h>
44 #include <linux/module.h>
45 #include <linux/moduleparam.h>
46 #include <linux/mount.h>
47 #include <linux/pseudo_fs.h>
48 #include <linux/slab.h>
49 #include <linux/srcu.h>
50
51 #include <drm/drm_accel.h>
52 #include <drm/drm_cache.h>
53 #include <drm/drm_client.h>
54 #include <drm/drm_color_mgmt.h>
55 #include <drm/drm_drv.h>
56 #include <drm/drm_file.h>
57 #include <drm/drm_managed.h>
58 #include <drm/drm_mode_object.h>
59 #include <drm/drm_print.h>
60 #include <drm/drm_privacy_screen_machine.h>
61
62 #include <drm/drm_gem.h>
63
64 #include "drm_crtc_internal.h"
65 #include "drm_internal.h"
66 #include "drm_legacy.h"
67
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

/* Protects drm_minors_idr; always taken with interrupts disabled. */
static DEFINE_SPINLOCK(drm_minor_lock);
/* Minor index -> struct drm_minor map for non-accel minors. */
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

/* debugfs root; explicitly cleared (NULL) on the non-Linux path. */
static struct dentry *drm_debugfs_root;

#ifdef notyet
DEFINE_STATIC_SRCU(drm_unplug_srcu);
#endif

/*
 * Some functions are only called once on init regardless of how many times
 * drm attaches. In linux this is handled via module_init()/module_exit()
 */
int drm_refcnt;
95
/*
 * Per-attachment softc: glue between the autoconf(9) device node and
 * the struct drm_device it drives.
 */
struct drm_softc {
	struct device sc_dev;		/* base autoconf device */
	struct drm_device *sc_drm;	/* the DRM device instance */
	int sc_allocated;		/* NOTE(review): presumably non-zero when
					 * sc_drm was allocated by the attach
					 * path -- confirm against attach code */
};
101
/*
 * Arguments passed from the bus frontend (PCI or platform) to the drm
 * attachment.  Field semantics are set by the attach path, which is not
 * visible in this chunk; comments below are hedged accordingly.
 */
struct drm_attach_args {
	struct drm_device *drm;		/* pre-allocated device, if any */
	const struct drm_driver *driver;
	char *busid;			/* bus identifier string */
	bus_dma_tag_t dmat;		/* parent bus DMA tag */
	bus_space_tag_t bst;		/* parent bus space tag */
	size_t busid_len;		/* length of busid -- TODO confirm
					 * whether it includes the NUL */
	int is_agp;			/* non-zero for AGP -- TODO confirm */
	struct pci_attach_args *pa;	/* PCI attach args; presumably NULL
					 * for non-PCI attachments */
	int primary;			/* primary display flag -- TODO confirm */
};
113
/* Local prototypes and autoconf locator definitions. */
void drm_linux_init(void);
void drm_linux_exit(void);
int drm_linux_acpi_notify(struct aml_node *, int, void *);

int drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
    struct drm_pending_event **);

int drmprint(void *, const char *);
int drmsubmatch(struct device *, void *, void *);
const struct pci_device_id *
drm_find_description(int, int, const struct pci_device_id *);

int drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

/* config(8) locator: was the device spec'd as primary? */
#define DRMDEVCF_PRIMARY 0
#define drmdevcf_primary cf_loc[DRMDEVCF_PRIMARY] /* spec'd as primary? */
#define DRMDEVCF_PRIMARY_UNK -1
132
133 /*
134 * DRM Minors
135 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
136 * of them is represented by a drm_minor object. Depending on the capabilities
137 * of the device-driver, different interfaces are registered.
138 *
139 * Minors can be accessed via dev->$minor_name. This pointer is either
140 * NULL or a valid drm_minor pointer and stays valid as long as the device is
141 * valid. This means, DRM minors have the same life-time as the underlying
142 * device. However, this doesn't mean that the minor is active. Minors are
143 * registered and unregistered dynamically according to device-state.
144 */
145
drm_minor_get_slot(struct drm_device * dev,enum drm_minor_type type)146 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
147 enum drm_minor_type type)
148 {
149 switch (type) {
150 case DRM_MINOR_PRIMARY:
151 return &dev->primary;
152 case DRM_MINOR_RENDER:
153 return &dev->render;
154 case DRM_MINOR_ACCEL:
155 return &dev->accel;
156 default:
157 BUG();
158 }
159 }
160
drm_minor_alloc_release(struct drm_device * dev,void * data)161 static void drm_minor_alloc_release(struct drm_device *dev, void *data)
162 {
163 struct drm_minor *minor = data;
164 unsigned long flags;
165
166 WARN_ON(dev != minor->dev);
167
168 #ifdef __linux__
169 put_device(minor->kdev);
170 #endif
171
172 if (minor->type == DRM_MINOR_ACCEL) {
173 accel_minor_remove(minor->index);
174 } else {
175 spin_lock_irqsave(&drm_minor_lock, flags);
176 idr_remove(&drm_minors_idr, minor->index);
177 spin_unlock_irqrestore(&drm_minor_lock, flags);
178 }
179 }
180
/*
 * Allocate a minor of @type for @dev and reserve an index for it.  The
 * matching slot in @dev (primary/render/accel) is filled on success,
 * and all teardown is registered as a drm-managed release action.
 *
 * Returns 0 on success or a negative error code.
 */
static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/*
	 * Preload outside the spinlock so idr_alloc() below can run with
	 * GFP_NOWAIT while the lock is held.
	 */
	idr_preload(GFP_KERNEL);
	if (type == DRM_MINOR_ACCEL) {
		r = accel_minor_alloc();
	} else {
		/* each non-accel minor type owns a 64-wide index range */
		spin_lock_irqsave(&drm_minor_lock, flags);
		r = idr_alloc(&drm_minors_idr,
			NULL,
			64 * type,
			64 * (type + 1),
			GFP_NOWAIT);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	/* from here on the index is released via drm_minor_alloc_release() */
	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

#ifdef __linux__
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);
#endif

	/*
	 * Publish the minor in its slot.  The idr entry stays NULL until
	 * drm_minor_register() replaces it, so lookups don't succeed yet.
	 */
	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}
226
/*
 * Publish a previously allocated minor of @type: set up debugfs/sysfs
 * (Linux only), then swap the NULL placeholder for the real minor so
 * lookups via drm_minor_acquire() start succeeding.
 *
 * Returns 0 on success, and also when @dev has no minor of this type.
 */
static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifdef __linux__
	int ret;
#endif

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifdef __linux__
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_debugfs_init(minor, minor->index);
	} else {
		ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
		if (ret) {
			DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
			goto err_debugfs;
		}
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#else
	/* no debugfs on this platform; keep the root cleared */
	drm_debugfs_root = NULL;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_replace(minor, minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_replace(&drm_minors_idr, minor, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifdef __linux__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}
277
/*
 * Unpublish a minor: put the NULL placeholder back in the lookup table
 * (so drm_minor_acquire() fails from now on) and tear down the sysfs
 * device and debugfs entries.  Safe to call when the minor was never
 * allocated or registered.
 */
static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
#ifdef __linux__
	if (!minor || !device_is_registered(minor->kdev))
#else
	if (!minor)
#endif
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_replace(NULL, minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_replace(&drm_minors_idr, NULL, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}

#ifdef __linux__
	device_del(minor->kdev);
#endif
	/* NOTE(review): on !__linux__ minor->kdev is never allocated (the
	 * drm_sysfs_minor_alloc() call is Linux-only), so this presumably
	 * relies on dev_set_drvdata() tolerating a NULL device -- confirm
	 * against the compat layer. */
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
306
307 /*
308 * Looks up the given minor-ID and returns the respective DRM-minor object. The
309  * reference-count of the underlying device is increased so you must release this
310 * object with drm_minor_release().
311 *
312 * As long as you hold this minor, it is guaranteed that the object and the
313 * minor->dev pointer will stay valid! However, the device may get unplugged and
314 * unregistered while you hold the minor.
315 */
drm_minor_acquire(unsigned int minor_id)316 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
317 {
318 struct drm_minor *minor;
319 unsigned long flags;
320
321 spin_lock_irqsave(&drm_minor_lock, flags);
322 minor = idr_find(&drm_minors_idr, minor_id);
323 if (minor)
324 drm_dev_get(minor->dev);
325 spin_unlock_irqrestore(&drm_minor_lock, flags);
326
327 if (!minor) {
328 return ERR_PTR(-ENODEV);
329 } else if (drm_dev_is_unplugged(minor->dev)) {
330 drm_dev_put(minor->dev);
331 return ERR_PTR(-ENODEV);
332 }
333
334 return minor;
335 }
336
/* Drop the device reference taken by drm_minor_acquire(). */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
341
342 /**
343 * DOC: driver instance overview
344 *
345 * A device instance for a drm driver is represented by &struct drm_device. This
346 * is allocated and initialized with devm_drm_dev_alloc(), usually from
347 * bus-specific ->probe() callbacks implemented by the driver. The driver then
348 * needs to initialize all the various subsystems for the drm device like memory
349 * management, vblank handling, modesetting support and initial output
350 * configuration plus obviously initialize all the corresponding hardware bits.
351 * Finally when everything is up and running and ready for userspace the device
352 * instance can be published using drm_dev_register().
353 *
354 * There is also deprecated support for initializing device instances using
355 * bus-specific helpers and the &drm_driver.load callback. But due to
356  * backwards-compatibility needs the device instance has to be published too
357  * early, which requires unpretty global locking to make safe and is therefore
358  * only supported for existing drivers not yet converted to the new scheme.
359 *
360 * When cleaning up a device instance everything needs to be done in reverse:
361 * First unpublish the device instance with drm_dev_unregister(). Then clean up
362 * any other resources allocated at device initialization and drop the driver's
363 * reference to &drm_device using drm_dev_put().
364 *
365 * Note that any allocation or resource which is visible to userspace must be
366 * released only when the final drm_dev_put() is called, and not when the
367 * driver is unbound from the underlying physical struct &device. Best to use
368 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
369 * related functions.
370 *
371 * devres managed resources like devm_kmalloc() can only be used for resources
372 * directly related to the underlying hardware device, and only used in code
373 * paths fully protected by drm_dev_enter() and drm_dev_exit().
374 *
375 * Display driver example
376 * ~~~~~~~~~~~~~~~~~~~~~~
377 *
378 * The following example shows a typical structure of a DRM display driver.
379  * The example focuses on the probe() function and the other functions that are
380  * almost always present and serve as a demonstration of devm_drm_dev_alloc().
381 *
382 * .. code-block:: c
383 *
384 * struct driver_device {
385 * struct drm_device drm;
386 * void *userspace_facing;
387 * struct clk *pclk;
388 * };
389 *
390 * static const struct drm_driver driver_drm_driver = {
391 * [...]
392 * };
393 *
394 * static int driver_probe(struct platform_device *pdev)
395 * {
396 * struct driver_device *priv;
397 * struct drm_device *drm;
398 * int ret;
399 *
400 * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
401 * struct driver_device, drm);
402 * if (IS_ERR(priv))
403 * return PTR_ERR(priv);
404 * drm = &priv->drm;
405 *
406 * ret = drmm_mode_config_init(drm);
407 * if (ret)
408 * return ret;
409 *
410 * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
411 * if (!priv->userspace_facing)
412 * return -ENOMEM;
413 *
414 * priv->pclk = devm_clk_get(dev, "PCLK");
415 * if (IS_ERR(priv->pclk))
416 * return PTR_ERR(priv->pclk);
417 *
418 * // Further setup, display pipeline etc
419 *
420 * platform_set_drvdata(pdev, drm);
421 *
422 * drm_mode_config_reset(drm);
423 *
424 * ret = drm_dev_register(drm);
425 * if (ret)
426 * return ret;
427 *
428 * drm_fbdev_generic_setup(drm, 32);
429 *
430 * return 0;
431 * }
432 *
433 * // This function is called before the devm_ resources are released
434 * static int driver_remove(struct platform_device *pdev)
435 * {
436 * struct drm_device *drm = platform_get_drvdata(pdev);
437 *
438 * drm_dev_unregister(drm);
439 * drm_atomic_helper_shutdown(drm)
440 *
441 * return 0;
442 * }
443 *
444 * // This function is called on kernel restart and shutdown
445 * static void driver_shutdown(struct platform_device *pdev)
446 * {
447 * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
448 * }
449 *
450 * static int __maybe_unused driver_pm_suspend(struct device *dev)
451 * {
452 * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
453 * }
454 *
455 * static int __maybe_unused driver_pm_resume(struct device *dev)
456 * {
457 * drm_mode_config_helper_resume(dev_get_drvdata(dev));
458 *
459 * return 0;
460 * }
461 *
462 * static const struct dev_pm_ops driver_pm_ops = {
463 * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
464 * };
465 *
466 * static struct platform_driver driver_driver = {
467 * .driver = {
468 * [...]
469 * .pm = &driver_pm_ops,
470 * },
471 * .probe = driver_probe,
472 * .remove = driver_remove,
473 * .shutdown = driver_shutdown,
474 * };
475 * module_platform_driver(driver_driver);
476 *
477 * Drivers that want to support device unplugging (USB, DT overlay unload) should
478 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
479  * regions that are accessing device resources to prevent use after they're
480 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
481 * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
482 * drm_atomic_helper_shutdown() is called. This means that if the disable code
483 * paths are protected, they will not run on regular driver module unload,
484 * possibly leaving the hardware enabled.
485 */
486
487 /**
488 * drm_put_dev - Unregister and release a DRM device
489 * @dev: DRM device
490 *
491 * Called at module unload time or when a PCI device is unplugged.
492 *
493  * Cleans up the whole DRM device, calling drm_lastclose().
494 *
495 * Note: Use of this function is deprecated. It will eventually go away
496 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
497 * instead to make sure that the device isn't userspace accessible any more
498 * while teardown is in progress, ensuring that userspace can't access an
499 * inconsistent state.
500 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	/* unpublish first so userspace can no longer reach the device,
	 * then drop what is typically the final reference */
	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);
514
515 /**
516 * drm_dev_enter - Enter device critical section
517 * @dev: DRM device
518 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
519 *
520 * This function marks and protects the beginning of a section that should not
521 * be entered after the device has been unplugged. The section end is marked
522 * with drm_dev_exit(). Calls to this function can be nested.
523 *
524 * Returns:
525 * True if it is OK to enter the section, false otherwise.
526 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
#ifdef notyet
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}
#endif

	/* SRCU-based unplug protection is not wired up on this platform,
	 * so entering the critical section always succeeds. */
	return true;
}
EXPORT_SYMBOL(drm_dev_enter);
541
542 /**
543 * drm_dev_exit - Exit device critical section
544 * @idx: index returned from drm_dev_enter()
545 *
546 * This function marks the end of a section that should not be entered after
547 * the device has been unplugged.
548 */
void drm_dev_exit(int idx)
{
#ifdef notyet
	srcu_read_unlock(&drm_unplug_srcu, idx);
#endif
	/* no-op while unplug protection is compiled out; see drm_dev_enter() */
}
EXPORT_SYMBOL(drm_dev_exit);
556
557 /**
558 * drm_dev_unplug - unplug a DRM device
559 * @dev: DRM device
560 *
561 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
562 * userspace operations. Entry-points can use drm_dev_enter() and
563 * drm_dev_exit() to protect device resources in a race free manner. This
564 * essentially unregisters the device like drm_dev_unregister(), but can be
565 * called while there are still open users of @dev.
566 */
void drm_dev_unplug(struct drm_device *dev)
{
	/* hot-unplug is not implemented on this platform */
	STUB();
#ifdef notyet
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
#endif
}
EXPORT_SYMBOL(drm_dev_unplug);
587
588 #ifdef __linux__
589 /*
590 * DRM internal mount
591 * We want to be able to allocate our own "struct address_space" to control
592 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
593 * stand-alone address_space objects, so we need an underlying inode. As there
594 * is no way to allocate an independent inode easily, we need a fake internal
595 * VFS mount-point.
596 *
597 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
598 * frees it again. You are allowed to use iget() and iput() to get references to
599 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
600 * drm_fs_inode_free() call (which does not have to be the last iput()).
601 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
602 * between multiple inode-users. You could, technically, call
603 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
604 * iput(), but this way you'd end up with a new vfsmount for each inode.
605 */
606
/* Refcounted pin on the internal "drm" pseudo-filesystem mount. */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	/* 0x010203ff is the filesystem magic for the drm pseudo fs */
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}
614
/* Internal, never user-mountable filesystem backing anonymous inodes. */
static struct file_system_type drm_fs_type = {
	.name = "drm",
	.owner = THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};
621
/*
 * Allocate an anonymous inode on the internal drm mount, pinning the
 * mount for the inode's lifetime.  Pair with drm_fs_inode_free().
 * Returns the inode or an ERR_PTR.
 */
static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		/* undo the pin taken above; mount unwinds on last release */
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}
639
drm_fs_inode_free(struct inode * inode)640 static void drm_fs_inode_free(struct inode *inode)
641 {
642 if (inode) {
643 iput(inode);
644 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
645 }
646 }
647
648 #endif /* __linux__ */
649
650 /**
651 * DOC: component helper usage recommendations
652 *
653 * DRM drivers that drive hardware where a logical device consists of a pile of
654 * independent hardware blocks are recommended to use the :ref:`component helper
655 * library<component>`. For consistency and better options for code reuse the
656 * following guidelines apply:
657 *
658 * - The entire device initialization procedure should be run from the
659 * &component_master_ops.master_bind callback, starting with
660 * devm_drm_dev_alloc(), then binding all components with
661 * component_bind_all() and finishing with drm_dev_register().
662 *
663 * - The opaque pointer passed to all components through component_bind_all()
664 * should point at &struct drm_device of the device instance, not some driver
665 * specific private structure.
666 *
667 * - The component helper fills the niche where further standardization of
668 * interfaces is not practical. When there already is, or will be, a
669 * standardized interface like &drm_bridge or &drm_panel, providing its own
670 * functions to find such components at driver load time, like
671 * drm_of_find_panel_or_bridge(), then the component helper should not be
672 * used.
673 */
674
/*
 * Managed release action registered by drm_dev_init(); undoes the
 * early, non-managed device setup (legacy state, anon inode, parent
 * device reference, mutexes) when the device is released.
 */
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
#ifdef __linux__
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
#endif
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	mutex_destroy(&dev->debugfs_mutex);
	drm_legacy_destroy_members(dev);
}
694
695 #ifdef notyet
696
/*
 * One-time initialization of an embedded struct drm_device: sets up
 * refcounting, the managed-resource list, locks, minors, legacy map
 * hash and (for GEM drivers) the GEM core.  On failure, everything
 * initialized so far is torn down via drm_managed_release().
 *
 * Returns 0 on success or a negative error code.
 */
static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	/* hold the parent for the lifetime of the drm_device */
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	/* compute-accel is mutually exclusive with render/modeset */
	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
	    (drm_core_check_feature(dev, DRIVER_RENDER) ||
	     drm_core_check_feature(dev, DRIVER_MODESET))) {
		DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
		return -EINVAL;
	}

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);
	INIT_LIST_HEAD(&dev->debugfs_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);
	mutex_init(&dev->debugfs_mutex);

	/* everything above is undone by drm_dev_init_release() */
	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
		if (ret)
			goto err;
	} else {
		if (drm_core_check_feature(dev, DRIVER_RENDER)) {
			ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
			if (ret)
				goto err;
		}

		ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
		if (ret)
			goto err;
	}

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	/* default unique name: the parent device's name */
	dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
	if (!dev->unique) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}
799
/* devres action: drop the drm_device reference held on behalf of devres. */
static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}
804
devm_drm_dev_init(struct device * parent,struct drm_device * dev,const struct drm_driver * driver)805 static int devm_drm_dev_init(struct device *parent,
806 struct drm_device *dev,
807 const struct drm_driver *driver)
808 {
809 int ret;
810
811 ret = drm_dev_init(dev, driver, parent);
812 if (ret)
813 return ret;
814
815 return devm_add_action_or_reset(parent,
816 devm_drm_dev_init_release, dev);
817 }
818
819 #endif
820
/*
 * Backend of devm_drm_dev_alloc(): allocate a zeroed driver container
 * of @size bytes whose embedded struct drm_device sits at @offset.
 *
 * NOTE(review): on this platform the devm-based init and final-kfree
 * registration are compiled out (notyet), so the returned drm_device is
 * NOT initialized here -- presumably the attach path does it; confirm
 * before relying on devres-driven cleanup.
 *
 * Returns the container or ERR_PTR(-ENOMEM).
 */
void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
#ifdef notyet
	int ret;
#endif

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
#ifdef notyet
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);
#endif

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
848
849 #ifdef notyet
850
851 /**
852 * drm_dev_alloc - Allocate new DRM device
853 * @driver: DRM driver to allocate device for
854 * @parent: Parent device object
855 *
856 * This is the deprecated version of devm_drm_dev_alloc(), which does not support
857 * subclassing through embedding the struct &drm_device in a driver private
858 * structure, and which does not support automatic cleanup through devres.
859 *
860 * RETURNS:
861 * Pointer to new DRM device, or ERR_PTR on failure.
862 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		/* drm_dev_init() already released its managed state */
		kfree(dev);
		return ERR_PTR(ret);
	}

	/* no subclassing: the device itself is the final allocation */
	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
884
885 #endif
886
/*
 * kref release callback, invoked when the last reference to @dev is
 * dropped: let the driver release its state, free all drm-managed
 * resources, then free the containing allocation itself.
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	/* final_kfree is the whole container recorded at allocation time */
	kfree(dev->managed.final_kfree);
}
898
899 /**
900 * drm_dev_get - Take reference of a DRM device
901 * @dev: device to take reference of or NULL
902 *
903 * This increases the ref-count of @dev by one. You *must* already own a
904 * reference when calling this. Use drm_dev_put() to drop this reference
905 * again.
906 *
907 * This function never fails. However, this function does not provide *any*
908 * guarantee whether the device is alive or running. It only provides a
909 * reference to the object and the memory associated with it.
910 */
drm_dev_get(struct drm_device * dev)911 void drm_dev_get(struct drm_device *dev)
912 {
913 if (dev)
914 kref_get(&dev->ref);
915 }
916 EXPORT_SYMBOL(drm_dev_get);
917
918 /**
919 * drm_dev_put - Drop reference of a DRM device
920 * @dev: device to drop reference of or NULL
921 *
922 * This decreases the ref-count of @dev by one. The device is destroyed if the
923 * ref-count drops to zero.
924 */
drm_dev_put(struct drm_device * dev)925 void drm_dev_put(struct drm_device *dev)
926 {
927 if (dev)
928 kref_put(&dev->ref, drm_dev_release);
929 }
930 EXPORT_SYMBOL(drm_dev_put);
931
/*
 * Expose a controlD<N> sysfs symlink for modeset devices so legacy
 * userspace that scans for it keeps working.  Returns 0 when no link
 * is needed, -ENOMEM on allocation failure, or the sysfs error.
 */
static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardev have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}
966
/*
 * Remove the controlD<N> symlink created by create_compat_control_link().
 * Silently does nothing when the link was never created or the name
 * allocation fails.
 */
static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	/* must match the name built in create_compat_control_link() */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}
987
988 /**
989 * drm_dev_register - Register DRM device
990 * @dev: Device to register
991 * @flags: Flags passed to the driver's .load() function
992 *
993 * Register the DRM device @dev with the system, advertise device to user-space
994 * and start normal device operation. @dev must be initialized via drm_dev_init()
995 * previously.
996 *
997 * Never call this twice on any device!
998 *
999 * NOTE: To ensure backward compatibility with existing drivers method this
1000 * function calls the &drm_driver.load method after registering the device
1001 * nodes, creating race conditions. Usage of the &drm_driver.load methods is
1002 * therefore deprecated, drivers must perform all initialization before calling
1003 * drm_dev_register().
1004 *
1005 * RETURNS:
1006 * 0 on success, negative error code on failure.
1007 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	/* drivers without a deprecated ->load() must already have a
	 * complete mode config at this point */
	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* each drm_minor_register() is a no-op when @dev did not
	 * allocate a minor of that type */
	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	/* deprecated: ->load() runs after the device is already visible */
	if (driver->load) {
		ret = driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_modeset_register_all(dev);
		if (ret)
			goto err_unload;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary ? dev->primary->index : dev->accel->index);

	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	/* unwind in reverse; unregister calls tolerate missing minors */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
1073
/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference, unless it is managed with devres
 * (as devices allocated with devm_drm_dev_alloc() are), in which case there is
 * already an unwind action registered.
 *
 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
 * which can be called while there are still open users of @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* Block new userspace access before tearing anything down. */
	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	/* Legacy (DRI1) cleanup: AGP bookkeeping and map lists. */
	drm_legacy_pci_agp_destroy(dev);
	drm_legacy_rmmaps(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);
1114
1115 /*
1116 * DRM Core
1117 * The DRM core module initializes all global DRM objects and makes them
1118 * available to drivers. Once setup, drivers can probe their respective
1119 * devices.
1120 * Currently, core management includes:
1121 * - The "DRM-Global" key/value database
1122 * - Global ID management for connectors
1123 * - DRM major number allocation
1124 * - DRM minor management
1125 * - DRM sysfs class
1126 * - DRM debugfs root
1127 *
1128 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1129 * interface registered on a DRM device, you can request minor numbers from DRM
1130 * core. DRM core takes care of major-number management and char-dev
1131 * registration. A stub ->open() callback forwards any open() requests to the
1132 * registered minor.
1133 */
1134
#ifdef __linux__
/*
 * Minimal fops installed on the shared DRM character-device major:
 * resolve the minor being opened, swap in the owning driver's real
 * file_operations and forward the open() to it.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* Hand the struct file over to the driver's fops; later syscalls
	 * on this fd no longer pass through the stub. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

/* fops for the shared DRM major; only open() does real work. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
#endif /* __linux__ */
1172
/* Tear down global DRM state; exact reverse order of drm_core_init(). */
static void drm_core_exit(void)
{
	drm_privacy_screen_lookup_exit();
	accel_core_exit();
#ifdef __linux__
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1185
/*
 * Initialize global DRM state: connector IDs, the minor IDR and, on
 * Linux, the sysfs class, the debugfs root and the shared chardev
 * major.  Called once before the first device can register.
 */
static int __init drm_core_init(void)
{
#ifdef __linux__
	int ret;
#endif

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);
	drm_memcpy_init_early();

#ifdef __linux__
	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	/* debugfs errors are deliberately ignored; debugfs is optional. */
	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	ret = accel_core_init();
	if (ret < 0)
		goto error;
#endif

	drm_privacy_screen_lookup_init();

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;
#ifdef __linux__
error:
	/* drm_core_exit() copes with partially initialized state. */
	drm_core_exit();
	return ret;
#endif
}
1226
1227 #ifdef __linux__
1228 module_init(drm_core_init);
1229 module_exit(drm_core_exit);
1230 #endif
1231
1232 void
drm_attach_platform(struct drm_driver * driver,bus_space_tag_t iot,bus_dma_tag_t dmat,struct device * dev,struct drm_device * drm)1233 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1234 bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1235 {
1236 struct drm_attach_args arg;
1237
1238 memset(&arg, 0, sizeof(arg));
1239 arg.driver = driver;
1240 arg.bst = iot;
1241 arg.dmat = dmat;
1242 arg.drm = drm;
1243
1244 arg.busid = dev->dv_xname;
1245 arg.busid_len = strlen(dev->dv_xname) + 1;
1246 config_found_sm(dev, &arg, drmprint, drmsubmatch);
1247 }
1248
1249 struct drm_device *
drm_attach_pci(const struct drm_driver * driver,struct pci_attach_args * pa,int is_agp,int primary,struct device * dev,struct drm_device * drm)1250 drm_attach_pci(const struct drm_driver *driver, struct pci_attach_args *pa,
1251 int is_agp, int primary, struct device *dev, struct drm_device *drm)
1252 {
1253 struct drm_attach_args arg;
1254 struct drm_softc *sc;
1255
1256 arg.drm = drm;
1257 arg.driver = driver;
1258 arg.dmat = pa->pa_dmat;
1259 arg.bst = pa->pa_memt;
1260 arg.is_agp = is_agp;
1261 arg.primary = primary;
1262 arg.pa = pa;
1263
1264 arg.busid_len = 20;
1265 arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
1266 if (arg.busid == NULL) {
1267 printf("%s: no memory for drm\n", dev->dv_xname);
1268 return (NULL);
1269 }
1270 snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1271 pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1272
1273 sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1274 if (sc == NULL)
1275 return NULL;
1276
1277 return sc->sc_drm;
1278 }
1279
1280 int
drmprint(void * aux,const char * pnp)1281 drmprint(void *aux, const char *pnp)
1282 {
1283 if (pnp != NULL)
1284 printf("drm at %s", pnp);
1285 return (UNCONF);
1286 }
1287
1288 int
drmsubmatch(struct device * parent,void * match,void * aux)1289 drmsubmatch(struct device *parent, void *match, void *aux)
1290 {
1291 extern struct cfdriver drm_cd;
1292 struct cfdata *cf = match;
1293
1294 /* only allow drm to attach */
1295 if (cf->cf_driver == &drm_cd)
1296 return ((*cf->cf_attach->ca_match)(parent, match, aux));
1297 return (0);
1298 }
1299
1300 int
drm_pciprobe(struct pci_attach_args * pa,const struct pci_device_id * idlist)1301 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1302 {
1303 const struct pci_device_id *id_entry;
1304
1305 id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1306 PCI_PRODUCT(pa->pa_id), idlist);
1307 if (id_entry != NULL)
1308 return 1;
1309
1310 return 0;
1311 }
1312
1313 int
drm_probe(struct device * parent,void * match,void * aux)1314 drm_probe(struct device *parent, void *match, void *aux)
1315 {
1316 struct cfdata *cf = match;
1317 struct drm_attach_args *da = aux;
1318
1319 if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1320 /*
1321 * If primary-ness of device specified, either match
1322 * exactly (at high priority), or fail.
1323 */
1324 if (cf->drmdevcf_primary != 0 && da->primary != 0)
1325 return (10);
1326 else
1327 return (0);
1328 }
1329
1330 /* If primary-ness unspecified, it wins. */
1331 return (1);
1332 }
1333
1334 int drm_buddy_module_init(void);
1335 void drm_buddy_module_exit(void);
1336
/*
 * Autoconf attach for drm(4).  Brings up the shared compat/DRM cores on
 * first attach, then initializes the drm_device: PCI identity shadow,
 * locks, managed-resource list, minors and GEM.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	/* First attach initializes the shared Linux-compat and DRM cores. */
	if (drm_refcnt == 0) {
		drm_linux_init();
		drm_core_init();
		drm_buddy_module_init();
	}
	drm_refcnt++;

	/* Drivers may hand in a preallocated (embedded) drm_device;
	 * otherwise allocate one here and free it in drm_detach(). */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	kref_init(&dev->ref);
	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	mtx_init(&dev->managed.lock, IPL_TTY);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;

	if (da->pa) {
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		/* Populate the Linux-style pci_dev shadow from the
		 * OpenBSD attach args so ported drivers can use the
		 * usual pdev fields. */
		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);
		dev->pdev->class = (PCI_CLASS(pa->pa_class) << 16) |
		    (PCI_SUBCLASS(pa->pa_class) << 8) |
		    PCI_INTERFACE(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->domain_nr = pa->pa_domain;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		/* Fake an upstream-bridge pci_dev when one exists;
		 * freed in drm_detach(). */
		if (pa->pa_bridgetag != NULL) {
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		/* Register for ACPI notifications on this GPU's node. */
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	mtx_init(&dev->quiesce_mtx, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		goto error;

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto error;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto error;

#ifdef CONFIG_DRM_LEGACY
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Mark the AGP aperture write-combining when possible. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}
#endif

	/* Per-device pool for GEM object allocations of the driver's
	 * declared object size. */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	drmm_add_final_kfree(dev, dev);

	printf("\n");
	return;

error:
	drm_managed_release(dev);
	dev->dev_private = NULL;
}
1481
/*
 * Autoconf detach for drm(4): drop the global module refcount (tearing
 * the shared cores down on last detach), close remaining state and free
 * resources set up in drm_attach().
 */
int
drm_detach(struct device *self, int flags)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_device *dev = sc->sc_drm;

	drm_refcnt--;
	if (drm_refcnt == 0) {
		drm_buddy_module_exit();
		drm_core_exit();
		drm_linux_exit();
	}

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		if (dev->driver->gem_size > 0)
			pool_destroy(&dev->objpl);
	}

#ifdef CONFIG_DRM_LEGACY
	/* Undo the write-combining MTRR added over the AGP aperture. */
	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	free(dev->agp, M_DRM, 0);
#endif
	/* Bridge pci_dev faked up in drm_attach() for devices behind
	 * a PCI-PCI bridge. */
	if (dev->pdev && dev->pdev->bus)
		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));

	if (sc->sc_allocated)
		free(dev, M_DRM, sizeof(struct drm_device));

	return 0;
}
1521
/*
 * Suspend path: raise the quiesce flag and wait until every thread
 * currently inside the driver (tracked by quiesce_count) has drained.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", INFSLP);
	}
	mtx_leave(&dev->quiesce_mtx);
}
1533
/*
 * Resume path: clear the quiesce flag and wake threads sleeping on
 * &dev->quiesce waiting for the device to become usable again.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
1542
1543 int
drm_activate(struct device * self,int act)1544 drm_activate(struct device *self, int act)
1545 {
1546 struct drm_softc *sc = (struct drm_softc *)self;
1547 struct drm_device *dev = sc->sc_drm;
1548
1549 switch (act) {
1550 case DVACT_QUIESCE:
1551 drm_quiesce(dev);
1552 break;
1553 case DVACT_WAKEUP:
1554 drm_wakeup(dev);
1555 break;
1556 }
1557
1558 return (0);
1559 }
1560
/* Autoconf glue: probe/attach/detach/activate entry points for drm(4). */
const struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};

/* Driver record for autoconf; drm is a "dull" (generic) device class. */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1569
1570 const struct pci_device_id *
drm_find_description(int vendor,int device,const struct pci_device_id * idlist)1571 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1572 {
1573 int i = 0;
1574
1575 for (i = 0; idlist[i].vendor != 0; i++) {
1576 if ((idlist[i].vendor == vendor) &&
1577 (idlist[i].device == device ||
1578 idlist[i].device == PCI_ANY_ID) &&
1579 (idlist[i].subvendor == PCI_ANY_ID) &&
1580 (idlist[i].subdevice == PCI_ANY_ID))
1581 return &idlist[i];
1582 }
1583 return NULL;
1584 }
1585
1586 int
drm_file_cmp(struct drm_file * f1,struct drm_file * f2)1587 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1588 {
1589 return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1590 }
1591
1592 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1593
1594 struct drm_file *
drm_find_file_by_minor(struct drm_device * dev,int minor)1595 drm_find_file_by_minor(struct drm_device *dev, int minor)
1596 {
1597 struct drm_file key;
1598
1599 key.fminor = minor;
1600 return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1601 }
1602
1603 struct drm_device *
drm_get_device_from_kdev(dev_t kdev)1604 drm_get_device_from_kdev(dev_t kdev)
1605 {
1606 int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1607 /* render */
1608 if (unit >= 128)
1609 unit -= 128;
1610 struct drm_softc *sc;
1611
1612 if (unit < drm_cd.cd_ndevs) {
1613 sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1614 if (sc)
1615 return sc->sc_drm;
1616 }
1617
1618 return NULL;
1619 }
1620
1621 void
filt_drmdetach(struct knote * kn)1622 filt_drmdetach(struct knote *kn)
1623 {
1624 struct drm_device *dev = kn->kn_hook;
1625 int s;
1626
1627 s = spltty();
1628 klist_remove_locked(&dev->note, kn);
1629 splx(s);
1630 }
1631
/*
 * kqueue event filter for EVFILT_DEVICE: latch the hinted event bits
 * when the listener subscribed to any of them, and report whether any
 * latched events are pending.
 */
int
filt_drmkms(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}
1639
1640 void
filt_drmreaddetach(struct knote * kn)1641 filt_drmreaddetach(struct knote *kn)
1642 {
1643 struct drm_file *file_priv = kn->kn_hook;
1644 int s;
1645
1646 s = spltty();
1647 klist_remove_locked(&file_priv->rsel.si_note, kn);
1648 splx(s);
1649 }
1650
1651 int
filt_drmread(struct knote * kn,long hint)1652 filt_drmread(struct knote *kn, long hint)
1653 {
1654 struct drm_file *file_priv = kn->kn_hook;
1655 int val = 0;
1656
1657 if ((hint & NOTE_SUBMIT) == 0)
1658 mtx_enter(&file_priv->minor->dev->event_lock);
1659 val = !list_empty(&file_priv->event_list);
1660 if ((hint & NOTE_SUBMIT) == 0)
1661 mtx_leave(&file_priv->minor->dev->event_lock);
1662 return (val);
1663 }
1664
/* kqueue EVFILT_DEVICE filter: device-wide notifications (hotplug). */
const struct filterops drm_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_drmdetach,
	.f_event = filt_drmkms,
};

/* kqueue EVFILT_READ filter: per-open-file pending DRM events. */
const struct filterops drmread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_drmreaddetach,
	.f_event = filt_drmread,
};
1678
/*
 * kqueue attach entry point for drm(4) nodes.  EVFILT_READ hooks the
 * per-open-file event queue; EVFILT_DEVICE hooks the device-wide note
 * list (e.g. hotplug).  Other filters are rejected.
 */
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device *dev = NULL;
	struct drm_file *file_priv = NULL;
	int s;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mutex_lock(&dev->struct_mutex);
		file_priv = drm_find_file_by_minor(dev, minor(kdev));
		mutex_unlock(&dev->struct_mutex);
		if (file_priv == NULL)
			return (ENXIO);

		kn->kn_fop = &drmread_filtops;
		kn->kn_hook = file_priv;

		/* Insertion and removal are serialized at spltty. */
		s = spltty();
		klist_insert_locked(&file_priv->rsel.si_note, kn);
		splx(s);
		break;
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = dev;

		s = spltty();
		klist_insert_locked(&dev->note, kn);
		splx(s);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
1719
/*
 * Character-device open for drm(4).  Resolves the minor to a primary or
 * render node, allocates the per-open drm_file, makes the first primary
 * opener DRM master and records the file in the per-device SPLAY tree.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device *dev = NULL;
	struct drm_file *file_priv;
	struct drm_minor *dm;
	int ret = 0;
	int dminor, realminor, minor_type;
	int need_setup = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* First opener triggers the legacy per-device setup below. */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* Map the minor onto a node type: 0-63 primary (card*),
	 * 128-191 render (renderD*); anything else is invalid. */
	dminor = minor(kdev);
	realminor = dminor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 128 && realminor < 192)
		minor_type = DRM_MINOR_RENDER;
	else {
		ret = ENXIO;
		goto err;
	}

	dm = *drm_minor_get_slot(dev, minor_type);
	if (dm == NULL) {
		ret = ENXIO;
		goto err;
	}
	dm->index = minor(kdev);

	file_priv = drm_file_alloc(dm);
	if (IS_ERR(file_priv)) {
		ret = ENOMEM;
		goto err;
	}

	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv)) {
		ret = drm_master_open(file_priv);
		if (ret != 0)
			goto out_file_free;
	}

	file_priv->filp = (void *)file_priv;
	file_priv->fminor = minor(kdev);

	mutex_lock(&dev->filelist_mutex);
	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);

	if (need_setup) {
		ret = drm_legacy_setup(dev);
		if (ret)
			goto out_file_free;
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

out_file_free:
	drm_file_free(file_priv);
err:
	/* Balance the atomic_fetch_inc() above. */
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return (ret);
}
1802
/*
 * Character-device close for drm(4).  Looks up the drm_file for this
 * minor, removes it from the per-device tree and frees it; the last
 * closer runs drm_lastclose().
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv;
	int retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		mutex_unlock(&dev->filelist_mutex);
		goto done;
	}

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);
	drm_file_free(file_priv);
done:
	/* Balances the atomic_fetch_inc() in drmopen(). */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return (retcode);
}
1839
/*
 * Character-device read for drm(4): deliver queued DRM events (vblank,
 * page-flip completion, ...) to userspace.  Blocks until at least one
 * event is available unless IO_NDELAY is set.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv;
	struct drm_pending_event *ev;
	int error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning so we
	 * may sleep in uiomove() below. */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
1890
/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
	    struct drm_pending_event, link);
	/* Events are delivered whole or not at all. */
	if (e->event->length > resid)
		goto out;

	/* Return the queue space before handing the event out. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	mtx_leave(&dev->event_lock);

	return (gotone);
}
1927
/*
 * Legacy mmap entry point for the character device.  Always fails;
 * mappings are handled elsewhere (GEM object fault paths).
 */
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	return -1;
}
1933
/*
 * Allocate, map and load a DMA buffer described by a drm_dmamem.
 * Returns NULL on any failure; every partially acquired bus_dma
 * resource is released through the goto chain at the bottom.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem *mem;
	size_t strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 * (Assumes nsegments >= 1 -- the -1 below would underflow
	 * otherwise; callers visible here always pass at least 1.)
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

	/* Unwind in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
1980
1981 void
drm_dmamem_free(bus_dma_tag_t dmat,struct drm_dmamem * mem)1982 drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
1983 {
1984 if (mem == NULL)
1985 return;
1986
1987 bus_dmamap_unload(dmat, mem->map);
1988 bus_dmamem_unmap(dmat, mem->kva, mem->size);
1989 bus_dmamem_free(dmat, mem->segs, mem->nsegs);
1990 bus_dmamap_destroy(dmat, mem->map);
1991 free(mem, M_DRM, 0);
1992 }
1993
1994 struct drm_dma_handle *
drm_pci_alloc(struct drm_device * dev,size_t size,size_t align)1995 drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
1996 {
1997 struct drm_dma_handle *dmah;
1998
1999 dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
2000 dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
2001 BUS_DMA_NOCACHE, 0);
2002 if (dmah->mem == NULL) {
2003 free(dmah, M_DRM, sizeof(*dmah));
2004 return NULL;
2005 }
2006 dmah->busaddr = dmah->mem->segs[0].ds_addr;
2007 dmah->size = dmah->mem->size;
2008 dmah->vaddr = dmah->mem->kva;
2009 return (dmah);
2010 }
2011
2012 void
drm_pci_free(struct drm_device * dev,struct drm_dma_handle * dmah)2013 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
2014 {
2015 if (dmah == NULL)
2016 return;
2017
2018 drm_dmamem_free(dev->dmat, dmah->mem);
2019 free(dmah, M_DRM, sizeof(*dmah));
2020 }
2021
/*
 * Compute order.  Can be made faster.
 *
 * Returns log2(size) rounded up to the next whole integer, e.g.
 * 4096 -> 12, 4097 -> 13.  Inputs of 0 and 1 both yield 0.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* Find the position of the highest set bit. */
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/*
	 * Round up when size is not an exact power of two.  The shift
	 * must be done in unsigned long: the previous `1 << order' was
	 * undefined behavior for order >= 31 (sizes of 4GB and up on
	 * LP64 platforms).
	 */
	if (size & ~(1UL << order))
		++order;

	return order;
}
2039
2040 int
drm_getpciinfo(struct drm_device * dev,void * data,struct drm_file * file_priv)2041 drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
2042 {
2043 struct drm_pciinfo *info = data;
2044
2045 if (dev->pdev == NULL)
2046 return -ENOTTY;
2047
2048 info->domain = dev->pdev->bus->domain_nr;
2049 info->bus = dev->pdev->bus->number;
2050 info->dev = PCI_SLOT(dev->pdev->devfn);
2051 info->func = PCI_FUNC(dev->pdev->devfn);
2052 info->vendor_id = dev->pdev->vendor;
2053 info->device_id = dev->pdev->device;
2054 info->subvendor_id = dev->pdev->subsystem_vendor;
2055 info->subdevice_id = dev->pdev->subsystem_device;
2056 info->revision_id = 0;
2057
2058 return 0;
2059 }
2060