1 /**
2  * \file drmP.h
3  * Private header for Direct Rendering Manager
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12  * Copyright (c) 2009-2010, Code Aurora Forum.
13  * All rights reserved.
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a
16  * copy of this software and associated documentation files (the "Software"),
17  * to deal in the Software without restriction, including without limitation
18  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
19  * and/or sell copies of the Software, and to permit persons to whom the
20  * Software is furnished to do so, subject to the following conditions:
21  *
22  * The above copyright notice and this permission notice (including the next
23  * paragraph) shall be included in all copies or substantial portions of the
24  * Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
29  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
30  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
31  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
32  * OTHER DEALINGS IN THE SOFTWARE.
33  */
34 
35 #ifndef _DRM_P_H_
36 #define _DRM_P_H_
37 
38 #ifdef __KERNEL__
39 #ifdef __alpha__
40 /* add include of current.h so that "current" is defined
41  * before static inline funcs in wait.h. Doing this so we
42  * can build the DRM (part of PI DRI). 4/21/2000 S + B */
43 #include <asm/current.h>
44 #endif				/* __alpha__ */
45 #include <linux/kernel.h>
46 #include <linux/kref.h>
47 #include <linux/miscdevice.h>
48 #include <linux/fs.h>
49 #include <linux/init.h>
50 #include <linux/file.h>
51 #include <linux/platform_device.h>
52 #include <linux/pci.h>
53 #include <linux/jiffies.h>
54 #include <linux/dma-mapping.h>
55 #include <linux/mm.h>
56 #include <linux/cdev.h>
57 #include <linux/mutex.h>
58 #include <linux/io.h>
59 #include <linux/slab.h>
60 #include <linux/ratelimit.h>
61 #if defined(__alpha__) || defined(__powerpc__)
62 #include <asm/pgtable.h>	/* For pte_wrprotect */
63 #endif
64 #include <asm/mman.h>
65 #include <asm/uaccess.h>
66 #include <linux/types.h>
67 #include <linux/agp_backend.h>
68 #include <linux/workqueue.h>
69 #include <linux/poll.h>
70 #include <linux/atomic.h>
71 #include <linux/uidgid.h>
73 #include <linux/pm.h>
74 #include <linux/timer.h>
75 #include <linux/ktime.h>
76 #include <asm/pgalloc.h>
77 #include <drm/drm.h>
78 #include <drm/drm_sarea.h>
79 #include <asm/barrier.h>
80 #include <drm/drm_vma_manager.h>
81 
82 #include <linux/idr.h>
83 
84 #ifndef __NetBSD__
85 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
86 #endif
87 
88 struct module;
89 
90 struct drm_file;
91 struct drm_device;
92 
93 struct device_node;
94 struct videomode;
95 
96 #ifdef __NetBSD__
97 #include <drm/drm_os_netbsd.h>
98 #else
99 #include <drm/drm_os_linux.h>
100 #endif
101 #include <drm/drm_hashtab.h>
102 #include <drm/drm_mm.h>
103 
104 /*
105  * 4 debug categories are defined:
106  *
107  * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
108  *	 This is the category used by the DRM_DEBUG() macro.
109  *
110  * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
111  *	   This is the category used by the DRM_DEBUG_DRIVER() macro.
112  *
113  * KMS: used in the modesetting code.
114  *	This is the category used by the DRM_DEBUG_KMS() macro.
115  *
116  * PRIME: used in the prime code.
117  *	  This is the category used by the DRM_DEBUG_PRIME() macro.
118  *
119  * Enabling verbose debug messages is done through the drm.debug parameter,
120  * each category being enabled by a bit.
121  *
122  * drm.debug=0x1 will enable CORE messages
123  * drm.debug=0x2 will enable DRIVER messages
124  * drm.debug=0x3 will enable CORE and DRIVER messages
125  * ...
126  * drm.debug=0xf will enable all messages
127  *
128  * An interesting feature is that it's possible to enable verbose logging at
129  * run-time by echoing the debug value into its sysfs node:
130  *   # echo 0xf > /sys/module/drm/parameters/debug
131  */
132 #define DRM_UT_CORE 		0x01
133 #define DRM_UT_DRIVER		0x02
134 #define DRM_UT_KMS		0x04
135 #define DRM_UT_PRIME		0x08
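/*
 * Illustrative sketch (not part of this header): driver code can gate
 * expensive diagnostics on the same mask the DRM_DEBUG_*() macros below
 * test, e.g.
 *
 *	if (drm_debug & DRM_UT_KMS)
 *		foo_dump_crtc_state(crtc);
 *
 * where foo_dump_crtc_state() is a hypothetical driver helper.
 */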
136 
137 extern __printf(2, 3)
138 void drm_ut_debug_printk(const char *function_name,
139 			 const char *format, ...);
140 extern __printf(2, 3)
141 int drm_err(const char *func, const char *format, ...);
142 
143 /***********************************************************************/
144 /** \name DRM template customization defaults */
145 /*@{*/
146 
147 /* driver capabilities and requirements mask */
148 #define DRIVER_USE_AGP     0x1
149 #define DRIVER_PCI_DMA     0x8
150 #define DRIVER_SG          0x10
151 #define DRIVER_HAVE_DMA    0x20
152 #define DRIVER_HAVE_IRQ    0x40
153 #define DRIVER_IRQ_SHARED  0x80
154 #define DRIVER_GEM         0x1000
155 #define DRIVER_MODESET     0x2000
156 #define DRIVER_PRIME       0x4000
157 #define DRIVER_RENDER      0x8000
158 
159 #define DRIVER_BUS_PCI 0x1
160 #define DRIVER_BUS_PLATFORM 0x2
161 #define DRIVER_BUS_USB 0x3
162 #define DRIVER_BUS_HOST1X 0x4
163 
164 /***********************************************************************/
165 /** \name Begin the DRM... */
166 /*@{*/
167 
168 #define DRM_DEBUG_CODE 2	  /**< Include debugging code if > 1, then
169 				     also include looping detection. */
170 
171 #define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
172 #define DRM_KERNEL_CONTEXT    0	 /**< Change drm_resctx if changed */
173 #define DRM_RESERVED_CONTEXTS 1	 /**< Change drm_resctx if changed */
174 
175 #define DRM_MAP_HASH_OFFSET 0x10000000
176 
177 /*@}*/
178 
179 /***********************************************************************/
180 /** \name Macros to make printk easier */
181 /*@{*/
182 
183 /**
184  * Error output.
185  *
186  * \param fmt printf() like format string.
187  * \param arg arguments
188  */
189 #define DRM_ERROR(fmt, ...)				\
190 	drm_err(__func__, fmt, ##__VA_ARGS__)
191 
192 
193 #ifdef __NetBSD__
194 /* XXX Use device_printf, with a device.  */
195 #define	DRM_INFO(fmt, ...)				\
196 	printf("drm: " fmt, ##__VA_ARGS__)
197 #endif
198 
199 /**
200  * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
201  *
202  * \param fmt printf() like format string.
203  * \param arg arguments
204  */
205 #define DRM_ERROR_RATELIMITED(fmt, ...)				\
206 ({									\
207 	static DEFINE_RATELIMIT_STATE(_rs,				\
208 				      DEFAULT_RATELIMIT_INTERVAL,	\
209 				      DEFAULT_RATELIMIT_BURST);		\
210 									\
211 	if (__ratelimit(&_rs))						\
212 		drm_err(__func__, fmt, ##__VA_ARGS__);			\
213 })
214 
215 #ifndef __NetBSD__
216 #define DRM_INFO(fmt, ...)				\
217 	printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
218 #endif
219 
220 #define DRM_INFO_ONCE(fmt, ...)				\
221 	printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
222 
223 /**
224  * Debug output.
225  *
226  * \param fmt printf() like format string.
227  * \param arg arguments
228  */
229 #if DRM_DEBUG_CODE
230 #define DRM_DEBUG(fmt, args...)						\
231 	do {								\
232 		if (unlikely(drm_debug & DRM_UT_CORE))			\
233 			drm_ut_debug_printk(__func__, fmt, ##args);	\
234 	} while (0)
235 
236 #define DRM_DEBUG_DRIVER(fmt, args...)					\
237 	do {								\
238 		if (unlikely(drm_debug & DRM_UT_DRIVER))		\
239 			drm_ut_debug_printk(__func__, fmt, ##args);	\
240 	} while (0)
241 #define DRM_DEBUG_KMS(fmt, args...)					\
242 	do {								\
243 		if (unlikely(drm_debug & DRM_UT_KMS))			\
244 			drm_ut_debug_printk(__func__, fmt, ##args);	\
245 	} while (0)
246 #define DRM_DEBUG_PRIME(fmt, args...)					\
247 	do {								\
248 		if (unlikely(drm_debug & DRM_UT_PRIME))			\
249 			drm_ut_debug_printk(__func__, fmt, ##args);	\
250 	} while (0)
251 #else
252 #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
253 #define DRM_DEBUG_KMS(fmt, args...)	do { } while (0)
254 #define DRM_DEBUG_PRIME(fmt, args...)	do { } while (0)
255 #define DRM_DEBUG(fmt, arg...)		 do { } while (0)
256 #endif
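/*
 * Illustrative usage (hedged sketch, hypothetical driver code): the
 * DRM_DEBUG_*() macros above and DRM_ERROR() take printf()-style
 * arguments and tag the message with the calling __func__.
 *
 *	DRM_DEBUG_DRIVER("ring init failed: %d\n", ret);
 *	DRM_ERROR("timed out waiting for the ring\n");
 */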
257 
258 /*@}*/
259 
260 /***********************************************************************/
261 /** \name Internal types and structures */
262 /*@{*/
263 
264 #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
265 
266 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
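/*
 * Illustrative note: DRM_IF_VERSION packs major/minor into a single int,
 * e.g. DRM_IF_VERSION(1, 4) evaluates to 0x10004.
 */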
267 
268 /**
269  * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
270  *
271  * \param dev DRM device.
272  * \param filp file pointer of the caller.
273  */
274 #define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
275 do {										\
276 	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
277 	    _file_priv->master->lock.file_priv != _file_priv)	{		\
278 		DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
279 			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
280 			   _file_priv->master->lock.file_priv, _file_priv);	\
281 		return -EINVAL;							\
282 	}									\
283 } while (0)
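/*
 * Illustrative usage (sketch only): legacy DMA ioctl handlers call this
 * macro on entry so they return -EINVAL unless the caller holds the
 * hardware lock, e.g.
 *
 *	static int foo_dma_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		LOCK_TEST_WITH_RETURN(dev, file_priv);
 *		...
 *	}
 *
 * foo_dma_ioctl() is a hypothetical driver function.
 */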
284 
285 /**
286  * Ioctl function type.
287  *
288  * \param inode device inode.
289  * \param file_priv DRM file private pointer.
290  * \param cmd command.
291  * \param arg argument.
292  */
293 typedef int drm_ioctl_t(struct drm_device *dev, void *data,
294 			struct drm_file *file_priv);
295 
296 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
297 			       unsigned long arg);
298 
299 #ifdef __NetBSD__
300 /* XXX Kludge...is there a better way to do this?  */
301 #define	DRM_IOCTL_NR(n)							\
302 	(IOCBASECMD(n) &~ (IOC_DIRMASK | (IOCGROUP(n) << IOCGROUP_SHIFT)))
303 #define	DRM_MAJOR	cdevsw_lookup_major(&drm_cdevsw)
304 #else
305 #define DRM_IOCTL_NR(n)                _IOC_NR(n)
306 #define DRM_MAJOR       226
307 #endif
308 
309 #define DRM_AUTH	0x1
310 #define	DRM_MASTER	0x2
311 #define DRM_ROOT_ONLY	0x4
312 #define DRM_CONTROL_ALLOW 0x8
313 #define DRM_UNLOCKED	0x10
314 #define DRM_RENDER_ALLOW 0x20
315 
316 struct drm_ioctl_desc {
317 	unsigned int cmd;
318 	int flags;
319 	drm_ioctl_t *func;
320 	unsigned int cmd_drv;
321 	const char *name;
322 };
323 
324 /**
325  * Creates a driver or general drm_ioctl_desc array entry for the given
326  * ioctl, for use by drm_ioctl().
327  */
328 
329 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
330 	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
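/*
 * Illustrative usage (hedged sketch): a driver's ioctl table is an array
 * of drm_ioctl_desc entries built with this macro, e.g.
 *
 *	static const struct drm_ioctl_desc foo_ioctls[] = {
 *		DRM_IOCTL_DEF_DRV(FOO_GETPARAM, foo_getparam_ioctl,
 *				  DRM_AUTH|DRM_RENDER_ALLOW),
 *	};
 *
 * FOO_GETPARAM and foo_getparam_ioctl are hypothetical names; the macro
 * expects DRM_FOO_GETPARAM and DRM_IOCTL_FOO_GETPARAM to be defined in
 * the driver's ioctl header.
 */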
331 
332 struct drm_magic_entry {
333 	struct list_head head;
334 	struct drm_hash_item hash_item;
335 	struct drm_file *priv;
336 };
337 
338 struct drm_vma_entry {
339 	struct list_head head;
340 	struct vm_area_struct *vma;
341 	pid_t pid;
342 };
343 
344 /**
345  * DMA buffer.
346  */
347 struct drm_buf {
348 	int idx;		       /**< Index into master buflist */
349 	int total;		       /**< Buffer size */
350 	int order;		       /**< log-base-2(total) */
351 	int used;		       /**< Amount of buffer in use (for DMA) */
352 	unsigned long offset;	       /**< Byte offset (used internally) */
353 	void *address;		       /**< Address of buffer */
354 	unsigned long bus_address;     /**< Bus address of buffer */
355 	struct drm_buf *next;	       /**< Kernel-only: used for free list */
356 	__volatile__ int waiting;      /**< On kernel DMA queue */
357 	__volatile__ int pending;      /**< On hardware DMA queue */
358 	struct drm_file *file_priv;    /**< Private of holding file descr */
359 	int context;		       /**< Kernel queue for this buffer */
360 	int while_locked;	       /**< Dispatch this buffer while locked */
361 	enum {
362 		DRM_LIST_NONE = 0,
363 		DRM_LIST_FREE = 1,
364 		DRM_LIST_WAIT = 2,
365 		DRM_LIST_PEND = 3,
366 		DRM_LIST_PRIO = 4,
367 		DRM_LIST_RECLAIM = 5
368 	} list;			       /**< Which list we're on */
369 
370 	int dev_priv_size;		 /**< Size of buffer private storage */
371 	void *dev_private;		 /**< Per-buffer private storage */
372 };
373 
374 /** bufs is one longer than it has to be */
375 struct drm_waitlist {
376 	int count;			/**< Number of possible buffers */
377 	struct drm_buf **bufs;		/**< List of pointers to buffers */
378 	struct drm_buf **rp;			/**< Read pointer */
379 	struct drm_buf **wp;			/**< Write pointer */
380 	struct drm_buf **end;		/**< End pointer */
381 	spinlock_t read_lock;
382 	spinlock_t write_lock;
383 };
384 
385 struct drm_freelist {
386 #ifndef __NetBSD__
387 	int initialized;	       /**< Freelist in use */
388 	atomic_t count;		       /**< Number of free buffers */
389 	struct drm_buf *next;	       /**< End pointer */
390 
391 #ifdef __NetBSD__
392 	drm_waitqueue_t waiting;       /**< Processes waiting on free bufs */
393 #else
394 	wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
395 #endif
396 #endif
397 	int low_mark;		       /**< Low water mark */
398 	int high_mark;		       /**< High water mark */
399 #ifndef __NetBSD__
400 	atomic_t wfh;		       /**< If waiting for high mark */
401 	spinlock_t lock;
402 #endif
403 };
404 
405 typedef struct drm_dma_handle {
406 	dma_addr_t busaddr;
407 	void *vaddr;
408 	size_t size;
409 #ifdef __NetBSD__
410 	bus_dma_tag_t dmah_tag;
411 	bus_dmamap_t dmah_map;
412 	bus_dma_segment_t dmah_seg;
413 #endif
414 } drm_dma_handle_t;
415 
416 /**
417  * Buffer entry.  There is one of these for each buffer size order.
418  */
419 struct drm_buf_entry {
420 	int buf_size;			/**< size */
421 	int buf_count;			/**< number of buffers */
422 	struct drm_buf *buflist;		/**< buffer list */
423 	int seg_count;
424 	int page_order;
425 	struct drm_dma_handle **seglist;
426 
427 	struct drm_freelist freelist;
428 };
429 
430 /* Event queued up for userspace to read */
431 struct drm_pending_event {
432 	struct drm_event *event;
433 	struct list_head link;
434 	struct drm_file *file_priv;
435 	pid_t pid; /* pid of requester, no guarantee it's valid by the time
436 		      we deliver the event, for tracing only */
437 	void (*destroy)(struct drm_pending_event *event);
438 };
439 
440 /* initial implementation using a linked list - todo hashtab */
441 struct drm_prime_file_private {
442 	struct list_head head;
443 	struct mutex lock;
444 };
445 
446 /** File private data */
447 struct drm_file {
448 	unsigned always_authenticated :1;
449 	unsigned authenticated :1;
450 	/* Whether we're master for a minor. Protected by master_mutex */
451 	unsigned is_master :1;
452 	/* true when the client has asked us to expose stereo 3D mode flags */
453 	unsigned stereo_allowed :1;
454 	/*
455 	 * true if client understands CRTC primary planes and cursor planes
456 	 * in the plane list
457 	 */
458 	unsigned universal_planes:1;
459 
460 #ifndef __NetBSD__
461 	struct pid *pid;
462 	kuid_t uid;
463 #endif
464 	drm_magic_t magic;
465 	struct list_head lhead;
466 	struct drm_minor *minor;
467 	unsigned long lock_count;
468 
469 	/** Mapping of mm object handles to object pointers. */
470 	struct idr object_idr;
471 	/** Lock for synchronization of access to object_idr. */
472 	spinlock_t table_lock;
473 
474 	struct file *filp;
475 	void *driver_priv;
476 
477 	struct drm_master *master; /* master this node is currently associated with
478 				      N.B. not always minor->master */
479 	/**
480 	 * fbs - List of framebuffers associated with this file.
481 	 *
482 	 * Protected by fbs_lock. Note that the fbs list holds a reference on
483 	 * the fb object to prevent it from untimely disappearing.
484 	 */
485 	struct list_head fbs;
486 	struct mutex fbs_lock;
487 
488 #ifdef __NetBSD__
489 	drm_waitqueue_t event_wait;
490 	struct selinfo event_selq;
491 #else
492 	wait_queue_head_t event_wait;
493 #endif
494 	struct list_head event_list;
495 	int event_space;
496 
497 	struct drm_prime_file_private prime;
498 };
499 
500 /** Wait queue */
501 struct drm_queue {
502 	atomic_t use_count;		/**< Outstanding uses (+1) */
503 	atomic_t finalization;		/**< Finalization in progress */
504 	atomic_t block_count;		/**< Count of processes waiting */
505 	atomic_t block_read;		/**< Queue blocked for reads */
506 #ifdef __NetBSD__
507 	drm_waitqueue_t read_queue;	/**< Processes waiting on block_read */
508 #else
509 	wait_queue_head_t read_queue;	/**< Processes waiting on block_read */
510 #endif
511 	atomic_t block_write;		/**< Queue blocked for writes */
512 #ifdef __NetBSD__
513 	drm_waitqueue_t write_queue;	/**< Processes waiting on block_write */
514 #else
515 	wait_queue_head_t write_queue;	/**< Processes waiting on block_write */
516 #endif
517 	atomic_t total_queued;		/**< Total queued statistic */
518 	atomic_t total_flushed;		/**< Total flushes statistic */
519 	atomic_t total_locks;		/**< Total locks statistics */
520 	enum drm_ctx_flags flags;	/**< Context preserving and 2D-only */
521 	struct drm_waitlist waitlist;	/**< Pending buffers */
522 #ifdef __NetBSD__
523 	drm_waitqueue_t flush_queue;	/**< Processes waiting until flush */
524 #else
525 	wait_queue_head_t flush_queue;	/**< Processes waiting until flush */
526 #endif
527 };
528 
529 /**
530  * Lock data.
531  */
532 struct drm_lock_data {
533 	struct drm_hw_lock *hw_lock;	/**< Hardware lock */
534 	/** Private of lock holder's file (NULL=kernel) */
535 	struct drm_file *file_priv;
536 #ifdef __NetBSD__
537 	drm_waitqueue_t lock_queue;	/**< Queue of blocked processes */
538 #else
539 	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
540 #endif
541 	unsigned long lock_time;	/**< Time of last lock in jiffies */
542 	spinlock_t spinlock;
543 	uint32_t kernel_waiters;
544 	uint32_t user_waiters;
545 	int idle_has_lock;
546 };
547 
548 /**
549  * DMA data.
550  */
551 struct drm_device_dma {
552 
553 	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
554 	int buf_count;			/**< total number of buffers */
555 	struct drm_buf **buflist;		/**< Vector of pointers into drm_device_dma::bufs */
556 	int seg_count;
557 	int page_count;			/**< number of pages */
558 	unsigned long *pagelist;	/**< page list */
559 	unsigned long byte_count;
560 	enum {
561 		_DRM_DMA_USE_AGP = 0x01,
562 		_DRM_DMA_USE_SG = 0x02,
563 		_DRM_DMA_USE_FB = 0x04,
564 		_DRM_DMA_USE_PCI_RO = 0x08
565 	} flags;
566 
567 };
568 
569 /**
570  * AGP memory entry.  Stored as a doubly linked list.
571  */
572 struct drm_agp_mem {
573 	unsigned long handle;		/**< handle */
574 	struct agp_memory *memory;
575 	unsigned long bound;		/**< address */
576 	int pages;
577 	struct list_head head;
578 };
579 
580 /**
581  * AGP data.
582  *
583  * \sa drm_agp_init() and drm_device::agp.
584  */
585 struct drm_agp_head {
586 	struct agp_kern_info agp_info;		/**< AGP device information */
587 	struct list_head memory;
588 	unsigned long mode;		/**< AGP mode */
589 	struct agp_bridge_data *bridge;
590 	int enabled;			/**< whether the AGP bus has been enabled */
591 	int acquired;			/**< whether the AGP device has been acquired */
592 	unsigned long base;
593 	int agp_mtrr;
594 	int cant_use_aperture;
595 	unsigned long page_mask;
596 };
597 
598 /**
599  * Scatter-gather memory.
600  */
601 struct drm_sg_mem {
602 	unsigned long handle;
603 	void *virtual;
604 #ifdef __NetBSD__
605 	size_t sg_size;
606 	bus_dma_tag_t sg_tag;
607 	bus_dmamap_t sg_map;
608 	unsigned int sg_nsegs;
609 	unsigned int sg_nsegs_max;
610 	bus_dma_segment_t sg_segs[];
611 #else
612 	int pages;
613 	struct page **pagelist;
614 	dma_addr_t *busaddr;
615 #endif
616 };
617 
618 struct drm_sigdata {
619 	int context;
620 	struct drm_hw_lock *lock;
621 };
622 
623 #ifdef __NetBSD__
624 /*
625  * XXX Remember: memory mappings only.  bm_flags must include
626  * BUS_SPACE_MAP_LINEAR.
627  */
628 struct drm_bus_map {
629 	bus_addr_t		bm_base;
630 	bus_size_t		bm_size;
631 	bus_space_handle_t	bm_bsh;
632 	int			bm_flags;
633 };
634 #endif
635 
636 /**
637  * Kernel side of a mapping
638  */
639 struct drm_local_map {
640 	resource_size_t offset;	 /**< Requested physical address (0 for SAREA)*/
641 	unsigned long size;	 /**< Requested physical size (bytes) */
642 	enum drm_map_type type;	 /**< Type of memory to map */
643 	enum drm_map_flags flags;	 /**< Flags */
644 	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
645 				 /**< Kernel-space: kernel-virtual address */
646 	int mtrr;		 /**< MTRR slot used */
647 
648 #ifdef __NetBSD__
649 	union {
650 		/* _DRM_FRAME_BUFFER, _DRM_AGP, _DRM_REGISTERS */
651 		/* XXX mtrr should be moved into this case too.  */
652 		struct {
653 			/*
654 			 * XXX bst seems like a waste of space, but not
655 			 * all accessors have the drm_device handy.
656 			 */
657 			bus_space_tag_t bst;
658 			bus_space_handle_t bsh;
659 			struct drm_bus_map *bus_map;
660 		} bus_space;
661 
662 		/* _DRM_CONSISTENT */
663 		struct drm_dma_handle *dmah;
664 
665 		/* _DRM_SCATTER_GATHER */
666 #if 0				/* XXX stored in dev->sg instead */
667 		struct drm_sg_mem *sg;
668 #endif
669 
670 		/* _DRM_SHM */
671 		/* XXX Anything?  uvm object?  */
672 	} lm_data;
673 #endif
674 };
675 
676 typedef struct drm_local_map drm_local_map_t;
677 
678 /**
679  * Mappings list
680  */
681 struct drm_map_list {
682 	struct list_head head;		/**< list head */
683 	struct drm_hash_item hash;
684 	struct drm_local_map *map;	/**< mapping */
685 	uint64_t user_token;
686 	struct drm_master *master;
687 };
688 
689 /**
690  * Context handle list
691  */
692 struct drm_ctx_list {
693 	struct list_head head;		/**< list head */
694 	drm_context_t handle;		/**< context handle */
695 	struct drm_file *tag;		/**< associated fd private data */
696 };
697 
698 /* location of GART table */
699 #define DRM_ATI_GART_MAIN 1
700 #define DRM_ATI_GART_FB   2
701 
702 #define DRM_ATI_GART_PCI 1
703 #define DRM_ATI_GART_PCIE 2
704 #define DRM_ATI_GART_IGP 3
705 
706 struct drm_ati_pcigart_info {
707 	int gart_table_location;
708 	int gart_reg_if;
709 	void *addr;
710 	dma_addr_t bus_addr;
711 	dma_addr_t table_mask;
712 	struct drm_dma_handle *table_handle;
713 	struct drm_local_map mapping;
714 	int table_size;
715 };
716 
717 /**
718  * This structure defines the GEM buffer object, which the DRM core and
719  * drivers use for their buffer objects.
720  */
721 struct drm_gem_object {
722 	/** Reference count of this object */
723 	struct kref refcount;
724 
725 	/**
726 	 * handle_count - gem file_priv handle count of this object
727 	 *
728 	 * Each handle also holds a reference. Note that when the handle_count
729 	 * drops to 0 any global names (e.g. the id in the flink namespace) will
730 	 * be cleared.
731 	 *
732 	 * Protected by dev->object_name_lock.
733 	 */
734 	unsigned handle_count;
735 
736 	/** Related drm device */
737 	struct drm_device *dev;
738 
739 #ifdef __NetBSD__
740 	/* UVM anonymous object for shared memory mappings.  */
741 	struct uvm_object *gemo_shm_uao;
742 
743 	/* UVM object with custom pager ops for device memory mappings.  */
744 	struct uvm_object gemo_uvmobj;
745 #else
746 	/** File representing the shmem storage */
747 	struct file *filp;
748 #endif
749 
750 	/* Mapping info for this object */
751 	struct drm_vma_offset_node vma_node;
752 
753 	/**
754 	 * Size of the object, in bytes.  Immutable over the object's
755 	 * lifetime.
756 	 */
757 	size_t size;
758 
759 	/**
760 	 * Global name for this object, starts at 1. 0 means unnamed.
761 	 * Access is covered by the object_name_lock in the related drm_device
762 	 */
763 	int name;
764 
765 	/**
766 	 * Memory domains. These monitor which caches contain read/write data
767 	 * related to the object. When transitioning from one set of domains
768 	 * to another, the driver is called to ensure that caches are suitably
769 	 * flushed and invalidated
770 	 */
771 	uint32_t read_domains;
772 	uint32_t write_domain;
773 
774 	/**
775 	 * While validating an exec operation, the
776 	 * new read/write domain values are computed here.
777 	 * They will be transferred to the above values
778 	 * at the point that any cache flushing occurs
779 	 */
780 	uint32_t pending_read_domains;
781 	uint32_t pending_write_domain;
782 
783 #ifndef __NetBSD__	    /* XXX drm prime */
784 	/**
785 	 * dma_buf - dma buf associated with this GEM object
786 	 *
787 	 * Pointer to the dma-buf associated with this gem object (either
788 	 * through importing or exporting). We break the resulting reference
789 	 * loop when the last gem handle for this object is released.
790 	 *
791 	 * Protected by obj->object_name_lock
792 	 */
793 	struct dma_buf *dma_buf;
794 
795 	/**
796 	 * import_attach - dma buf attachment backing this object
797 	 *
798 	 * Any foreign dma_buf imported as a gem object has this set to the
799 	 * attachment point for the device. This is invariant over the lifetime
800 	 * of a gem object.
801 	 *
802 	 * The driver's ->gem_free_object callback is responsible for cleaning
803 	 * up the dma_buf attachment and references acquired at import time.
804 	 *
805 	 * Note that the drm gem/prime core does not depend upon drivers setting
806 	 * this field any more. So for drivers where this doesn't make sense
807 	 * (e.g. virtual devices or a DisplayLink device behind a USB bus) they can
808 	 * simply leave it as NULL.
809 	 */
810 	struct dma_buf_attachment *import_attach;
811 #endif
812 };
813 
814 #include <drm/drm_crtc.h>
815 
816 /**
817  * struct drm_master - drm master structure
818  *
819  * @refcount: Refcount for this master object.
820  * @minor: Link back to minor char device we are master for. Immutable.
821  * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
822  * @unique_len: Length of unique field. Protected by drm_global_mutex.
823  * @unique_size: Amount allocated. Protected by drm_global_mutex.
824  * @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
825  * @magicfree: List of used authentication tokens. Protected by struct_mutex.
826  * @lock: DRI lock information.
827  * @driver_priv: Pointer to driver-private information.
828  */
829 struct drm_master {
830 	struct kref refcount;
831 	struct drm_minor *minor;
832 	char *unique;
833 	int unique_len;
834 	int unique_size;
835 	struct drm_open_hash magiclist;
836 	struct list_head magicfree;
837 	struct drm_lock_data lock;
838 	void *driver_priv;
839 };
840 
841 /* Size of ringbuffer for vblank timestamps. Just double-buffer
842  * in initial implementation.
843  */
844 #define DRM_VBLANKTIME_RBSIZE 2
845 
846 /* Flags and return codes for get_vblank_timestamp() driver function. */
847 #define DRM_CALLED_FROM_VBLIRQ 1
848 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
849 #define DRM_VBLANKTIME_INVBL             (1 << 1)
850 
851 /* get_scanout_position() return flags */
852 #define DRM_SCANOUTPOS_VALID        (1 << 0)
853 #define DRM_SCANOUTPOS_INVBL        (1 << 1)
854 #define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
855 
856 struct drm_bus_irq_cookie;
857 
858 struct drm_bus {
859 	int bus_type;
860 	/*
861 	 * XXX NetBSD will have a problem with this: pci_intr_handle_t
862 	 * is a long on some LP64 architectures, where int is 32-bit,
863 	 * such as alpha and mips64.
864 	 */
865 	int (*get_irq)(struct drm_device *dev);
866 #ifdef __NetBSD__
867 	int (*irq_install)(struct drm_device *, irqreturn_t (*)(void *), int,
868 	    const char *, void *, struct drm_bus_irq_cookie **);
869 	void (*irq_uninstall)(struct drm_device *,
870 	    struct drm_bus_irq_cookie *);
871 #endif
872 	const char *(*get_name)(struct drm_device *dev);
873 	int (*set_busid)(struct drm_device *dev, struct drm_master *master);
874 	int (*set_unique)(struct drm_device *dev, struct drm_master *master,
875 			  struct drm_unique *unique);
876 	int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
877 };
878 
879 /**
880  * DRM driver structure. This structure represent the common code for
881  * a family of cards. There will one drm_device for each card present
882  * in this family
883  */
884 struct drm_driver {
885 	int (*load) (struct drm_device *, unsigned long flags);
886 	int (*firstopen) (struct drm_device *);
887 	int (*open) (struct drm_device *, struct drm_file *);
888 	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
889 	void (*postclose) (struct drm_device *, struct drm_file *);
890 	void (*lastclose) (struct drm_device *);
891 	int (*unload) (struct drm_device *);
892 	int (*suspend) (struct drm_device *, pm_message_t state);
893 	int (*resume) (struct drm_device *);
894 	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
895 	int (*dma_quiescent) (struct drm_device *);
896 	int (*context_dtor) (struct drm_device *dev, int context);
897 
898 	/**
899 	 * get_vblank_counter - get raw hardware vblank counter
900 	 * @dev: DRM device
901 	 * @crtc: counter to fetch
902 	 *
903 	 * Driver callback for fetching a raw hardware vblank counter for @crtc.
904 	 * If a device doesn't have a hardware counter, the driver can simply
905 	 * return the value of drm_vblank_count. The DRM core will account for
906 	 * missed vblank events while interrupts were disabled based on system
907 	 * timestamps.
908 	 *
909 	 * Wraparound handling and loss of events due to modesetting are dealt
910 	 * with in the DRM core code.
911 	 *
912 	 * RETURNS
913 	 * Raw vblank counter value.
914 	 */
915 	u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
916 
917 	/**
918 	 * enable_vblank - enable vblank interrupt events
919 	 * @dev: DRM device
920 	 * @crtc: which irq to enable
921 	 *
922 	 * Enable vblank interrupts for @crtc.  If the device doesn't have
923 	 * a hardware vblank counter, this routine should be a no-op, since
924 	 * interrupts will have to stay on to keep the count accurate.
925 	 *
926 	 * RETURNS
927 	 * Zero on success, appropriate errno if the given @crtc's vblank
928 	 * interrupt cannot be enabled.
929 	 */
930 	int (*enable_vblank) (struct drm_device *dev, int crtc);
931 
932 	/**
933 	 * disable_vblank - disable vblank interrupt events
934 	 * @dev: DRM device
935 	 * @crtc: which irq to disable
936 	 *
937 	 * Disable vblank interrupts for @crtc.  If the device doesn't have
938 	 * a hardware vblank counter, this routine should be a no-op, since
939 	 * interrupts will have to stay on to keep the count accurate.
940 	 */
941 	void (*disable_vblank) (struct drm_device *dev, int crtc);
942 
943 	/**
944 	 * Called by \c drm_device_is_agp.  Typically used to determine if a
945 	 * card is really attached to AGP or not.
946 	 *
947 	 * \param dev  DRM device handle
948 	 *
949 	 * \returns
950 	 * One of three values is returned depending on whether or not the
951 	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
952 	 * (return of 1), or may or may not be AGP (return of 2).
953 	 */
954 	int (*device_is_agp) (struct drm_device *dev);
955 
956 	/**
957 	 * Called by vblank timestamping code.
958 	 *
959 	 * Return the current display scanout position from a crtc, and an
960 	 * optional accurate ktime_get timestamp of when position was measured.
961 	 *
962 	 * \param dev  DRM device.
963 	 * \param crtc Id of the crtc to query.
964 	 * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
965 	 * \param *vpos Target location for current vertical scanout position.
966 	 * \param *hpos Target location for current horizontal scanout position.
967 	 * \param *stime Target location for timestamp taken immediately before
968 	 *               scanout position query. Can be NULL to skip timestamp.
969 	 * \param *etime Target location for timestamp taken immediately after
970 	 *               scanout position query. Can be NULL to skip timestamp.
971 	 *
972 	 * Returns vpos as a positive number while in active scanout area.
973 	 * Returns vpos as a negative number inside vblank, counting the number
974 	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
975 	 * until start of active scanout / end of vblank."
976 	 *
977 	 * \return Flags, or'ed together as follows:
978 	 *
979 	 * DRM_SCANOUTPOS_VALID = Query successful.
980 	 * DRM_SCANOUTPOS_INVBL = Inside vblank.
981 	 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
982 	 * this flag means that returned position may be offset by a constant
983 	 * but unknown small number of scanlines wrt. real scanout position.
984 	 *
985 	 */
986 	int (*get_scanout_position) (struct drm_device *dev, int crtc,
987 				     unsigned int flags,
988 				     int *vpos, int *hpos, ktime_t *stime,
989 				     ktime_t *etime);
990 
991 	/**
992 	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
993 	 * timestamp when the most recent VBLANK interval ended or will end.
994 	 *
995 	 * Specifically, the timestamp in @vblank_time should correspond as
996 	 * closely as possible to the time when the first video scanline of
997 	 * the video frame after the end of VBLANK will start scanning out,
998 	 * the time immediately after end of the VBLANK interval. If the
999 	 * @crtc is currently inside VBLANK, this will be a time in the future.
1000 	 * If the @crtc is currently scanning out a frame, this will be the
1001 	 * past start time of the current scanout. This is meant to adhere
1002 	 * to the OpenML OML_sync_control extension specification.
1003 	 *
1004 	 * \param dev DRM device handle.
1005 	 * \param crtc crtc for which timestamp should be returned.
1006 	 * \param *max_error Maximum allowable timestamp error in nanoseconds.
1007 	 *                   Implementation should strive to provide timestamp
1008 	 *                   with an error of at most *max_error nanoseconds.
1009 	 *                   Returns true upper bound on error for timestamp.
1010 	 * \param *vblank_time Target location for returned vblank timestamp.
1011 	 * \param flags 0 = Defaults, no special treatment needed.
1012 	 * \param       DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
1013 	 *	        irq handler. Some drivers need to apply some workarounds
1014 	 *              for gpu-specific vblank irq quirks if flag is set.
1015 	 *
1016 	 * \returns
1017 	 * Zero if timestamping isn't supported in current display mode or a
1018 	 * negative number on failure. A positive status code on success,
1019 	 * which describes how the vblank_time timestamp was computed.
1020 	 */
1021 	int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
1022 				     int *max_error,
1023 				     struct timeval *vblank_time,
1024 				     unsigned flags);
1025 
1026 	/* these have to be filled in */
1027 
1028 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
1029 	void (*irq_preinstall) (struct drm_device *dev);
1030 	int (*irq_postinstall) (struct drm_device *dev);
1031 	void (*irq_uninstall) (struct drm_device *dev);
1032 
1033 	/* Master routines */
1034 	int (*master_create)(struct drm_device *dev, struct drm_master *master);
1035 	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
1036 	/**
1037 	 * master_set is called whenever the minor master is set.
1038 	 * master_drop is called whenever the minor master is dropped.
1039 	 */
1040 
1041 	int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
1042 			  bool from_open);
1043 	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
1044 			    bool from_release);
1045 
1046 	int (*debugfs_init)(struct drm_minor *minor);
1047 	void (*debugfs_cleanup)(struct drm_minor *minor);
1048 
1049 	/**
1050 	 * Driver-specific GEM object hooks: gem_free_object destroys an
1051 	 * object once its last reference is dropped; gem_open_object and
1052 	 * gem_close_object run when a handle to the object is created or
1053 	 * released for a drm_file.  gem_open_object returns 0 on success.
1054 	 */
1055 	void (*gem_free_object) (struct drm_gem_object *obj);
1056 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
1057 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
1058 
1059 	/* prime: */
1060 	/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
1061 	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
1062 				uint32_t handle, uint32_t flags, int *prime_fd);
1063 	/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
1064 	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
1065 				int prime_fd, uint32_t *handle);
1066 	/* export GEM -> dmabuf */
1067 	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
1068 				struct drm_gem_object *obj, int flags);
1069 	/* import dmabuf -> GEM */
1070 	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
1071 				struct dma_buf *dma_buf);
1072 	/* low-level interface used by drm_gem_prime_{import,export} */
1073 	int (*gem_prime_pin)(struct drm_gem_object *obj);
1074 	void (*gem_prime_unpin)(struct drm_gem_object *obj);
1075 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
1076 	struct drm_gem_object *(*gem_prime_import_sg_table)(
1077 				struct drm_device *dev, size_t size,
1078 				struct sg_table *sgt);
1079 	void *(*gem_prime_vmap)(struct drm_gem_object *obj);
1080 	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
1081 	int (*gem_prime_mmap)(struct drm_gem_object *obj,
1082 				struct vm_area_struct *vma);
1083 
1084 	/* vga arb irq handler */
1085 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
1086 
1087 	/* dumb alloc support */
1088 	int (*dumb_create)(struct drm_file *file_priv,
1089 			   struct drm_device *dev,
1090 			   struct drm_mode_create_dumb *args);
1091 	int (*dumb_map_offset)(struct drm_file *file_priv,
1092 			       struct drm_device *dev, uint32_t handle,
1093 			       uint64_t *offset);
1094 	int (*dumb_destroy)(struct drm_file *file_priv,
1095 			    struct drm_device *dev,
1096 			    uint32_t handle);
1097 
1098 	/* Driver private ops for this object */
1099 #ifdef __NetBSD__
1100 	int (*mmap_object)(struct drm_device *, off_t, size_t, int,
1101 	    struct uvm_object **, voff_t *, struct file *);
1102 	const struct uvm_pagerops *gem_uvm_ops;
1103 #else
1104 	const struct vm_operations_struct *gem_vm_ops;
1105 #endif
1106 
1107 	int major;
1108 	int minor;
1109 	int patchlevel;
1110 	const char *name;
1111 	const char *desc;
1112 	const char *date;
1113 
1114 	u32 driver_features;
1115 	int dev_priv_size;
1116 	const struct drm_ioctl_desc *ioctls;
1117 	int num_ioctls;
1118 	const struct file_operations *fops;
1119 	union {
1120 		struct pci_driver *pci;
1121 		struct platform_device *platform_device;
1122 		struct usb_driver *usb;
1123 	} kdriver;
1124 	const struct drm_bus *bus;
1125 
1126 	/* List of devices hanging off this driver with stealth attach. */
1127 	struct list_head legacy_dev_list;
1128 };
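/*
 * Illustrative sketch of a minimal drm_driver definition (hypothetical
 * "foo" driver, not part of this header); real drivers fill in only the
 * hooks they need:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 *		.load = foo_load,
 *		.unload = foo_unload,
 *		.gem_free_object = foo_gem_free_object,
 *		.ioctls = foo_ioctls,
 *		.fops = &foo_fops,
 *		.name = "foo",
 *		.desc = "Hypothetical example driver",
 *		.date = "20140101",
 *		.major = 1,
 *		.minor = 0,
 *	};
 */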
1129 
1130 enum drm_minor_type {
1131 	DRM_MINOR_LEGACY,
1132 	DRM_MINOR_CONTROL,
1133 	DRM_MINOR_RENDER,
1134 	DRM_MINOR_CNT,
1135 };
1136 
1137 #ifdef __NetBSD__		/* XXX debugfs */
1138 struct seq_file;
1139 #endif
1140 
1141 /**
1142  * Info file list entry. This structure represents a debugfs or proc file to
1143  * be created by the drm core
1144  */
1145 struct drm_info_list {
1146 	const char *name; /** file name */
1147 	int (*show)(struct seq_file*, void*); /** show callback */
1148 	u32 driver_features; /**< Required driver features for this entry */
1149 	void *data;
1150 };
1151 
1152 /**
1153  * debugfs node structure. This structure represents a debugfs file.
1154  */
1155 struct drm_info_node {
1156 	struct list_head list;
1157 	struct drm_minor *minor;
1158 	const struct drm_info_list *info_ent;
1159 	struct dentry *dent;
1160 };
1161 
1162 /**
1163  * DRM minor structure. This structure represents a drm minor number.
1164  */
1165 struct drm_minor {
1166 	int index;			/**< Minor device number */
1167 	int type;                       /**< Control or render */
1168 	struct device *kdev;		/**< Linux device */
1169 	struct drm_device *dev;
1170 
1171 #ifndef __NetBSD__		/* XXX debugfs */
1172 	struct dentry *debugfs_root;
1173 
1174 	struct list_head debugfs_list;
1175 	struct mutex debugfs_lock; /* Protects debugfs_list. */
1176 #endif
1177 
1178 	/* currently active master for this node. Protected by master_mutex */
1179 	struct drm_master *master;
1180 	struct drm_mode_group mode_group;
1181 };
1182 
1183 
1184 struct drm_pending_vblank_event {
1185 	struct drm_pending_event base;
1186 	int pipe;
1187 	struct drm_event_vblank event;
1188 };
1189 
1190 struct drm_vblank_crtc {
1191 #ifdef __NetBSD__
1192 	drm_waitqueue_t queue;
1193 #else
1194 	wait_queue_head_t queue;	/**< VBLANK wait queue */
1195 #endif
1196 	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/**< timestamp of current count */
1197 	atomic_t count;			/**< number of VBLANK interrupts */
1198 	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
1199 	u32 last;			/* protected by dev->vbl_lock, used */
1200 					/* for wraparound handling */
1201 	u32 last_wait;			/* Last vblank seqno waited per CRTC */
1202 	unsigned int inmodeset;		/* Display driver is setting mode */
1203 	bool enabled;			/* so we don't call enable more than
1204 					   once per disable */
1205 };
1206 
1207 /**
1208  * DRM device structure. This structure represents a complete card that
1209  * may contain multiple heads.
1210  */
1211 struct drm_device {
1212 	struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
1213 	char *devname;			/**< For /proc/interrupts */
1214 	int if_version;			/**< Highest interface version set */
1215 
1216 	/** \name Lifetime Management */
1217 	/*@{ */
1218 	struct kref ref;		/**< Object ref-count */
1219 	struct device *dev;		/**< Device structure of bus-device */
1220 	struct drm_driver *driver;	/**< DRM driver managing the device */
1221 	void *dev_private;		/**< DRM driver private data */
1222 	struct drm_minor *control;		/**< Control node */
1223 	struct drm_minor *primary;		/**< Primary node */
1224 	struct drm_minor *render;		/**< Render node */
1225 	atomic_t unplugged;			/**< Flag whether dev is dead */
1226 	struct inode *anon_inode;		/**< inode for private address-space */
1227 	/*@} */
1228 
1229 	/** \name Locks */
1230 	/*@{ */
1231 	spinlock_t count_lock;		/**< For inuse, drm_device::open_count, drm_device::buf_use */
1232 	struct mutex struct_mutex;	/**< For others */
1233 	struct mutex master_mutex;      /**< For drm_minor::master and drm_file::is_master */
1234 	/*@} */
1235 
1236 	/** \name Usage Counters */
1237 	/*@{ */
1238 	int open_count;			/**< Outstanding files open */
1239 	int buf_use;			/**< Buffers in use -- cannot alloc */
1240 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
1241 	/*@} */
1242 
1243 	struct list_head filelist;
1244 
1245 	/** \name Memory management */
1246 	/*@{ */
1247 	struct list_head maplist;	/**< Linked list of regions */
1248 	struct drm_open_hash map_hash;	/**< User token hash table for maps */
1249 
1250 	/** \name Context handle management */
1251 	/*@{ */
1252 	struct list_head ctxlist;	/**< Linked list of context handles */
1253 	struct mutex ctxlist_mutex;	/**< For ctxlist */
1254 
1255 	struct idr ctx_idr;
1256 
1257 	struct list_head vmalist;	/**< List of vmas (for debugging) */
1258 
1259 	/*@} */
1260 
1261 	/** \name DMA support */
1262 	/*@{ */
1263 	struct drm_device_dma *dma;		/**< Optional pointer for DMA support */
1264 	/*@} */
1265 
1266 	/** \name Context support */
1267 	/*@{ */
1268 	bool irq_enabled;		/**< True if irq handler is enabled */
1269 #ifdef __NetBSD__
1270 	struct drm_bus_irq_cookie *irq_cookie;
1271 #endif
1272 	__volatile__ long context_flag;	/**< Context swapping flag */
1273 	int last_context;		/**< Last current context */
1274 	/*@} */
1275 
1276 	/** \name VBLANK IRQ support */
1277 	/*@{ */
1278 
1279 	/*
1280 	 * At load time, disabling the vblank interrupt won't be allowed since
1281 	 * old clients may not call the modeset ioctl and therefore misbehave.
1282 	 * Once the modeset ioctl *has* been called though, we can safely
1283 	 * disable them when unused.
1284 	 */
1285 	bool vblank_disable_allowed;
1286 
1287 	/* array of size num_crtcs */
1288 	struct drm_vblank_crtc *vblank;
1289 
1290 	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
1291 	spinlock_t vbl_lock;
1292 	struct timer_list vblank_disable_timer;
1293 
1294 	u32 max_vblank_count;           /**< size of vblank counter register */
1295 
1296 	/**
1297 	 * List of events
1298 	 */
1299 	struct list_head vblank_event_list;
1300 	spinlock_t event_lock;
1301 
1302 	/*@} */
1303 
1304 	struct drm_agp_head *agp;	/**< AGP data */
1305 
1306 	struct pci_dev *pdev;		/**< PCI device structure */
1307 #ifdef __alpha__
1308 	struct pci_controller *hose;
1309 #endif
1310 
1311 	struct platform_device *platformdev; /**< Platform device structure */
1312 	struct usb_device *usbdev;
1313 
1314 #ifdef __NetBSD__
1315 	bus_space_tag_t bst;
1316 	struct drm_bus_map *bus_maps;
1317 	unsigned bus_nmaps;
1318 	bus_dma_tag_t bus_dmat;
1319 	bus_dma_tag_t dmat;
1320 	bool dmat_subregion_p;
1321 	bus_addr_t dmat_subregion_min;
1322 	bus_addr_t dmat_subregion_max;
1323 #endif
1324 
1325 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
1326 	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
1327 	struct drm_sigdata sigdata;	   /**< For block_all_signals */
1328 #ifndef __NetBSD__
1329 	sigset_t sigmask;
1330 #endif
1331 
1332 	struct drm_local_map *agp_buffer_map;
1333 	unsigned int agp_buffer_token;
1334 
1335         struct drm_mode_config mode_config;	/**< Current mode config */
1336 
1337 	/** \name GEM information */
1338 	/*@{ */
1339 	struct mutex object_name_lock;
1340 	struct idr object_name_idr;
1341 	struct drm_vma_offset_manager *vma_offset_manager;
1342 	/*@} */
1343 	int switch_power_state;
1344 };
1345 
1346 #define DRM_SWITCH_POWER_ON 0
1347 #define DRM_SWITCH_POWER_OFF 1
1348 #define DRM_SWITCH_POWER_CHANGING 2
1349 #define DRM_SWITCH_POWER_DYNAMIC_OFF 3
1350 
1351 static __inline__ int drm_core_check_feature(struct drm_device *dev,
1352 					     int feature)
1353 {
1354 	return ((dev->driver->driver_features & feature) ? 1 : 0);
1355 }
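/*
 * Illustrative usage (sketch): callers test driver_features bits before
 * touching optional functionality, e.g.
 *
 *	if (!drm_core_check_feature(dev, DRIVER_GEM))
 *		return -ENODEV;
 */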
1356 
1357 static inline int drm_dev_to_irq(struct drm_device *dev)
1358 {
1359 	return dev->driver->bus->get_irq(dev);
1360 }
1361 
1362 static inline void drm_device_set_unplugged(struct drm_device *dev)
1363 {
1364 	smp_wmb();
1365 	atomic_set(&dev->unplugged, 1);
1366 }
1367 
1368 static inline int drm_device_is_unplugged(struct drm_device *dev)
1369 {
1370 	int ret = atomic_read(&dev->unplugged);
1371 	smp_rmb();
1372 	return ret;
1373 }
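/*
 * Illustrative usage (sketch): ioctl and mmap paths typically bail out
 * early once the device has been marked unplugged, e.g.
 *
 *	if (drm_device_is_unplugged(dev))
 *		return -ENODEV;
 *
 * The smp_wmb()/smp_rmb() pair above orders the unplugged flag against
 * the surrounding stores and loads.
 */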
1374 
1375 static inline bool drm_modeset_is_locked(struct drm_device *dev)
1376 {
1377 	return mutex_is_locked(&dev->mode_config.mutex);
1378 }
1379 
1380 static inline bool drm_is_render_client(const struct drm_file *file_priv)
1381 {
1382 	return file_priv->minor->type == DRM_MINOR_RENDER;
1383 }
1384 
1385 static inline bool drm_is_control_client(const struct drm_file *file_priv)
1386 {
1387 	return file_priv->minor->type == DRM_MINOR_CONTROL;
1388 }
1389 
1390 static inline bool drm_is_primary_client(const struct drm_file *file_priv)
1391 {
1392 	return file_priv->minor->type == DRM_MINOR_LEGACY;
1393 }
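/*
 * Illustrative usage (hedged sketch): ioctl dispatch uses these helpers
 * to restrict render-node clients to ioctls marked DRM_RENDER_ALLOW, e.g.
 *
 *	if (drm_is_render_client(file_priv) &&
 *	    !(ioctl->flags & DRM_RENDER_ALLOW))
 *		return -EACCES;
 */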
1394 
1395 /******************************************************************/
1396 /** \name Internal function definitions */
1397 /*@{*/
1398 
1399 				/* Driver support (drm_drv.h) */
1400 #ifndef __NetBSD__
1401 extern long drm_ioctl(struct file *filp,
1402 		      unsigned int cmd, unsigned long arg);
1403 extern long drm_compat_ioctl(struct file *filp,
1404 			     unsigned int cmd, unsigned long arg);
1405 #endif
1406 extern int drm_lastclose(struct drm_device *dev);
1407 #ifndef __NetBSD__
1408 extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
1409 #endif
1410 
1411 				/* Device support (drm_fops.h) */
1412 extern struct mutex drm_global_mutex;
1413 #ifdef __NetBSD__
1414 extern int drm_open_file(struct drm_file *, void *, struct drm_minor *);
1415 extern void drm_close_file(struct drm_file *);
1416 #else
1417 extern int drm_open(struct inode *inode, struct file *filp);
1418 extern int drm_stub_open(struct inode *inode, struct file *filp);
1419 extern ssize_t drm_read(struct file *filp, char __user *buffer,
1420 			size_t count, loff_t *offset);
1421 extern int drm_release(struct inode *inode, struct file *filp);
1422 #endif
1423 
1424 				/* Mapping support (drm_vm.h) */
1425 #ifdef __NetBSD__
1426 extern int drm_mmap_object(struct drm_device *, off_t, size_t, int,
1427     struct uvm_object **, voff_t *, struct file *);
1428 extern paddr_t drm_mmap_paddr(struct drm_device *, off_t, int);
1429 #else
1430 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
1431 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
1432 extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
1433 extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
1434 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
1435 
1436 #endif
1437 
1438 				/* Memory management support (drm_memory.h) */
1439 #include <drm/drm_memory.h>
1440 #ifdef __NetBSD__
1441 extern int drm_limit_dma_space(struct drm_device *, resource_size_t,
1442     resource_size_t);
1443 #endif
1444 
1445 				/* Misc. IOCTL support (drm_ioctl.h) */
1446 extern int drm_irq_by_busid(struct drm_device *dev, void *data,
1447 			    struct drm_file *file_priv);
1448 extern int drm_getunique(struct drm_device *dev, void *data,
1449 			 struct drm_file *file_priv);
1450 extern int drm_setunique(struct drm_device *dev, void *data,
1451 			 struct drm_file *file_priv);
1452 extern int drm_getmap(struct drm_device *dev, void *data,
1453 		      struct drm_file *file_priv);
1454 extern int drm_getclient(struct drm_device *dev, void *data,
1455 			 struct drm_file *file_priv);
1456 extern int drm_getstats(struct drm_device *dev, void *data,
1457 			struct drm_file *file_priv);
1458 extern int drm_getcap(struct drm_device *dev, void *data,
1459 		      struct drm_file *file_priv);
1460 extern int drm_setclientcap(struct drm_device *dev, void *data,
1461 			    struct drm_file *file_priv);
1462 extern int drm_setversion(struct drm_device *dev, void *data,
1463 			  struct drm_file *file_priv);
1464 extern int drm_noop(struct drm_device *dev, void *data,
1465 		    struct drm_file *file_priv);
1466 
1467 				/* Context IOCTL support (drm_context.h) */
1468 extern int drm_resctx(struct drm_device *dev, void *data,
1469 		      struct drm_file *file_priv);
1470 extern int drm_addctx(struct drm_device *dev, void *data,
1471 		      struct drm_file *file_priv);
1472 extern int drm_getctx(struct drm_device *dev, void *data,
1473 		      struct drm_file *file_priv);
1474 extern int drm_switchctx(struct drm_device *dev, void *data,
1475 			 struct drm_file *file_priv);
1476 extern int drm_newctx(struct drm_device *dev, void *data,
1477 		      struct drm_file *file_priv);
1478 extern int drm_rmctx(struct drm_device *dev, void *data,
1479 		     struct drm_file *file_priv);
1480 
1481 extern int drm_ctxbitmap_init(struct drm_device *dev);
1482 extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
1483 extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
1484 
1485 extern int drm_setsareactx(struct drm_device *dev, void *data,
1486 			   struct drm_file *file_priv);
1487 extern int drm_getsareactx(struct drm_device *dev, void *data,
1488 			   struct drm_file *file_priv);
1489 
1490 				/* Authentication IOCTL support (drm_auth.h) */
1491 extern int drm_getmagic(struct drm_device *dev, void *data,
1492 			struct drm_file *file_priv);
1493 extern int drm_authmagic(struct drm_device *dev, void *data,
1494 			 struct drm_file *file_priv);
1495 extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
1496 
1497 /* Cache management (drm_cache.c) */
1498 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
1499 #ifdef __NetBSD__		/* XXX drm clflush */
1500 void drm_clflush_pglist(struct pglist *);
1501 void drm_clflush_page(struct page *);
1502 void drm_clflush_virt_range(const void *, size_t);
1503 #else
1504 void drm_clflush_sg(struct sg_table *st);
1505 void drm_clflush_virt_range(char *addr, unsigned long length);
1506 #endif
1507 
1508 				/* Locking IOCTL support (drm_lock.h) */
1509 extern int drm_lock(struct drm_device *dev, void *data,
1510 		    struct drm_file *file_priv);
1511 extern int drm_unlock(struct drm_device *dev, void *data,
1512 		      struct drm_file *file_priv);
1513 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
1514 extern void drm_idlelock_take(struct drm_lock_data *lock_data);
1515 extern void drm_idlelock_release(struct drm_lock_data *lock_data);
1516 
1517 /*
1518  * These are exported to drivers so that they can implement fencing using
1519  * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
1520  */
1521 
1522 extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
1523 
1524 				/* Buffer management support (drm_bufs.h) */
1525 extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
1526 extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
1527 extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
1528 		      unsigned int size, enum drm_map_type type,
1529 		      enum drm_map_flags flags, struct drm_local_map **map_ptr);
1530 extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
1531 			    struct drm_file *file_priv);
1532 extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
1533 extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
1534 extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
1535 			   struct drm_file *file_priv);
1536 extern int drm_addbufs(struct drm_device *dev, void *data,
1537 		       struct drm_file *file_priv);
1538 extern int drm_infobufs(struct drm_device *dev, void *data,
1539 			struct drm_file *file_priv);
1540 extern int drm_markbufs(struct drm_device *dev, void *data,
1541 			struct drm_file *file_priv);
1542 extern int drm_freebufs(struct drm_device *dev, void *data,
1543 			struct drm_file *file_priv);
1544 extern int drm_mapbufs(struct drm_device *dev, void *data,
1545 		       struct drm_file *file_priv);
1546 extern int drm_dma_ioctl(struct drm_device *dev, void *data,
1547 			 struct drm_file *file_priv);
1548 
1549 				/* DMA support (drm_dma.h) */
1550 extern int drm_legacy_dma_setup(struct drm_device *dev);
1551 extern void drm_legacy_dma_takedown(struct drm_device *dev);
1552 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
1553 extern void drm_core_reclaim_buffers(struct drm_device *dev,
1554 				     struct drm_file *filp);
1555 
1556 				/* IRQ support (drm_irq.h) */
1557 extern int drm_control(struct drm_device *dev, void *data,
1558 		       struct drm_file *file_priv);
1559 extern int drm_irq_install(struct drm_device *dev);
1560 extern int drm_irq_uninstall(struct drm_device *dev);
1561 
1562 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
1563 extern int drm_wait_vblank(struct drm_device *dev, void *data,
1564 			   struct drm_file *filp);
1565 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1566 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
1567 				     struct timeval *vblanktime);
1568 extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
1569 				     struct drm_pending_vblank_event *e);
1570 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
1571 extern int drm_vblank_get(struct drm_device *dev, int crtc);
1572 extern void drm_vblank_put(struct drm_device *dev, int crtc);
1573 extern void drm_vblank_off(struct drm_device *dev, int crtc);
1574 extern void drm_vblank_cleanup(struct drm_device *dev);
1575 extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
1576 				     struct timeval *tvblank, unsigned flags);
1577 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1578 						 int crtc, int *max_error,
1579 						 struct timeval *vblank_time,
1580 						 unsigned flags,
1581 						 const struct drm_crtc *refcrtc,
1582 						 const struct drm_display_mode *mode);
1583 extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
1584 					    const struct drm_display_mode *mode);
1585 
1586 
1587 /* Modesetting support */
1588 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1589 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
1590 extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1591 			   struct drm_file *file_priv);
1592 
1593 				/* AGP/GART support (drm_agpsupport.h) */
1594 
1595 #include <drm/drm_agpsupport.h>
1596 
1597 #ifdef __NetBSD__
1598 struct drm_agp_hooks {
1599 	drm_ioctl_t	*agph_acquire_ioctl;
1600 	drm_ioctl_t	*agph_release_ioctl;
1601 	drm_ioctl_t	*agph_enable_ioctl;
1602 	drm_ioctl_t	*agph_info_ioctl;
1603 	drm_ioctl_t	*agph_alloc_ioctl;
1604 	drm_ioctl_t	*agph_free_ioctl;
1605 	drm_ioctl_t	*agph_bind_ioctl;
1606 	drm_ioctl_t	*agph_unbind_ioctl;
1607 	int		(*agph_release)(struct drm_device *);
1608 	void		(*agph_clear)(struct drm_device *);
1609 };
1610 
1611 extern int drm_agp_release_hook(struct drm_device *);
1612 extern void drm_agp_clear_hook(struct drm_device *);
1613 
1614 extern int drm_agp_register(const struct drm_agp_hooks *);
1615 extern void drm_agp_deregister(const struct drm_agp_hooks *);
1616 #endif
1617 
1618 				/* Stub support (drm_stub.h) */
1619 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
1620 			       struct drm_file *file_priv);
1621 extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
1622 				struct drm_file *file_priv);
1623 struct drm_master *drm_master_create(struct drm_minor *minor);
1624 extern struct drm_master *drm_master_get(struct drm_master *master);
1625 extern void drm_master_put(struct drm_master **master);
1626 
1627 extern void drm_put_dev(struct drm_device *dev);
1628 extern void drm_unplug_dev(struct drm_device *dev);
1629 extern unsigned int drm_debug;
1630 extern unsigned int drm_rnodes;
1631 extern unsigned int drm_universal_planes;
1632 
1633 extern unsigned int drm_vblank_offdelay;
1634 extern unsigned int drm_timestamp_precision;
1635 extern unsigned int drm_timestamp_monotonic;
1636 
1637 extern struct class *drm_class;
1638 #ifndef __NetBSD__
1639 extern struct dentry *drm_debugfs_root;
1640 
1641 #else
1642 extern spinlock_t drm_minor_lock;
1643 #endif
1644 extern struct idr drm_minors_idr;
1645 
1646 extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1647 
1648 				/* Debugfs support */
1649 #if defined(CONFIG_DEBUG_FS)
1650 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1651 			    struct dentry *root);
1652 extern int drm_debugfs_create_files(const struct drm_info_list *files,
1653 				    int count, struct dentry *root,
1654 				    struct drm_minor *minor);
1655 extern int drm_debugfs_remove_files(const struct drm_info_list *files,
1656 				    int count, struct drm_minor *minor);
1657 extern int drm_debugfs_cleanup(struct drm_minor *minor);
1658 #else
1659 static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1660 				   struct dentry *root)
1661 {
1662 	return 0;
1663 }
1664 
1665 static inline int drm_debugfs_create_files(const struct drm_info_list *files,
1666 					   int count, struct dentry *root,
1667 					   struct drm_minor *minor)
1668 {
1669 	return 0;
1670 }
1671 
1672 static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
1673 					   int count, struct drm_minor *minor)
1674 {
1675 	return 0;
1676 }
1677 
1678 static inline int drm_debugfs_cleanup(struct drm_minor *minor)
1679 {
1680 	return 0;
1681 }
1682 #endif
1683 
1684 #ifndef __NetBSD__
1685 				/* Info file support */
1686 extern int drm_name_info(struct seq_file *m, void *data);
1687 extern int drm_vm_info(struct seq_file *m, void *data);
1688 extern int drm_bufs_info(struct seq_file *m, void *data);
1689 extern int drm_vblank_info(struct seq_file *m, void *data);
1690 extern int drm_clients_info(struct seq_file *m, void* data);
1691 extern int drm_gem_name_info(struct seq_file *m, void *data);
1692 #endif
1693 
1694 
1695 extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
1696 		struct drm_gem_object *obj, int flags);
1697 extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
1698 		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
1699 		int *prime_fd);
1700 extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1701 		struct dma_buf *dma_buf);
1702 extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
1703 		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
1704 extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
1705 
1706 extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
1707 					struct drm_file *file_priv);
1708 extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
1709 					struct drm_file *file_priv);
1710 
1711 #ifndef __NetBSD__		/* XXX temporary measure 20130212 */
1712 extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
1713 					    dma_addr_t *addrs, int max_pages);
1714 extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
1715 extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
1716 #endif
1717 
1718 int drm_gem_dumb_destroy(struct drm_file *file,
1719 			 struct drm_device *dev,
1720 			 uint32_t handle);
1721 
1722 #ifndef __NetBSD__		/* XXX drm prime */
1723 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
1724 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
1725 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
1726 #endif
1727 
1728 #if DRM_DEBUG_CODE
1729 #ifndef __NetBSD__
1730 extern int drm_vma_info(struct seq_file *m, void *data);
1731 #endif
1732 #endif
1733 
1734 				/* Scatter Gather Support (drm_scatter.h) */
1735 extern void drm_legacy_sg_cleanup(struct drm_device *dev);
1736 extern int drm_sg_alloc(struct drm_device *dev, void *data,
1737 			struct drm_file *file_priv);
1738 extern int drm_sg_free(struct drm_device *dev, void *data,
1739 		       struct drm_file *file_priv);
1740 
1741 			       /* ATI PCIGART support (ati_pcigart.h) */
1742 extern int drm_ati_pcigart_init(struct drm_device *dev,
1743 				struct drm_ati_pcigart_info * gart_info);
1744 extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
1745 				   struct drm_ati_pcigart_info * gart_info);
1746 
1747 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1748 				       size_t align);
1749 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1750 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1751 #ifdef __NetBSD__
1752 extern int drmkms_pci_agp_guarantee_initialized(void);
1753 extern int drm_pci_attach(device_t, const struct pci_attach_args *,
1754     struct pci_dev *, struct drm_driver *, unsigned long,
1755     struct drm_device **);
1756 extern int drm_pci_detach(struct drm_device *, int);
1757 #endif
1758 
1759 			       /* sysfs support (drm_sysfs.c) */
1760 struct drm_sysfs_class;
1761 extern struct class *drm_sysfs_create(struct module *owner, char *name);
1762 extern void drm_sysfs_destroy(void);
1763 extern int drm_sysfs_device_add(struct drm_minor *minor);
1764 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
1765 extern void drm_sysfs_device_remove(struct drm_minor *minor);
1766 extern int drm_sysfs_connector_add(struct drm_connector *connector);
1767 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
1768 
1769 /* Graphics Execution Manager library functions (drm_gem.c) */
1770 int drm_gem_init(struct drm_device *dev);
1771 void drm_gem_destroy(struct drm_device *dev);
1772 void drm_gem_object_release(struct drm_gem_object *obj);
1773 void drm_gem_object_free(struct kref *kref);
1774 int drm_gem_object_init(struct drm_device *dev,
1775 			struct drm_gem_object *obj, size_t size);
1776 void drm_gem_private_object_init(struct drm_device *dev,
1777 				 struct drm_gem_object *obj, size_t size);
1778 #ifdef __NetBSD__
1779 void drm_gem_pager_reference(struct uvm_object *);
1780 void drm_gem_pager_detach(struct uvm_object *);
1781 int drm_gem_mmap_object(struct drm_device *, off_t, size_t, int,
1782     struct uvm_object **, voff_t *, struct file *);
1783 int drm_gem_or_legacy_mmap_object(struct drm_device *, off_t, size_t, int,
1784     struct uvm_object **, voff_t *, struct file *);
1785 #else
1786 void drm_gem_vm_open(struct vm_area_struct *vma);
1787 void drm_gem_vm_close(struct vm_area_struct *vma);
1788 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1789 		     struct vm_area_struct *vma);
1790 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
1791 #endif
1792 
1793 #include <drm/drm_global.h>
1794 
1795 static inline void
1796 drm_gem_object_reference(struct drm_gem_object *obj)
1797 {
1798 	kref_get(&obj->refcount);
1799 }
1800 
1801 static inline void
1802 drm_gem_object_unreference(struct drm_gem_object *obj)
1803 {
1804 	if (obj != NULL)
1805 		kref_put(&obj->refcount, drm_gem_object_free);
1806 }
1807 
1808 static inline void
1809 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
1810 {
1811 	if (obj != NULL) {
1812 		struct drm_device *const dev = obj->dev;
1813 		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
1814 			&dev->struct_mutex))
1815 			mutex_unlock(&dev->struct_mutex);
1816 	}
1817 }
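/*
 * Illustrative sketch only: a typical caller takes a reference via
 * handle lookup and drops it when done.  drm_gem_object_lookup()
 * (declared below) returns the object with a reference already held;
 * the unlocked variant is used here on the assumption that the caller
 * does not hold struct_mutex.  dev, file_priv and handle come from the
 * caller's context.
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... use obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */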
1818 
1819 int drm_gem_handle_create_tail(struct drm_file *file_priv,
1820 			       struct drm_gem_object *obj,
1821 			       u32 *handlep);
1822 int drm_gem_handle_create(struct drm_file *file_priv,
1823 			  struct drm_gem_object *obj,
1824 			  u32 *handlep);
1825 int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
1826 
1827 
1828 void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
1829 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
1830 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
1831 
1832 struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
1833 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
1834 		bool dirty, bool accessed);
1835 
1836 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1837 					     struct drm_file *filp,
1838 					     u32 handle);
1839 int drm_gem_close_ioctl(struct drm_device *dev, void *data,
1840 			struct drm_file *file_priv);
1841 int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1842 			struct drm_file *file_priv);
1843 int drm_gem_open_ioctl(struct drm_device *dev, void *data,
1844 		       struct drm_file *file_priv);
1845 void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
1846 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1847 
1848 extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
1849 extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
1850 extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
1851 
1852 static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
1853 							 unsigned int token)
1854 {
1855 	struct drm_map_list *_entry;
1856 	list_for_each_entry(_entry, &dev->maplist, head)
1857 	    if (_entry->user_token == token)
1858 		return _entry->map;
1859 	return NULL;
1860 }
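/*
 * Illustrative sketch only: a legacy driver typically resolves a
 * user-supplied map token from an ioctl argument into the kernel's
 * view of the mapping.  FOO_SAREA_TOKEN stands in for whatever token
 * the driver actually received.
 *
 *	struct drm_local_map *map;
 *
 *	map = drm_core_findmap(dev, FOO_SAREA_TOKEN);
 *	if (map == NULL)
 *		return -EINVAL;
 */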
1861 
1862 static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1863 {
1864 }
1865 
1866 #include <drm/drm_mem_util.h>
1867 
1868 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
1869 				 struct device *parent);
1870 void drm_dev_ref(struct drm_device *dev);
1871 void drm_dev_unref(struct drm_device *dev);
1872 int drm_dev_register(struct drm_device *dev, unsigned long flags);
1873 void drm_dev_unregister(struct drm_device *dev);
1874 
1875 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
1876 void drm_minor_release(struct drm_minor *minor);
1877 
1878 /*@}*/
1879 
1880 /* PCI section */
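/*
 * If the driver provides a device_is_agp hook, a return of 0 or 1 is
 * taken as a definitive answer; a return of 2 means "let the core
 * decide", in which case the PCI AGP capability is probed instead.
 */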
1881 static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
1882 {
1883 	if (dev->driver->device_is_agp != NULL) {
1884 		int err = (*dev->driver->device_is_agp) (dev);
1885 
1886 		if (err != 2) {
1887 			return err;
1888 		}
1889 	}
1890 
1891 	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1892 }
1893 void drm_pci_agp_destroy(struct drm_device *dev);
1894 
1895 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
1896 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
1897 extern int drm_get_pci_dev(struct pci_dev *pdev,
1898 			   const struct pci_device_id *ent,
1899 			   struct drm_driver *driver);
1900 
1901 #define DRM_PCIE_SPEED_25 1
1902 #define DRM_PCIE_SPEED_50 2
1903 #define DRM_PCIE_SPEED_80 4
1904 
1905 extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
1906 
1907 /* platform section */
1908 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
1909 
1910 /* returns true if currently okay to sleep */
1911 static __inline__ bool drm_can_sleep(void)
1912 {
1913 #ifdef __NetBSD__
1914 	return false;		/* XXX */
1915 #else
1916 	if (in_atomic() || in_dbg_master() || irqs_disabled())
1917 		return false;
1918 	return true;
1919 #endif
1920 }
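/*
 * Illustrative sketch only: a driver wait loop might use this to pick
 * between sleeping and busy-waiting.  foo_ring_idle() is a
 * hypothetical driver function; msleep()/udelay() are the usual Linux
 * delay primitives.
 *
 *	while (!foo_ring_idle(dev)) {
 *		if (drm_can_sleep())
 *			msleep(1);
 *		else
 *			udelay(10);
 *	}
 */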
1921 
1922 #ifdef __NetBSD__
1923 static inline bool
1924 DRM_IS_BUS_SPACE(struct drm_local_map *map)
1925 {
1926 	switch (map->type) {
1927 	case _DRM_FRAME_BUFFER:
1928 		panic("I don't know how to access drm frame buffer memory!");
1929 
1930 	case _DRM_REGISTERS:
1931 		return true;
1932 
1933 	case _DRM_SHM:
1934 		panic("I don't know how to access drm shared memory!");
1935 
1936 	case _DRM_AGP:
1937 		panic("I don't know how to access drm agp memory!");
1938 
1939 	case _DRM_SCATTER_GATHER:
1940 		panic("I don't know how to access drm scatter-gather memory!");
1941 
1942 	case _DRM_CONSISTENT:
1943 		/*
1944 		 * XXX Old drm uses bus space access for this, but
1945 		 * consistent maps don't have bus space handles!  They
1946 		 * do, however, have kernel virtual addresses in the
1947 		 * map->handle, so maybe that's right.
1948 		 */
1949 #if 0
1950 		return false;
1951 #endif
1952 		panic("I don't know how to access drm consistent memory!");
1953 
1954 	default:
1955 		panic("I don't know what kind of memory you mean!");
1956 	}
1957 }
1958 
1959 static inline uint8_t
1960 DRM_READ8(struct drm_local_map *map, bus_size_t offset)
1961 {
1962 	if (DRM_IS_BUS_SPACE(map))
1963 		return bus_space_read_1(map->lm_data.bus_space.bst,
1964 		    map->lm_data.bus_space.bsh, offset);
1965 	else
1966 		return *(volatile uint8_t *)((vaddr_t)map->handle + offset);
1967 }
1968 
1969 static inline uint16_t
1970 DRM_READ16(struct drm_local_map *map, bus_size_t offset)
1971 {
1972 	if (DRM_IS_BUS_SPACE(map))
1973 		return bus_space_read_2(map->lm_data.bus_space.bst,
1974 		    map->lm_data.bus_space.bsh, offset);
1975 	else
1976 		return *(volatile uint16_t *)((vaddr_t)map->handle + offset);
1977 }
1978 
1979 static inline uint32_t
1980 DRM_READ32(struct drm_local_map *map, bus_size_t offset)
1981 {
1982 	if (DRM_IS_BUS_SPACE(map))
1983 		return bus_space_read_4(map->lm_data.bus_space.bst,
1984 		    map->lm_data.bus_space.bsh, offset);
1985 	else
1986 		return *(volatile uint32_t *)((vaddr_t)map->handle + offset);
1987 }
1988 
1989 static inline uint64_t
1990 DRM_READ64(struct drm_local_map *map, bus_size_t offset)
1991 {
1992 	if (DRM_IS_BUS_SPACE(map)) {
1993 #if _LP64			/* XXX How to detect bus_space_read_8?  */
1994 		return bus_space_read_8(map->lm_data.bus_space.bst,
1995 		    map->lm_data.bus_space.bsh, offset);
1996 #elif _BYTE_ORDER == _LITTLE_ENDIAN
1997 		/* XXX Yes, this is sketchy.  */
1998 		return bus_space_read_4(map->lm_data.bus_space.bst,
1999 		    map->lm_data.bus_space.bsh, offset) |
2000 		    ((uint64_t)bus_space_read_4(map->lm_data.bus_space.bst,
2001 			map->lm_data.bus_space.bsh, (offset + 4)) << 32);
2002 #else
2003 		/* XXX Yes, this is sketchy.  */
2004 		return bus_space_read_4(map->lm_data.bus_space.bst,
2005 		    map->lm_data.bus_space.bsh, (offset + 4)) |
2006 		    ((uint64_t)bus_space_read_4(map->lm_data.bus_space.bst,
2007 			map->lm_data.bus_space.bsh, offset) << 32);
2008 #endif
2009 	} else {
2010 		return *(volatile uint64_t *)((vaddr_t)map->handle + offset);
2011 	}
2012 }
2013 
2014 static inline void
2015 DRM_WRITE8(struct drm_local_map *map, bus_size_t offset, uint8_t value)
2016 {
2017 	if (DRM_IS_BUS_SPACE(map))
2018 		bus_space_write_1(map->lm_data.bus_space.bst,
2019 		    map->lm_data.bus_space.bsh, offset, value);
2020 	else
2021 		*(volatile uint8_t *)((vaddr_t)map->handle + offset) = value;
2022 }
2023 
2024 static inline void
2025 DRM_WRITE16(struct drm_local_map *map, bus_size_t offset, uint16_t value)
2026 {
2027 	if (DRM_IS_BUS_SPACE(map))
2028 		bus_space_write_2(map->lm_data.bus_space.bst,
2029 		    map->lm_data.bus_space.bsh, offset, value);
2030 	else
2031 		*(volatile uint16_t *)((vaddr_t)map->handle + offset) = value;
2032 }
2033 
2034 static inline void
2035 DRM_WRITE32(struct drm_local_map *map, bus_size_t offset, uint32_t value)
2036 {
2037 	if (DRM_IS_BUS_SPACE(map))
2038 		bus_space_write_4(map->lm_data.bus_space.bst,
2039 		    map->lm_data.bus_space.bsh, offset, value);
2040 	else
2041 		*(volatile uint32_t *)((vaddr_t)map->handle + offset) = value;
2042 }
2043 
2044 static inline void
2045 DRM_WRITE64(struct drm_local_map *map, bus_size_t offset, uint64_t value)
2046 {
2047 	if (DRM_IS_BUS_SPACE(map)) {
2048 #if _LP64			/* XXX How to detect bus_space_write_8?  */
2049 		bus_space_write_8(map->lm_data.bus_space.bst,
2050 		    map->lm_data.bus_space.bsh, offset, value);
2051 #elif _BYTE_ORDER == _LITTLE_ENDIAN
2052 		bus_space_write_4(map->lm_data.bus_space.bst,
2053 		    map->lm_data.bus_space.bsh, offset, (value & 0xffffffffU));
2054 		bus_space_write_4(map->lm_data.bus_space.bst,
2055 		    map->lm_data.bus_space.bsh, (offset + 4), (value >> 32));
2056 #else
2057 		bus_space_write_4(map->lm_data.bus_space.bst,
2058 		    map->lm_data.bus_space.bsh, offset, (value >> 32));
2059 		bus_space_write_4(map->lm_data.bus_space.bst,
2060 		    map->lm_data.bus_space.bsh, (offset + 4),
2061 		    (value & 0xffffffffU));
2062 #endif
2063 	} else {
2064 		*(volatile uint64_t *)((vaddr_t)map->handle + offset) = value;
2065 	}
2066 }
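/*
 * Illustrative sketch only (FOO_STATUS and FOO_CONTROL are
 * hypothetical register offsets): these accessors let a driver touch
 * device registers through a drm_local_map without caring whether the
 * map is backed by bus space or by a kernel virtual address.
 *
 *	uint32_t status = DRM_READ32(map, FOO_STATUS);
 *
 *	DRM_WRITE32(map, FOO_CONTROL, status | 1);
 */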
2067 #endif	/* defined(__NetBSD__) */
2068 
2069 #ifdef __NetBSD__
2070 
2071 /* XXX This is pretty kludgerific.  */
2072 
2073 #include <linux/io-mapping.h>
2074 
2075 static inline struct io_mapping *
2076 drm_io_mapping_create_wc(struct drm_device *dev, resource_size_t addr,
2077     unsigned long size)
2078 {
2079 	return bus_space_io_mapping_create_wc(dev->bst, addr, size);
2080 }
2081 
2082 #endif	/* defined(__NetBSD__) */
2083 
2084 #ifdef __NetBSD__
2085 extern const struct cdevsw drm_cdevsw;
2086 #endif
2087 
2088 #endif				/* __KERNEL__ */
2089 #endif
2090