/* $OpenBSD: i915_drv.h,v 1.74 2016/04/05 21:22:02 kettenis Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include "i915_reg.h"
#include <dev/pci/drm/i915_drm.h>
#include "intel_bios.h"
#include "intel_ringbuffer.h"

struct sg_table;
#define i2c_adapter i2c_controller

#define CONFIG_DRM_I915_FBDEV 1
#define CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT 1

#include "acpi.h"
#if NACPI > 0
#define CONFIG_ACPI
#endif

#include "drm.h"
#include "vga.h"

#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/ic/vgareg.h>
#include <dev/ic/vgavar.h>

#include <sys/task.h>
#include <dev/pci/vga_pcivar.h>
#include <dev/wscons/wsconsio.h>
#include <dev/wscons/wsdisplayvar.h>
#include <dev/rasops/rasops.h>

extern void intel_gtt_chipset_flush(void);
extern int intel_gmch_probe(struct pci_dev *, struct pci_dev *, void *);
extern void intel_gtt_get(size_t *, size_t *, phys_addr_t *, unsigned long *);
extern void intel_gmch_remove(void);

#ifdef __i386__

/*
 * i386 has no native 64-bit bus_space accessors, so synthesize them from
 * two 32-bit accesses, low word first.  Note the combined access is not
 * atomic: the register may change between the two halves.
 */
static inline u_int64_t
bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{
	u_int64_t lo, hi;

	lo = bus_space_read_4(t, h, o);
	hi = bus_space_read_4(t, h, o + 4);
	return (lo | (hi << 32));
}

static inline void
bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    u_int64_t v)
{
	bus_space_write_4(t, h, o, v);
	bus_space_write_4(t, h, o + 4, v >> 32);
}
88 
89 #endif
90 
/*
 * The Bridge device's PCI config space has information about the
 * fb aperture size and the amount of pre-reserved memory.
 * This is all handled in the intel-gtt.ko module. i915.ko only
 * cares about the vga bit for the vga arbiter.
 */
#define INTEL_GMCH_CTRL		0x52
#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
#define SNB_GMCH_CTRL		0x50
#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
#define    SNB_GMCH_GGMS_MASK	0x3
#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
#define    SNB_GMCH_GMS_MASK    0x1f
#define    BDW_GMCH_GGMS_SHIFT	6
#define    BDW_GMCH_GGMS_MASK	0x3
#define    BDW_GMCH_GMS_SHIFT   8
#define    BDW_GMCH_GMS_MASK    0xff

#define I830_GMCH_CTRL			0x52

#define I855_GMCH_GMS_MASK		0xF0
#define I855_GMCH_GMS_STOLEN_0M		0x0
#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)

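/*
 * Decoding sketch for the SNB fields above (illustrative only; the helper
 * name is invented here, and the 32 MB granularity of the GMS field is an
 * assumption from the Sandy Bridge documentation, not something this header
 * defines):
 *
 *	static inline unsigned int
 *	snb_gmch_stolen_mb(u16 snb_gmch_ctl)
 *	{
 *		return ((snb_gmch_ctl >> SNB_GMCH_GMS_SHIFT) &
 *		    SNB_GMCH_GMS_MASK) * 32;
 *	}
 *
 * Broadwell keeps the same two fields but at different bit positions, hence
 * the separate BDW_GMCH_* shift/mask pairs.
 */
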
struct intel_gtt {
	/* Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
	/* Share the scratch page dma with ppgtts. */
	bus_addr_t scratch_page_dma;
	struct drm_dmamem *scratch_page;
	/* for ppgtt PDE access */
	bus_space_handle_t gtt;
	/* needed for ioremap in drm/i915 */
	bus_addr_t gma_bus_addr;
};

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

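/*
 * Example (sketch): these helpers turn enum values into the letters used in
 * debug output, e.g.
 *
 *	DRM_DEBUG_KMS("pipe %c\n", pipe_name(PIPE_B));
 *
 * prints "pipe B", which is why the enums above must stay zero-based and
 * dense.
 */
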
enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 1

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

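/*
 * Example (sketch): a mask of the domains needed to scan out on pipe B
 * through transcoder B would be built as
 *
 *	BIT(POWER_DOMAIN_PIPE(PIPE_B)) |
 *	    BIT(POWER_DOMAIN_TRANSCODER(TRANSCODER_B))
 *
 * The always-on masks below are constructed the same way.
 */
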
#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |	\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

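/*
 * Usage sketch for the iterator above (illustrative only; struct
 * intel_encoder is declared in intel_drv.h, so this is not real code for
 * this header):
 *
 *	struct intel_encoder *encoder;
 *	int n = 0;
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		n++;
 *
 * Because the macro is a filtered list walk, a break inside the body works
 * as expected.
 */
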
struct inteldrm_softc;
#define drm_i915_private inteldrm_softc

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

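/*
 * The two M/N pairs encode ratios the hardware needs (a sketch of the math,
 * not of the exact reduction/rounding the implementation performs):
 *
 *	gmch_m / gmch_n = (bpp * pixel_clock) / (8 * nlanes * link_clock)
 *	link_m / link_n = pixel_clock / link_clock
 *
 * tu is the transfer unit size used on the link.
 */
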
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
#ifdef __linux__
	struct page **page_list;
#endif
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#ifdef __linux__
struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#endif
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 bbstate[I915_NUM_RINGS];
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr[I915_NUM_RINGS];
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		bool valid;
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	int hangcheck_score[I915_NUM_RINGS];
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};

struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
							int fw_engine);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
							int fw_engine);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
				uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
				uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
				uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
				uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	unsigned fw_rendercount;
	unsigned fw_mediacount;

	struct delayed_work force_wake_work;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

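/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro: expanding it with DEFINE_FLAG and
 * SEP_SEMICOLON above produces "u8 is_mobile:1; u8 is_i85x:1; ..." inside
 * struct intel_device_info.  Other call sites can reuse the same flag list
 * with their own func/sep pair, e.g. (sketch):
 *
 *	#define PRINT_S(name) "%s"
 *	#define SEP_EMPTY
 *	#define PRINT_FLAG(name) info->name ? #name "," : ""
 *	#define SEP_COMMA ,
 *	DRM_DEBUG_DRIVER("flags="
 *	    DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) "\n",
 *	    DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
 *
 * so the flag list only has to be maintained in one place.
 */
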
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start;		/* Start offset always 0 for dri2 */
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct drm_dmamem *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid); /* Create a valid PTE */
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct vm_page **pages,
			       unsigned int num_entries,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			  size_t *stolen, phys_addr_t *mappable_base,
			  unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	union {
		struct vm_page **pt_pages;
		struct vm_page *gen8_pt_pages;
	};
	struct vm_page *pd_pages;
	int num_pd_pages;
	int num_pt_pages;
	union {
		uint32_t pd_offset;
		dma_addr_t pd_dma_addr[4];
	};
	union {
		dma_addr_t *pt_dma_addr;
		dma_addr_t *gen8_pt_dma_addr[4];
	};
	int (*enable)(struct drm_device *dev);
};

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

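/*
 * Quirks are tested as simple bits in dev_priv->quirks; e.g. the brightness
 * inversion quirk is consumed along these lines (sketch, not the verbatim
 * panel code):
 *
 *	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 *		level = max_level - level;
 */
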
struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_controller controller;
	u32 port;
	u32 speed;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_HIST_CTL_B;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* The variables below and all the RPS hardware state are protected
	 * by dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct rwlock hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	unsigned long domains;
	void *data;
	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
		    bool enable);
	bool (*is_enabled)(struct drm_device *dev,
			   struct i915_power_well *power_well);
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	int power_well_count;

	struct rwlock lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT.  It needs to be
	 * replaced with a semaphore.  It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

//	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct timeout hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit
	 * set) mean that a reset is in progress, and even values mean that
	 * the (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If a reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

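/*
 * Decoding sketch (the driver defines equivalent inline helpers elsewhere;
 * shown here only to make the counter encoding concrete):
 *
 *	reset in progress:  atomic_read(&error->reset_counter) &
 *			    (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)
 *	terminally wedged:  atomic_read(&error->reset_counter) & I915_WEDGED
 *
 * The low bit toggles on at reset start and off at completion, so an odd
 * counter (or a set I915_WEDGED bit) means waiters must bail out.
 */
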
	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		u16 pwm_freq_hz;
		bool active_low_pwm;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power
 * well is disabled and the GPU is idle. When these conditions are met, we
 * then take care of the rest manually: we disable the interrupts and clocks,
 * and switch the LCPLL refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ the "enable" variable will become true, which means that it can
 * be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens, but if it actually happens we'll also update the variables
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
 *
 * For more, read "Display Sequences for Package C8" on our documentation.
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;
	int disable_count;
	struct rwlock lock;
	struct delayed_work enable_work;

	struct {
		uint32_t deimr;
		uint32_t sdeimr;
		uint32_t gtimr;
		uint32_t gtier;
		uint32_t gen6_pmimr;
	} regsave;
};

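/*
 * Sketch of the disable_count discipline described above (illustrative;
 * hsw_disable_package_c8() is the function the comment refers to, and the
 * matching enable call is assumed to follow the same naming):
 *
 *	hsw_disable_package_c8(dev_priv);	ups disable_count, blocks PC8+
 *	... touch state that must stay powered ...
 *	hsw_enable_package_c8(dev_priv);	drops disable_count; PC8+ may
 *						be re-entered after the
 *						enable_work delay elapses
 */
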
struct i915_runtime_pm {
	bool suspended;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

typedef struct inteldrm_softc {
	struct device sc_dev;
	struct drm_device *dev;
	bus_dma_tag_t dmat;
	bus_space_tag_t bst;
	struct agp_map *agph;
	bus_space_handle_t opregion_ioh;

	const struct intel_device_info *info;

	int relative_constants_mode;

	pci_chipset_tag_t pc;
	pcitag_t tag;
	struct extent *memex;
	pci_intr_handle_t ih;
	void *irqh;

	struct vga_pci_bar bar;
	struct vga_pci_bar *regs;

	int nscreens;
	void (*switchcb)(void *, int, int);
	void *switchcbarg;
	void *switchcookie;
	struct task switchtask;
	struct rasops_info ro;

	struct task burner_task;
	int burner_dpms_mode;

	struct backlight_device {
		struct intel_connector *connector;
		struct {
			uint32_t brightness;
			uint32_t max_brightness;
		} props;
	} backlight;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;
	union flush {
		struct {
			bus_space_tag_t		bst;
			bus_space_handle_t	bsh;
		} i9xx;
		struct {
			bus_dma_segment_t	seg;
			caddr_t			kva;
		} i8xx;
	}			 ifp;
	struct vm_page *pgs;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

#ifdef noyet
	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;
#endif

	/* DPIO indirect register protection */
	struct rwlock dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timeout hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	spinlock_t backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct rwlock modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;
#endif

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct ilk_wm_values hw;
	} wm;

	struct i915_package_c8 pc8;

	struct i915_runtime_pm pm;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;
} drm_i915_private_t;

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

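/*
 * Usage sketch (illustrative only):
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG("ring %d (%s) initialised\n", i, ring->name);
 *
 * The intel_ring_initialized() test means the body only ever sees rings the
 * hardware actually has, so callers need no per-generation checks.
 */
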
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

#ifdef __linux__
	struct sg_table *pages;
#else
	struct vm_page **pages;
#endif
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	unsigned long user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

1855 /**
1856  * Request queue structure.
1857  *
1858  * The request queue allows us to note sequence numbers that have been emitted
1859  * and may be associated with active buffers to be retired.
1860  *
1861  * By keeping this list, we can avoid having to do questionable
1862  * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1863  * an emission time with seqnos for tracking how far ahead of the GPU we are.
1864  */
1865 struct drm_i915_gem_request {
1866 	/** On which ring this request was generated */
1867 	struct intel_ring_buffer *ring;
1868 
1869 	/** GEM sequence number associated with this request. */
1870 	uint32_t seqno;
1871 
1872 	/** Position in the ringbuffer of the start of the request */
1873 	u32 head;
1874 
1875 	/** Position in the ringbuffer of the end of the request */
1876 	u32 tail;
1877 
1878 	/** Context related to this request */
1879 	struct i915_hw_context *ctx;
1880 
1881 	/** Batch buffer related to this request if any */
1882 	struct drm_i915_gem_object *batch_obj;
1883 
1884 	/** Time at which this request was emitted, in jiffies. */
1885 	unsigned long emitted_jiffies;
1886 
1887 	/** global list entry for this request */
1888 	struct list_head list;
1889 
1890 	struct drm_i915_file_private *file_priv;
1891 	/** file_priv list entry for this request */
1892 	struct list_head client_list;
1893 };
1894 
1895 struct drm_i915_file_private {
1896 	struct drm_i915_private *dev_priv;
1897 
1898 	struct {
1899 		spinlock_t lock;
1900 		struct list_head request_list;
1901 		struct delayed_work idle_work;
1902 	} mm;
1903 	struct idr context_idr;
1904 
1905 	struct i915_ctx_hang_stats hang_stats;
1906 	atomic_t rps_wait_boost;
1907 };
1908 
1909 #define INTEL_INFO(dev)	(to_i915(dev)->info)
1910 
1911 #define IS_I830(dev)		((dev)->pdev->device == 0x3577)
1912 #define IS_845G(dev)		((dev)->pdev->device == 0x2562)
1913 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
1914 #define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
1915 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
1916 #define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
1917 #define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
1918 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
1919 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
1920 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
1921 #define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
1922 #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
1923 #define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
1924 #define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
1925 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
1926 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
1927 #define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
1928 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
1929 #define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
1930 				 (dev)->pdev->device == 0x0152 || \
1931 				 (dev)->pdev->device == 0x015a)
1932 #define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
1933 				 (dev)->pdev->device == 0x0106 || \
1934 				 (dev)->pdev->device == 0x010A)
1935 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
1936 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
1937 #define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
1938 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
1939 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
1940 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1941 #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
1942 				 (((dev)->pdev->device & 0xf) == 0x6 || \
1943 				 ((dev)->pdev->device & 0xf) == 0xb || \
1944 				 ((dev)->pdev->device & 0xf) == 0xe))
1945 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
1946 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1947 #define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1948 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
1949 				 ((dev)->pdev->device & 0x00F0) == 0x0020)
1950 /* ULX machines are also considered ULT. */
1951 #define IS_HSW_ULX(dev)		((dev)->pdev->device == 0x0A0E || \
1952 				 (dev)->pdev->device == 0x0A1E)
1953 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1954 
1955 /*
1956  * The genX designation typically refers to the render engine, so render
1957  * capability related checks should use IS_GEN, while display and other checks
1958  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
1959  * chips, etc.).
1960  */
1961 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
1962 #define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
1963 #define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
1964 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
1965 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
1966 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
1967 #define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
1968 
1969 #define RENDER_RING		(1<<RCS)
1970 #define BSD_RING		(1<<VCS)
1971 #define BLT_RING		(1<<BCS)
1972 #define VEBOX_RING		(1<<VECS)
1973 #define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
1974 #define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
1975 #define HAS_VEBOX(dev)            (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
1976 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
1977 #define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
1978 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
1979 
1980 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
1981 #define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1982 
1983 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
1984 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
1985 
1986 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
1987 #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
1988 /*
1989  * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
1990  * even when in MSI mode. This results in spurious interrupt warnings if the
1991  * legacy irq no. is shared with another device. The kernel then disables that
1992  * interrupt source and so prevents the other device from working properly.
1993  */
1994 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
1995 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
1996 
1997 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1998  * rows, which changed the alignment requirements and fence programming.
1999  */
2000 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
2001 						      IS_I915GM(dev)))
2002 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
2003 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
2004 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
2005 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
2006 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
2007 
2008 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
2009 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
2010 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
2011 
2012 #define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
2013 
2014 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
2015 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
2016 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
2017 #define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
2018 #define HAS_RUNTIME_PM(dev)	(IS_HASWELL(dev))
2019 
2020 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
2021 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2022 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
2023 #define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
2024 #define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
2025 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
2026 
2027 #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
2028 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2029 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2030 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
2031 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
2032 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
2033 
2034 /* DPF == dynamic parity feature */
2035 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2036 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
2037 
2038 #define GT_FREQUENCY_MULTIPLIER 50
2039 
2040 #include "i915_trace.h"
2041 
2042 extern const struct drm_ioctl_desc i915_ioctls[];
2043 extern int i915_max_ioctl;
2044 extern unsigned int i915_fbpercrtc __always_unused;
2045 extern int i915_panel_ignore_lid __read_mostly;
2046 extern unsigned int i915_powersave __read_mostly;
2047 extern int i915_semaphores __read_mostly;
2048 extern unsigned int i915_lvds_downclock __read_mostly;
2049 extern int i915_lvds_channel_mode __read_mostly;
2050 extern int i915_panel_use_ssc __read_mostly;
2051 extern int i915_vbt_sdvo_panel_type __read_mostly;
2052 extern int i915_enable_rc6 __read_mostly;
2053 extern int i915_enable_fbc __read_mostly;
2054 extern bool i915_enable_hangcheck __read_mostly;
2055 extern int i915_enable_ppgtt __read_mostly;
2056 extern int i915_enable_psr __read_mostly;
2057 extern unsigned int i915_preliminary_hw_support __read_mostly;
2058 extern int i915_disable_power_well __read_mostly;
2059 extern int i915_enable_ips __read_mostly;
2060 extern bool i915_fastboot __read_mostly;
2061 extern int i915_enable_pc8 __read_mostly;
2062 extern int i915_pc8_timeout __read_mostly;
2063 extern bool i915_prefault_disable __read_mostly;
2064 
2065 #ifdef __linux__
2066 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
2067 extern int i915_resume(struct drm_device *dev);
2068 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
2069 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
2070 #endif
2071 
2072 /* i915_dma.c */
2073 void i915_update_dri1_breadcrumb(struct drm_device *dev);
2074 extern void i915_kernel_lost_context(struct drm_device * dev);
2075 extern int i915_driver_load(struct drm_device *, unsigned long flags);
2076 extern int i915_driver_unload(struct drm_device *);
2077 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
2078 extern void i915_driver_lastclose(struct drm_device * dev);
2079 extern void i915_driver_preclose(struct drm_device *dev,
2080 				 struct drm_file *file_priv);
2081 extern void i915_driver_postclose(struct drm_device *dev,
2082 				  struct drm_file *file_priv);
2083 extern int i915_driver_device_is_agp(struct drm_device * dev);
2084 #ifdef CONFIG_COMPAT
2085 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2086 			      unsigned long arg);
2087 #endif
2088 extern int i915_emit_box(struct drm_device *dev,
2089 			 struct drm_clip_rect *box,
2090 			 int DR1, int DR4);
2091 extern int intel_gpu_reset(struct drm_device *dev);
2092 extern int i915_reset(struct drm_device *dev);
2093 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2094 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2095 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2096 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2097 
2098 extern void intel_console_resume(struct work_struct *work);
2099 
2100 /* i915_irq.c */
2101 void i915_queue_hangcheck(struct drm_device *dev);
2102 void i915_handle_error(struct drm_device *dev, bool wedged);
2103 
2104 extern void intel_irq_init(struct drm_device *dev);
2105 extern void intel_hpd_init(struct drm_device *dev);
2106 
2107 extern void intel_uncore_sanitize(struct drm_device *dev);
2108 extern void intel_uncore_early_sanitize(struct drm_device *dev);
2109 extern void intel_uncore_init(struct drm_device *dev);
2110 extern void intel_uncore_check_errors(struct drm_device *dev);
2111 extern void intel_uncore_fini(struct drm_device *dev);
2112 
2113 void
2114 i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
2115 
2116 void
2117 i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
2118 
2119 /* i915_gem.c */
2120 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
2121 			struct drm_file *file_priv);
2122 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2123 			  struct drm_file *file_priv);
2124 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
2125 			 struct drm_file *file_priv);
2126 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2127 			  struct drm_file *file_priv);
2128 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2129 			struct drm_file *file_priv);
2130 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2131 			struct drm_file *file_priv);
2132 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2133 			      struct drm_file *file_priv);
2134 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
2135 			     struct drm_file *file_priv);
2136 int i915_gem_execbuffer(struct drm_device *dev, void *data,
2137 			struct drm_file *file_priv);
2138 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
2139 			 struct drm_file *file_priv);
2140 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2141 		       struct drm_file *file_priv);
2142 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2143 			 struct drm_file *file_priv);
2144 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2145 			struct drm_file *file_priv);
2146 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2147 			       struct drm_file *file);
2148 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2149 			       struct drm_file *file);
2150 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2151 			    struct drm_file *file_priv);
2152 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2153 			   struct drm_file *file_priv);
2154 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2155 			   struct drm_file *file_priv);
2156 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2157 			   struct drm_file *file_priv);
2158 int i915_gem_set_tiling(struct drm_device *dev, void *data,
2159 			struct drm_file *file_priv);
2160 int i915_gem_get_tiling(struct drm_device *dev, void *data,
2161 			struct drm_file *file_priv);
2162 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2163 				struct drm_file *file_priv);
2164 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2165 			struct drm_file *file_priv);
2166 void i915_gem_load(struct drm_device *dev);
2167 void *i915_gem_object_alloc(struct drm_device *dev);
2168 void i915_gem_object_free(struct drm_i915_gem_object *obj);
2169 void i915_gem_object_init(struct drm_i915_gem_object *obj,
2170 			 const struct drm_i915_gem_object_ops *ops);
2171 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2172 						  size_t size);
2173 void i915_gem_free_object(struct drm_gem_object *obj);
2174 void i915_gem_vma_destroy(struct i915_vma *vma);
2175 
2176 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2177 				     struct i915_address_space *vm,
2178 				     uint32_t alignment,
2179 				     bool map_and_fenceable,
2180 				     bool nonblocking);
2181 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
2182 int __must_check i915_vma_unbind(struct i915_vma *vma);
2183 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
2184 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2185 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2186 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2187 void i915_gem_lastclose(struct drm_device *dev);
2188 
2189 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
2190 #ifdef __linux__
2191 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2192 {
2193 	struct sg_page_iter sg_iter;
2194 
2195 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
2196 		return sg_page_iter_page(&sg_iter);
2197 
2198 	return NULL;
2199 }
2200 #else
2201 static inline struct vm_page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2202 {
2203 	return (obj->pages[n]);
2204 }
2205 #endif
2206 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
2207 {
2208 	BUG_ON(obj->pages == NULL);
2209 	obj->pages_pin_count++;
2210 }
2211 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
2212 {
2213 	BUG_ON(obj->pages_pin_count == 0);
2214 	obj->pages_pin_count--;
2215 }
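
/*
 * Illustrative pairing of the helpers above (a sketch, not a real caller):
 * the backing pages must be populated before they can be pinned, and every
 * pin must be balanced by an unpin once the pages are no longer accessed.
 *
 *	if (i915_gem_object_get_pages(obj) == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		... access obj->pages ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */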
2216 
2217 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2218 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
2219 			 struct intel_ring_buffer *to);
2220 void i915_vma_move_to_active(struct i915_vma *vma,
2221 			     struct intel_ring_buffer *ring);
2222 int i915_gem_dumb_create(struct drm_file *file_priv,
2223 			 struct drm_device *dev,
2224 			 struct drm_mode_create_dumb *args);
2225 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2226 		      uint32_t handle, uint64_t *offset);
2227 /**
2228  * Returns true if seq1 is later than seq2, handling 32-bit seqno wraparound.
2229  */
2230 static inline bool
2231 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
2232 {
2233 	return (int32_t)(seq1 - seq2) >= 0;
2234 }
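
/*
 * Wraparound example for the comparison above (illustrative values): with
 * seq1 == 0x00000002 and seq2 == 0xfffffffe the unsigned difference is 4,
 * which is non-negative as an int32_t, so the newer-but-numerically-smaller
 * seq1 is correctly reported as having passed seq2.
 */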
2235 
2236 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
2237 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
2238 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
2239 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
2240 
2241 static inline bool
2242 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
2243 {
2244 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
2245 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2246 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
2247 		return true;
2248 	} else
2249 		return false;
2250 }
2251 
2252 static inline void
2253 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
2254 {
2255 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
2256 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2257 		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
2258 		dev_priv->fence_regs[obj->fence_reg].pin_count--;
2259 	}
2260 }
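
/*
 * The two helpers above are intended to be balanced; a minimal sketch of
 * the assumed usage (not taken from a real caller):
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... perform fenced GPU access ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */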
2261 
2262 bool i915_gem_retire_requests(struct drm_device *dev);
2263 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
2264 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2265 				      bool interruptible);
2266 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2267 {
2268 	return unlikely(atomic_read(&error->reset_counter)
2269 			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
2270 }
2271 
2272 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
2273 {
2274 	return atomic_read(&error->reset_counter) & I915_WEDGED;
2275 }
2276 
2277 static inline u32 i915_reset_count(struct i915_gpu_error *error)
2278 {
2279 	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
2280 }
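
/*
 * Hedged reading of the encoding assumed by the helpers above: the reset
 * counter advances once when a reset begins (setting
 * I915_RESET_IN_PROGRESS_FLAG) and once when it completes, so masking off
 * I915_WEDGED and computing (counter + 1) / 2 counts the resets, rounding
 * up while one is still in flight.
 */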
2281 
2282 void i915_gem_reset(struct drm_device *dev);
2283 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2284 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2285 int __must_check i915_gem_init(struct drm_device *dev);
2286 int __must_check i915_gem_init_hw(struct drm_device *dev);
2287 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
2288 void i915_gem_init_swizzling(struct drm_device *dev);
2289 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
2290 int __must_check i915_gpu_idle(struct drm_device *dev);
2291 int __must_check i915_gem_suspend(struct drm_device *dev);
2292 int __i915_add_request(struct intel_ring_buffer *ring,
2293 		       struct drm_file *file,
2294 		       struct drm_i915_gem_object *batch_obj,
2295 		       u32 *seqno);
2296 #define i915_add_request(ring, seqno) \
2297 	__i915_add_request(ring, NULL, NULL, seqno)
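/* i915_add_request() is thus the convenience form: a request with no
 * originating file and no batch object attached. */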
2298 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
2299 				 uint32_t seqno);
2300 int i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
2301 		   off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages,
2302 		   int centeridx, vm_prot_t access_type, int flags);
2303 int __must_check
2304 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
2305 				  bool write);
2306 int __must_check
2307 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2308 int __must_check
2309 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2310 				     u32 alignment,
2311 				     struct intel_ring_buffer *pipelined);
2312 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2313 int i915_gem_attach_phys_object(struct drm_device *dev,
2314 				struct drm_i915_gem_object *obj,
2315 				int id,
2316 				int align);
2317 void i915_gem_detach_phys_object(struct drm_device *dev,
2318 				 struct drm_i915_gem_object *obj);
2319 void i915_gem_free_all_phys_object(struct drm_device *dev);
2320 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2321 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2322 
2323 uint32_t
2324 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
2325 uint32_t
2326 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2327 			    int tiling_mode, bool fenced);
2328 
2329 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2330 				    enum i915_cache_level cache_level);
2331 
2332 #ifdef notyet
2333 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2334 				struct dma_buf *dma_buf);
2335 
2336 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2337 				struct drm_gem_object *gem_obj, int flags);
2338 #endif
2339 
2340 void i915_gem_restore_fences(struct drm_device *dev);
2341 
2342 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
2343 				  struct i915_address_space *vm);
2344 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
2345 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
2346 			struct i915_address_space *vm);
2347 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
2348 				struct i915_address_space *vm);
2349 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2350 				     struct i915_address_space *vm);
2351 struct i915_vma *
2352 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2353 				  struct i915_address_space *vm);
2354 
2355 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2356 
2357 /* Some GGTT VM helpers */
2358 #define obj_to_ggtt(obj) \
2359 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2360 static inline bool i915_is_ggtt(struct i915_address_space *vm)
2361 {
2362 	struct i915_address_space *ggtt =
2363 		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
2364 	return vm == ggtt;
2365 }
2366 
2367 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2368 {
2369 	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
2370 }
2371 
2372 static inline unsigned long
2373 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2374 {
2375 	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
2376 }
2377 
2378 static inline unsigned long
2379 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2380 {
2381 	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
2382 }
2383 
2384 static inline int __must_check
2385 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2386 		      uint32_t alignment,
2387 		      bool map_and_fenceable,
2388 		      bool nonblocking)
2389 {
2390 	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
2391 				   map_and_fenceable, nonblocking);
2392 }
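
/*
 * Illustrative use of the GGTT helpers above (a sketch with assumed
 * parameter values, not a real caller): pin into the global GTT, query the
 * resulting offset, and unpin when done.
 *
 *	if (i915_gem_obj_ggtt_pin(obj, 4096, true, false) == 0) {
 *		unsigned long offset = i915_gem_obj_ggtt_offset(obj);
 *		... program offset into hardware ...
 *		i915_gem_object_unpin(obj);
 *	}
 */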
2393 
2394 /* i915_gem_context.c */
2395 int __must_check i915_gem_context_init(struct drm_device *dev);
2396 void i915_gem_context_fini(struct drm_device *dev);
2397 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2398 int i915_switch_context(struct intel_ring_buffer *ring,
2399 			struct drm_file *file, int to_id);
2400 void i915_gem_context_free(struct kref *ctx_ref);
2401 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2402 {
2403 	kref_get(&ctx->ref);
2404 }
2405 
2406 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2407 {
2408 	kref_put(&ctx->ref, i915_gem_context_free);
2409 }
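
/*
 * These are thin kref wrappers; holders are expected to balance them
 * (minimal sketch of the assumed usage):
 *
 *	i915_gem_context_reference(ctx);
 *	... use ctx ...
 *	i915_gem_context_unreference(ctx);
 *
 * The final unreference calls i915_gem_context_free() via kref_put().
 */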
2410 
2411 struct i915_ctx_hang_stats * __must_check
2412 i915_gem_context_get_hang_stats(struct drm_device *dev,
2413 				struct drm_file *file,
2414 				u32 id);
2415 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2416 				  struct drm_file *file);
2417 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2418 				   struct drm_file *file);
2419 
2420 /* i915_gem_gtt.c */
2421 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
2422 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
2423 			    struct drm_i915_gem_object *obj,
2424 			    enum i915_cache_level cache_level);
2425 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
2426 			      struct drm_i915_gem_object *obj);
2427 
2428 void i915_check_and_clear_faults(struct drm_device *dev);
2429 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2430 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2431 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2432 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
2433 				enum i915_cache_level cache_level);
2434 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
2435 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
2436 void i915_gem_init_global_gtt(struct drm_device *dev);
2437 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
2438 			       unsigned long mappable_end, unsigned long end);
2439 int i915_gem_gtt_init(struct drm_device *dev);
2440 static inline void i915_gem_chipset_flush(struct drm_device *dev)
2441 {
2442 	if (INTEL_INFO(dev)->gen < 6)
2443 		intel_gtt_chipset_flush();
2444 }
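/* Hedged reading of the gen check above: the explicit GMCH flush is only
 * needed on pre-gen6 chipsets; later parts keep the GTT coherent without
 * it. */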
2445 
2446 
2447 /* i915_gem_evict.c */
2448 int __must_check i915_gem_evict_something(struct drm_device *dev,
2449 					  struct i915_address_space *vm,
2450 					  int min_size,
2451 					  unsigned alignment,
2452 					  unsigned cache_level,
2453 					  bool mappable,
2454 					  bool nonblock);
2455 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2456 int i915_gem_evict_everything(struct drm_device *dev);
2457 
2458 /* i915_gem_stolen.c */
2459 int i915_gem_init_stolen(struct drm_device *dev);
2460 int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
2461 void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
2462 void i915_gem_cleanup_stolen(struct drm_device *dev);
2463 struct drm_i915_gem_object *
2464 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
2465 struct drm_i915_gem_object *
2466 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2467 					       u32 stolen_offset,
2468 					       u32 gtt_offset,
2469 					       u32 size);
2470 void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
2471 
2472 /* i915_gem_tiling.c */
2473 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2474 {
2475 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2476 
2477 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2478 		obj->tiling_mode != I915_TILING_NONE;
2479 }
2480 
2481 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
2482 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
2483 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
2484 
2485 /* i915_gem_debug.c */
2486 #if WATCH_LISTS
2487 int i915_verify_lists(struct drm_device *dev);
2488 #else
2489 #define i915_verify_lists(dev) 0
2490 #endif
2491 
2492 #ifdef __linux__
2493 /* i915_debugfs.c */
2494 int i915_debugfs_init(struct drm_minor *minor);
2495 void i915_debugfs_cleanup(struct drm_minor *minor);
2496 #ifdef CONFIG_DEBUG_FS
2497 void intel_display_crc_init(struct drm_device *dev);
2498 #else
2499 static inline void intel_display_crc_init(struct drm_device *dev) {}
2500 #endif
2501 #endif
2502 
2503 /* i915_gpu_error.c */
2504 /* __printf(2, 3) */
2505 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
2506 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2507 			    const struct i915_error_state_file_priv *error);
2508 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2509 			      size_t count, loff_t pos);
2510 static inline void i915_error_state_buf_release(
2511 	struct drm_i915_error_state_buf *eb)
2512 {
2513 	kfree(eb->buf);
2514 }
2515 void i915_capture_error_state(struct drm_device *dev);
2516 void i915_error_state_get(struct drm_device *dev,
2517 			  struct i915_error_state_file_priv *error_priv);
2518 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2519 void i915_destroy_error_state(struct drm_device *dev);
2520 
2521 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2522 const char *i915_cache_level_str(int type);
2523 
2524 /* i915_suspend.c */
2525 extern int i915_save_state(struct drm_device *dev);
2526 extern int i915_restore_state(struct drm_device *dev);
2527 
2528 /* i915_ums.c */
2529 void i915_save_display_reg(struct drm_device *dev);
2530 void i915_restore_display_reg(struct drm_device *dev);
2531 
2532 /* i915_sysfs.c */
2533 void i915_setup_sysfs(struct drm_device *dev_priv);
2534 void i915_teardown_sysfs(struct drm_device *dev_priv);
2535 
2536 /* intel_i2c.c */
2537 extern int intel_setup_gmbus(struct drm_device *dev);
2538 extern void intel_teardown_gmbus(struct drm_device *dev);
2539 static inline bool intel_gmbus_is_port_valid(unsigned port)
2540 {
2541 	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
2542 }
2543 
2544 extern struct i2c_adapter *intel_gmbus_get_adapter(
2545 		struct drm_i915_private *dev_priv, unsigned port);
2546 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
2547 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
2548 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
2549 {
2550 	return container_of(adapter, struct intel_gmbus, controller)->force_bit;
2551 }
2552 extern void intel_i2c_reset(struct drm_device *dev);
2553 
2554 /* intel_opregion.c */
2555 struct intel_encoder;
2556 #ifdef CONFIG_ACPI
2557 extern int intel_opregion_setup(struct drm_device *dev);
2558 extern void intel_opregion_init(struct drm_device *dev);
2559 extern void intel_opregion_fini(struct drm_device *dev);
2560 extern void intel_opregion_asle_intr(struct drm_device *dev);
2561 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
2562 					 bool enable);
2563 extern int intel_opregion_notify_adapter(struct drm_device *dev,
2564 					 pci_power_t state);
2565 #else
2566 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
2567 static inline void intel_opregion_init(struct drm_device *dev) { return; }
2568 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
2569 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
2570 static inline int
2571 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
2572 {
2573 	return 0;
2574 }
2575 static inline int
2576 intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
2577 {
2578 	return 0;
2579 }
2580 #endif
2581 
2582 /* intel_acpi.c */
2583 #ifdef CONFIG_ACPI
2584 extern void intel_register_dsm_handler(void);
2585 extern void intel_unregister_dsm_handler(void);
2586 #else
2587 static inline void intel_register_dsm_handler(void) { return; }
2588 static inline void intel_unregister_dsm_handler(void) { return; }
2589 #endif /* CONFIG_ACPI */
2590 
2591 /* modesetting */
2592 extern void intel_modeset_init_hw(struct drm_device *dev);
2593 extern void intel_modeset_suspend_hw(struct drm_device *dev);
2594 extern void intel_modeset_init(struct drm_device *dev);
2595 extern void intel_modeset_gem_init(struct drm_device *dev);
2596 extern void intel_modeset_cleanup(struct drm_device *dev);
2597 extern void intel_connector_unregister(struct intel_connector *);
2598 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
2599 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2600 					 bool force_restore);
2601 extern void i915_redisable_vga(struct drm_device *dev);
2602 extern bool intel_fbc_enabled(struct drm_device *dev);
2603 extern void intel_disable_fbc(struct drm_device *dev);
2604 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2605 extern void intel_init_pch_refclk(struct drm_device *dev);
2606 extern void gen6_set_rps(struct drm_device *dev, u8 val);
2607 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2608 extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
2609 extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
2610 extern void intel_detect_pch(struct drm_device *dev);
2611 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
2612 extern int intel_enable_rc6(const struct drm_device *dev);
2613 
2614 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
2615 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2616 			struct drm_file *file);
2617 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
2618 			       struct drm_file *file);
2619 
2620 /* overlay */
2621 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
2622 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
2623 					    struct intel_overlay_error_state *error);
2624 
2625 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
2626 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2627 					    struct drm_device *dev,
2628 					    struct intel_display_error_state *error);
2629 
2630 /* On the SNB platform, the forcewake bit must be set before reading ring
2631  * registers to prevent the GT core from powering down and returning stale
2632  * values.
2633  */
2634 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2635 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
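
/*
 * Minimal sketch of the pairing described in the comment above (assumed
 * usage, not a real caller): hold forcewake across the register access so
 * the GT core cannot power down mid-read.
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ(reg);
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */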
2636 
2637 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2638 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
2639 
2640 /* intel_sideband.c */
2641 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2642 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
2643 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
2644 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
2645 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2646 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
2647 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2648 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
2649 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2650 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
2651 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2652 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
2653 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2654 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
2655 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
2656 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
2657 		   enum intel_sbi_destination destination);
2658 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2659 		     enum intel_sbi_destination destination);
2660 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
2661 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2662 
2663 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
2664 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
2665 
2666 void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2667 void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2668 
2669 #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
2670 	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
2671 	((reg) >= 0x5000 && (reg) < 0x8000) ||\
2672 	((reg) >= 0xB000 && (reg) < 0x12000) ||\
2673 	((reg) >= 0x2E000 && (reg) < 0x30000))
2674 
2675 #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
2676 	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
2677 	((reg) >= 0x22000 && (reg) < 0x24000) ||\
2678 	((reg) >= 0x30000 && (reg) < 0x40000))
2679 
2680 #define FORCEWAKE_RENDER	(1 << 0)
2681 #define FORCEWAKE_MEDIA		(1 << 1)
2682 #define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
2683 
2684 
2685 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
2686 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
2687 
2688 #define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
2689 #define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
2690 #define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
2691 #define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
2692 
2693 #define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
2694 #define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
2695 #define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
2696 #define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
2697 
2698 #define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
2699 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
2700 
2701 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
2702 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
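
/*
 * MMIO writes are posted; reading the register back forces them out to the
 * hardware before execution continues. Minimal sketch of the usual pattern
 * (assumed usage, not a real caller):
 *
 *	I915_WRITE(reg, val);
 *	POSTING_READ(reg);
 */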
2703 
2704 /* "Broadcast RGB" property */
2705 #define INTEL_BROADCAST_RGB_AUTO 0
2706 #define INTEL_BROADCAST_RGB_FULL 1
2707 #define INTEL_BROADCAST_RGB_LIMITED 2
2708 
2709 static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2710 {
2711 	if (HAS_PCH_SPLIT(dev))
2712 		return CPU_VGACNTRL;
2713 	else if (IS_VALLEYVIEW(dev))
2714 		return VLV_VGACNTRL;
2715 	else
2716 		return VGACNTRL;
2717 }
2718 
2719 static inline void __user *to_user_ptr(u64 address)
2720 {
2721 	return (void __user *)(uintptr_t)address;
2722 }
2723 
2724 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
2725 {
2726 	unsigned long j = msecs_to_jiffies(m);
2727 
2728 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2729 }
2730 
2731 static inline unsigned long
2732 timespec_to_jiffies_timeout(const struct timespec *value)
2733 {
2734 	unsigned long j = timespec_to_jiffies(value);
2735 
2736 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2737 }
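
/*
 * Both helpers above add one jiffy and clamp to MAX_JIFFY_OFFSET: because a
 * wait started partway through the current tick can expire up to one tick
 * early, the +1 guarantees at least the requested duration. Illustrative
 * arithmetic, assuming HZ == 100: msecs_to_jiffies_timeout(5) yields
 * msecs_to_jiffies(5) + 1 == 1 + 1 == 2 jiffies.
 */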
2738 
2739 #endif
2740