xref: /dragonfly/sys/dev/drm/i915/i915_drv.h (revision 6a3cbbc2)
1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29 
30 #ifndef _I915_DRV_H_
31 #define _I915_DRV_H_
32 
33 #include <uapi/drm/i915_drm.h>
34 #include <uapi/drm/drm_fourcc.h>
35 
36 #include <linux/io-mapping.h>
37 #include <linux/i2c.h>
38 #include <linux/i2c-algo-bit.h>
39 #include <linux/backlight.h>
40 #include <linux/hashtable.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/kref.h>
43 #include <linux/pm_qos.h>
44 #include <linux/shmem_fs.h>
45 
46 #include <drm/drmP.h>
47 #include <drm/intel-gtt.h>
48 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
49 #include <drm/drm_gem.h>
50 #include <drm/drm_auth.h>
51 
52 #include "i915_params.h"
53 #include "i915_reg.h"
54 
55 #include "intel_bios.h"
56 #include "intel_dpll_mgr.h"
57 #include "intel_guc.h"
58 #include "intel_lrc.h"
59 #include "intel_ringbuffer.h"
60 
61 #include "i915_gem.h"
62 #include "i915_gem_gtt.h"
63 #include "i915_gem_render_state.h"
64 #include "i915_gem_request.h"
65 
66 #include "intel_gvt.h"
67 
68 /* General customization:
69  */
70 
71 #define DRIVER_NAME		"i915"
72 #define DRIVER_DESC		"Intel Graphics"
73 #define DRIVER_DATE		"20161024"
74 #define DRIVER_TIMESTAMP	1477290335
75 
76 #undef WARN_ON
77 /* Many gcc versions seem to not see through this and fall over :( */
78 #if 0
79 #define WARN_ON(x) ({ \
80 	bool __i915_warn_cond = (x); \
81 	if (__builtin_constant_p(__i915_warn_cond)) \
82 		BUILD_BUG_ON(__i915_warn_cond); \
83 	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
84 #else
85 #define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
86 #endif
87 
88 #undef WARN_ON_ONCE
89 #define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
90 
91 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
92 			     (long) (x), __func__)
93 
94 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
95  * WARN_ON()) for hw state sanity checks, to catch unexpected conditions
96  * which may not necessarily be a user visible problem.  This will either
97  * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
98  * to enable distros and users to tailor their preferred amount of i915
99  * warning spam.
100  */
101 #define I915_STATE_WARN(condition, format...) ({			\
102 	int __ret_warn_on = !!(condition);				\
103 	if (unlikely(__ret_warn_on))					\
104 		if (!WARN(i915.verbose_state_checks, format))		\
105 			DRM_ERROR(format);				\
106 	unlikely(__ret_warn_on);					\
107 })
108 
109 #define I915_STATE_WARN_ON(x)						\
110 	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
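/*
 * Illustrative sketch (the condition and message are invented, not from
 * this driver): the macro evaluates to the (unlikely) condition, so it
 * can gate follow-up code directly:
 *
 *	if (I915_STATE_WARN(crtc->active && !crtc->base.enabled,
 *			    "active crtc, but not enabled\n"))
 *		return;
 */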
111 
112 bool __i915_inject_load_failure(const char *func, int line);
113 #define i915_inject_load_failure() \
114 	__i915_inject_load_failure(__func__, __LINE__)
115 
116 static inline const char *yesno(bool v)
117 {
118 	return v ? "yes" : "no";
119 }
120 
121 static inline const char *onoff(bool v)
122 {
123 	return v ? "on" : "off";
124 }
125 
126 enum i915_pipe {
127 	INVALID_PIPE = -1,
128 	PIPE_A = 0,
129 	PIPE_B,
130 	PIPE_C,
131 	_PIPE_EDP,
132 	I915_MAX_PIPES = _PIPE_EDP
133 };
134 #define pipe_name(p) ((p) + 'A')
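/* e.g. pipe_name(PIPE_B) evaluates to 'B', for use with "%c" format specifiers. */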
135 
136 enum transcoder {
137 	TRANSCODER_A = 0,
138 	TRANSCODER_B,
139 	TRANSCODER_C,
140 	TRANSCODER_EDP,
141 	TRANSCODER_DSI_A,
142 	TRANSCODER_DSI_C,
143 	I915_MAX_TRANSCODERS
144 };
145 
146 static inline const char *transcoder_name(enum transcoder transcoder)
147 {
148 	switch (transcoder) {
149 	case TRANSCODER_A:
150 		return "A";
151 	case TRANSCODER_B:
152 		return "B";
153 	case TRANSCODER_C:
154 		return "C";
155 	case TRANSCODER_EDP:
156 		return "EDP";
157 	case TRANSCODER_DSI_A:
158 		return "DSI A";
159 	case TRANSCODER_DSI_C:
160 		return "DSI C";
161 	default:
162 		return "<invalid>";
163 	}
164 }
165 
166 static inline bool transcoder_is_dsi(enum transcoder transcoder)
167 {
168 	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
169 }
170 
171 /*
172  * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
173  * number of planes per CRTC.  Not all platforms really have this many planes,
174  * which means some arrays of size I915_MAX_PLANES may have unused entries
175  * between the topmost sprite plane and the cursor plane.
176  */
177 enum plane {
178 	PLANE_A = 0,
179 	PLANE_B,
180 	PLANE_C,
181 	PLANE_CURSOR,
182 	I915_MAX_PLANES,
183 };
184 #define plane_name(p) ((p) + 'A')
185 
186 #define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
187 
188 enum port {
189 	PORT_NONE = -1,
190 	PORT_A = 0,
191 	PORT_B,
192 	PORT_C,
193 	PORT_D,
194 	PORT_E,
195 	I915_MAX_PORTS
196 };
197 #define port_name(p) ((p) + 'A')
198 
199 #define I915_NUM_PHYS_VLV 2
200 
201 enum dpio_channel {
202 	DPIO_CH0,
203 	DPIO_CH1
204 };
205 
206 enum dpio_phy {
207 	DPIO_PHY0,
208 	DPIO_PHY1
209 };
210 
211 enum intel_display_power_domain {
212 	POWER_DOMAIN_PIPE_A,
213 	POWER_DOMAIN_PIPE_B,
214 	POWER_DOMAIN_PIPE_C,
215 	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
216 	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
217 	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
218 	POWER_DOMAIN_TRANSCODER_A,
219 	POWER_DOMAIN_TRANSCODER_B,
220 	POWER_DOMAIN_TRANSCODER_C,
221 	POWER_DOMAIN_TRANSCODER_EDP,
222 	POWER_DOMAIN_TRANSCODER_DSI_A,
223 	POWER_DOMAIN_TRANSCODER_DSI_C,
224 	POWER_DOMAIN_PORT_DDI_A_LANES,
225 	POWER_DOMAIN_PORT_DDI_B_LANES,
226 	POWER_DOMAIN_PORT_DDI_C_LANES,
227 	POWER_DOMAIN_PORT_DDI_D_LANES,
228 	POWER_DOMAIN_PORT_DDI_E_LANES,
229 	POWER_DOMAIN_PORT_DSI,
230 	POWER_DOMAIN_PORT_CRT,
231 	POWER_DOMAIN_PORT_OTHER,
232 	POWER_DOMAIN_VGA,
233 	POWER_DOMAIN_AUDIO,
234 	POWER_DOMAIN_PLLS,
235 	POWER_DOMAIN_AUX_A,
236 	POWER_DOMAIN_AUX_B,
237 	POWER_DOMAIN_AUX_C,
238 	POWER_DOMAIN_AUX_D,
239 	POWER_DOMAIN_GMBUS,
240 	POWER_DOMAIN_MODESET,
241 	POWER_DOMAIN_INIT,
242 
243 	POWER_DOMAIN_NUM,
244 };
245 
246 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
247 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
248 		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
249 #define POWER_DOMAIN_TRANSCODER(tran) \
250 	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
251 	 (tran) + POWER_DOMAIN_TRANSCODER_A)
252 
253 enum hpd_pin {
254 	HPD_NONE = 0,
255 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
256 	HPD_CRT,
257 	HPD_SDVO_B,
258 	HPD_SDVO_C,
259 	HPD_PORT_A,
260 	HPD_PORT_B,
261 	HPD_PORT_C,
262 	HPD_PORT_D,
263 	HPD_PORT_E,
264 	HPD_NUM_PINS
265 };
266 
267 #define for_each_hpd_pin(__pin) \
268 	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
269 
270 struct i915_hotplug {
271 	struct work_struct hotplug_work;
272 
273 	struct {
274 		unsigned long last_jiffies;
275 		int count;
276 		enum {
277 			HPD_ENABLED = 0,
278 			HPD_DISABLED = 1,
279 			HPD_MARK_DISABLED = 2
280 		} state;
281 	} stats[HPD_NUM_PINS];
282 	u32 event_bits;
283 	struct delayed_work reenable_work;
284 
285 	struct intel_digital_port *irq_port[I915_MAX_PORTS];
286 	u32 long_port_mask;
287 	u32 short_port_mask;
288 	struct work_struct dig_port_work;
289 
290 	struct work_struct poll_init_work;
291 	bool poll_enabled;
292 
293 	/*
294 	 * If we get an HPD irq from DP and an HPD irq from non-DP,
295 	 * the non-DP HPD work could block the workqueue trying to
296 	 * acquire a mode config mutex that userspace already holds,
297 	 * while userspace waits on the DP workqueue to run, which is
298 	 * blocked behind the non-DP one. Hence DP gets its own queue.
299 	 */
300 	struct workqueue_struct *dp_wq;
301 };
302 
303 #define I915_GEM_GPU_DOMAINS \
304 	(I915_GEM_DOMAIN_RENDER | \
305 	 I915_GEM_DOMAIN_SAMPLER | \
306 	 I915_GEM_DOMAIN_COMMAND | \
307 	 I915_GEM_DOMAIN_INSTRUCTION | \
308 	 I915_GEM_DOMAIN_VERTEX)
309 
310 #define for_each_pipe(__dev_priv, __p) \
311 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
312 #define for_each_pipe_masked(__dev_priv, __p, __mask) \
313 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
314 		for_each_if ((__mask) & (1 << (__p)))
315 #define for_each_plane(__dev_priv, __pipe, __p)				\
316 	for ((__p) = 0;							\
317 	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
318 	     (__p)++)
319 #define for_each_sprite(__dev_priv, __p, __s)				\
320 	for ((__s) = 0;							\
321 	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
322 	     (__s)++)
323 
324 #define for_each_port_masked(__port, __ports_mask) \
325 	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
326 		for_each_if ((__ports_mask) & (1 << (__port)))
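/*
 * Usage sketch (hotplug_trigger_mask is a hypothetical u32): report every
 * port present in a mask:
 *
 *	enum port port;
 *
 *	for_each_port_masked(port, hotplug_trigger_mask)
 *		DRM_DEBUG_KMS("hotplug on port %c\n", port_name(port));
 */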
327 
328 #define for_each_crtc(dev, crtc) \
329 	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
330 
331 #define for_each_intel_plane(dev, intel_plane) \
332 	list_for_each_entry(intel_plane,			\
333 			    &(dev)->mode_config.plane_list,	\
334 			    base.head)
335 
336 #define for_each_intel_plane_mask(dev, intel_plane, plane_mask)		\
337 	list_for_each_entry(intel_plane,				\
338 			    &(dev)->mode_config.plane_list,		\
339 			    base.head)					\
340 		for_each_if ((plane_mask) &				\
341 			     (1 << drm_plane_index(&intel_plane->base)))
342 
343 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
344 	list_for_each_entry(intel_plane,				\
345 			    &(dev)->mode_config.plane_list,		\
346 			    base.head)					\
347 		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
348 
349 #define for_each_intel_crtc(dev, intel_crtc)				\
350 	list_for_each_entry(intel_crtc,					\
351 			    &(dev)->mode_config.crtc_list,		\
352 			    base.head)
353 
354 #define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)		\
355 	list_for_each_entry(intel_crtc,					\
356 			    &(dev)->mode_config.crtc_list,		\
357 			    base.head)					\
358 		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
359 
360 #define for_each_intel_encoder(dev, intel_encoder)		\
361 	list_for_each_entry(intel_encoder,			\
362 			    &(dev)->mode_config.encoder_list,	\
363 			    base.head)
364 
365 #define for_each_intel_connector(dev, intel_connector)		\
366 	list_for_each_entry(intel_connector,			\
367 			    &(dev)->mode_config.connector_list,	\
368 			    base.head)
369 
370 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
371 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
372 		for_each_if ((intel_encoder)->base.crtc == (__crtc))
373 
374 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
375 	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
376 		for_each_if ((intel_connector)->base.encoder == (__encoder))
377 
378 #define for_each_power_domain(domain, mask)				\
379 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
380 		for_each_if ((1 << (domain)) & (mask))
381 
382 struct drm_i915_private;
383 struct i915_mm_struct;
384 struct i915_mmu_object;
385 
386 struct drm_i915_file_private {
387 	struct drm_i915_private *dev_priv;
388 	struct drm_file *file;
389 
390 	struct {
391 		spinlock_t lock;
392 		struct list_head request_list;
393 /* 20ms is a fairly arbitrary limit (greater than the average frame time)
394  * chosen to prevent the CPU getting more than a frame ahead of the GPU
395  * (when using lax throttling for the frontbuffer). We also use it to
396  * offer free GPU waitboosts for severely congested workloads.
397  */
398 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
399 	} mm;
400 	struct idr context_idr;
401 
402 	struct intel_rps_client {
403 		struct list_head link;
404 		unsigned boosts;
405 	} rps;
406 
407 	unsigned int bsd_engine;
408 };
409 
410 /* Used by dp and fdi links */
411 struct intel_link_m_n {
412 	uint32_t	tu;
413 	uint32_t	gmch_m;
414 	uint32_t	gmch_n;
415 	uint32_t	link_m;
416 	uint32_t	link_n;
417 };
418 
419 void intel_link_compute_m_n(int bpp, int nlanes,
420 			    int pixel_clock, int link_clock,
421 			    struct intel_link_m_n *m_n);
422 
423 /* Interface history:
424  *
425  * 1.1: Original.
426  * 1.2: Add Power Management
427  * 1.3: Add vblank support
428  * 1.4: Fix cmdbuffer path, add heap destroy
429  * 1.5: Add vblank pipe configuration
430  * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
431  *      - Support vertical blank on secondary display pipe
432  */
433 #define DRIVER_MAJOR		1
434 #define DRIVER_MINOR		6
435 #define DRIVER_PATCHLEVEL	0
436 
437 struct opregion_header;
438 struct opregion_acpi;
439 struct opregion_swsci;
440 struct opregion_asle;
441 
442 struct intel_opregion {
443 	struct opregion_header *header;
444 	struct opregion_acpi *acpi;
445 	struct opregion_swsci *swsci;
446 	u32 swsci_gbda_sub_functions;
447 	u32 swsci_sbcb_sub_functions;
448 	struct opregion_asle *asle;
449 	void *rvda;
450 	const void *vbt;
451 	u32 vbt_size;
452 	u32 *lid_state;
453 	struct work_struct asle_work;
454 };
455 #define OPREGION_SIZE            (8*1024)
456 
457 struct intel_overlay;
458 struct intel_overlay_error_state;
459 
460 struct drm_i915_fence_reg {
461 	struct list_head link;
462 	struct drm_i915_private *i915;
463 	struct i915_vma *vma;
464 	int pin_count;
465 	int id;
466 	/**
467 	 * Whether the tiling parameters for the currently
468 	 * associated fence register have changed. Note that
469 	 * for the purposes of tracking tiling changes we also
470 	 * treat the unfenced register, the register slot that
471 	 * the object occupies whilst it executes a fenced
472 	 * command (such as BLT on gen2/3), as a "fence".
473 	 */
474 	bool dirty;
475 };
476 
477 struct sdvo_device_mapping {
478 	u8 initialized;
479 	u8 dvo_port;
480 	u8 slave_addr;
481 	u8 dvo_wiring;
482 	u8 i2c_pin;
483 	u8 ddc_pin;
484 };
485 
486 struct intel_connector;
487 struct intel_encoder;
488 struct intel_crtc_state;
489 struct intel_initial_plane_config;
490 struct intel_crtc;
491 struct intel_limit;
492 struct dpll;
493 
494 struct drm_i915_display_funcs {
495 	int (*get_display_clock_speed)(struct drm_device *dev);
496 	int (*get_fifo_size)(struct drm_device *dev, int plane);
497 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
498 	int (*compute_intermediate_wm)(struct drm_device *dev,
499 				       struct intel_crtc *intel_crtc,
500 				       struct intel_crtc_state *newstate);
501 	void (*initial_watermarks)(struct intel_crtc_state *cstate);
502 	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
503 	int (*compute_global_watermarks)(struct drm_atomic_state *state);
504 	void (*update_wm)(struct drm_crtc *crtc);
505 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
506 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
507 	/* Returns the active state of the crtc, and if the crtc is active,
508 	 * fills out the pipe-config with the hw state. */
509 	bool (*get_pipe_config)(struct intel_crtc *,
510 				struct intel_crtc_state *);
511 	void (*get_initial_plane_config)(struct intel_crtc *,
512 					 struct intel_initial_plane_config *);
513 	int (*crtc_compute_clock)(struct intel_crtc *crtc,
514 				  struct intel_crtc_state *crtc_state);
515 	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
516 			    struct drm_atomic_state *old_state);
517 	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
518 			     struct drm_atomic_state *old_state);
519 	void (*update_crtcs)(struct drm_atomic_state *state,
520 			     unsigned int *crtc_vblank_mask);
521 	void (*audio_codec_enable)(struct drm_connector *connector,
522 				   struct intel_encoder *encoder,
523 				   const struct drm_display_mode *adjusted_mode);
524 	void (*audio_codec_disable)(struct intel_encoder *encoder);
525 	void (*fdi_link_train)(struct drm_crtc *crtc);
526 	void (*init_clock_gating)(struct drm_device *dev);
527 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
528 			  struct drm_framebuffer *fb,
529 			  struct drm_i915_gem_object *obj,
530 			  struct drm_i915_gem_request *req,
531 			  uint32_t flags);
532 	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
533 	/* clock updates for mode set */
534 	/* cursor updates */
535 	/* render clock increase/decrease */
536 	/* display clock increase/decrease */
537 	/* pll clock increase/decrease */
538 
539 	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
540 	void (*load_luts)(struct drm_crtc_state *crtc_state);
541 };
542 
543 enum forcewake_domain_id {
544 	FW_DOMAIN_ID_RENDER = 0,
545 	FW_DOMAIN_ID_BLITTER,
546 	FW_DOMAIN_ID_MEDIA,
547 
548 	FW_DOMAIN_ID_COUNT
549 };
550 
551 enum forcewake_domains {
552 	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
553 	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
554 	FORCEWAKE_MEDIA	= (1 << FW_DOMAIN_ID_MEDIA),
555 	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
556 			 FORCEWAKE_BLITTER |
557 			 FORCEWAKE_MEDIA)
558 };
559 
560 #define FW_REG_READ  (1)
561 #define FW_REG_WRITE (2)
562 
563 enum forcewake_domains
564 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
565 			       i915_reg_t reg, unsigned int op);
566 
567 struct intel_uncore_funcs {
568 	void (*force_wake_get)(struct drm_i915_private *dev_priv,
569 							enum forcewake_domains domains);
570 	void (*force_wake_put)(struct drm_i915_private *dev_priv,
571 							enum forcewake_domains domains);
572 
573 	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
574 	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
575 	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
576 	u64      (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
577 
578 	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
579 				uint8_t val, bool trace);
580 	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
581 				uint16_t val, bool trace);
582 	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
583 				uint32_t val, bool trace);
584 };
585 
586 struct intel_forcewake_range {
587 	u32 start;
588 	u32 end;
589 
590 	enum forcewake_domains domains;
591 };
592 
593 struct intel_uncore {
594 	spinlock_t lock; /** lock is also taken in irq contexts. */
595 
596 	const struct intel_forcewake_range *fw_domains_table;
597 	unsigned int fw_domains_table_entries;
598 
599 	struct intel_uncore_funcs funcs;
600 
601 	unsigned fifo_count;
602 
603 	enum forcewake_domains fw_domains;
604 	enum forcewake_domains fw_domains_active;
605 
606 	struct intel_uncore_forcewake_domain {
607 		struct drm_i915_private *i915;
608 		enum forcewake_domain_id id;
609 		enum forcewake_domains mask;
610 		unsigned wake_count;
611 		struct hrtimer timer;
612 		i915_reg_t reg_set;
613 		u32 val_set;
614 		u32 val_clear;
615 		i915_reg_t reg_ack;
616 		i915_reg_t reg_post;
617 		u32 val_reset;
618 	} fw_domain[FW_DOMAIN_ID_COUNT];
619 
620 	int unclaimed_mmio_check;
621 };
622 
623 /* Iterate over initialised fw domains */
624 #define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
625 	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
626 	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
627 	     (domain__)++) \
628 		for_each_if ((mask__) & (domain__)->mask)
629 
630 #define for_each_fw_domain(domain__, dev_priv__) \
631 	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
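/*
 * Sketch (assumes a dev_priv in scope): visiting only the render and media
 * domains, e.g. to cancel their release timers:
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *
 *	for_each_fw_domain_masked(domain,
 *				  FORCEWAKE_RENDER | FORCEWAKE_MEDIA,
 *				  dev_priv)
 *		hrtimer_cancel(&domain->timer);
 */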
632 
633 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
634 #define CSR_VERSION_MAJOR(version)	((version) >> 16)
635 #define CSR_VERSION_MINOR(version)	((version) & 0xffff)
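/* e.g. CSR_VERSION(1, 23) packs to 0x10017; the MAJOR/MINOR macros unpack it. */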
636 
637 struct intel_csr {
638 	struct work_struct work;
639 	const char *fw_path;
640 	uint32_t *dmc_payload;
641 	uint32_t dmc_fw_size;
642 	uint32_t version;
643 	uint32_t mmio_count;
644 	i915_reg_t mmioaddr[8];
645 	uint32_t mmiodata[8];
646 	uint32_t dc_state;
647 	uint32_t allowed_dc_mask;
648 };
649 
650 #define DEV_INFO_FOR_EACH_FLAG(func) \
651 	/* Keep is_* in chronological order */ \
652 	func(is_mobile); \
653 	func(is_i85x); \
654 	func(is_i915g); \
655 	func(is_i945gm); \
656 	func(is_g33); \
657 	func(is_g4x); \
658 	func(is_pineview); \
659 	func(is_broadwater); \
660 	func(is_crestline); \
661 	func(is_ivybridge); \
662 	func(is_valleyview); \
663 	func(is_cherryview); \
664 	func(is_haswell); \
665 	func(is_broadwell); \
666 	func(is_skylake); \
667 	func(is_broxton); \
668 	func(is_kabylake); \
669 	func(is_preliminary); \
670 	/* Keep has_* in alphabetical order */ \
671 	func(has_csr); \
672 	func(has_ddi); \
673 	func(has_dp_mst); \
674 	func(has_fbc); \
675 	func(has_fpga_dbg); \
676 	func(has_gmbus_irq); \
677 	func(has_gmch_display); \
678 	func(has_guc); \
679 	func(has_hotplug); \
680 	func(has_hw_contexts); \
681 	func(has_l3_dpf); \
682 	func(has_llc); \
683 	func(has_logical_ring_contexts); \
684 	func(has_overlay); \
685 	func(has_pipe_cxsr); \
686 	func(has_pooled_eu); \
687 	func(has_psr); \
688 	func(has_rc6); \
689 	func(has_rc6p); \
690 	func(has_resource_streamer); \
691 	func(has_runtime_pm); \
692 	func(has_snoop); \
693 	func(cursor_needs_physical); \
694 	func(hws_needs_physical); \
695 	func(overlay_needs_physical); \
696 	func(supports_tv)
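/*
 * DEV_INFO_FOR_EACH_FLAG() is an X-macro: the caller supplies the per-flag
 * expansion. A debugfs-style printer might look like this sketch (m and
 * info are assumed to be in scope):
 *
 *	#define PRINT_FLAG(name) seq_printf(m, #name ": %s\n", yesno(info->name))
 *	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 *	#undef PRINT_FLAG
 *
 * The driver itself instantiates it with DEFINE_FLAG below to declare one
 * bitfield per flag in struct intel_device_info.
 */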
697 
698 struct sseu_dev_info {
699 	u8 slice_mask;
700 	u8 subslice_mask;
701 	u8 eu_total;
702 	u8 eu_per_subslice;
703 	u8 min_eu_in_pool;
704 	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
705 	u8 subslice_7eu[3];
706 	u8 has_slice_pg:1;
707 	u8 has_subslice_pg:1;
708 	u8 has_eu_pg:1;
709 };
710 
711 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
712 {
713 	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
714 }
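/*
 * Worked example (masks invented): slice_mask = 0x3 and subslice_mask = 0x7
 * describe 2 slices of 3 subslices each, so sseu_subslice_total() returns
 * hweight8(0x3) * hweight8(0x7) == 2 * 3 == 6.
 */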
715 
716 struct intel_device_info {
717 	u32 display_mmio_offset;
718 	u16 device_id;
719 	u8 num_pipes;
720 	u8 num_sprites[I915_MAX_PIPES];
721 	u8 gen;
722 	u16 gen_mask;
723 	u8 ring_mask; /* Rings supported by the HW */
724 	u8 num_rings;
725 #define DEFINE_FLAG(name) u8 name:1
726 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
727 #undef DEFINE_FLAG
728 	u16 ddb_size; /* in blocks */
729 	/* Register offsets for the various display pipes and transcoders */
730 	int pipe_offsets[I915_MAX_TRANSCODERS];
731 	int trans_offsets[I915_MAX_TRANSCODERS];
732 	int palette_offsets[I915_MAX_PIPES];
733 	int cursor_offsets[I915_MAX_PIPES];
734 
735 	/* Slice/subslice/EU info */
736 	struct sseu_dev_info sseu;
737 
738 	struct color_luts {
739 		u16 degamma_lut_size;
740 		u16 gamma_lut_size;
741 	} color;
742 };
743 
744 struct intel_display_error_state;
745 
746 struct drm_i915_error_state {
747 	struct kref ref;
748 	struct timeval time;
749 
750 	struct drm_i915_private *i915;
751 
752 	char error_msg[128];
753 	bool simulated;
754 	int iommu;
755 	u32 reset_count;
756 	u32 suspend_count;
757 	struct intel_device_info device_info;
758 
759 	/* Generic register state */
760 	u32 eir;
761 	u32 pgtbl_er;
762 	u32 ier;
763 	u32 gtier[4];
764 	u32 ccid;
765 	u32 derrmr;
766 	u32 forcewake;
767 	u32 error; /* gen6+ */
768 	u32 err_int; /* gen7 */
769 	u32 fault_data0; /* gen8, gen9 */
770 	u32 fault_data1; /* gen8, gen9 */
771 	u32 done_reg;
772 	u32 gac_eco;
773 	u32 gam_ecochk;
774 	u32 gab_ctl;
775 	u32 gfx_mode;
776 
777 	u64 fence[I915_MAX_NUM_FENCES];
778 	struct intel_overlay_error_state *overlay;
779 	struct intel_display_error_state *display;
780 	struct drm_i915_error_object *semaphore;
781 
782 	struct drm_i915_error_engine {
783 		int engine_id;
784 		/* Software tracked state */
785 		bool waiting;
786 		int num_waiters;
787 		int hangcheck_score;
788 		enum intel_engine_hangcheck_action hangcheck_action;
789 		struct i915_address_space *vm;
790 		int num_requests;
791 
792 		/* position of active request inside the ring */
793 		u32 rq_head, rq_post, rq_tail;
794 
795 		/* our own tracking of ring head and tail */
796 		u32 cpu_ring_head;
797 		u32 cpu_ring_tail;
798 
799 		u32 last_seqno;
800 		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
801 
802 		/* Register state */
803 		u32 start;
804 		u32 tail;
805 		u32 head;
806 		u32 ctl;
807 		u32 mode;
808 		u32 hws;
809 		u32 ipeir;
810 		u32 ipehr;
811 		u32 bbstate;
812 		u32 instpm;
813 		u32 instps;
814 		u32 seqno;
815 		u64 bbaddr;
816 		u64 acthd;
817 		u32 fault_reg;
818 		u64 faddr;
819 		u32 rc_psmi; /* sleep state */
820 		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
821 		struct intel_instdone instdone;
822 
823 		struct drm_i915_error_object {
824 			u64 gtt_offset;
825 			u64 gtt_size;
826 			int page_count;
827 			int unused;
828 			u32 *pages[0];
829 		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
830 
831 		struct drm_i915_error_object *wa_ctx;
832 
833 		struct drm_i915_error_request {
834 			long jiffies;
835 			pid_t pid;
836 			u32 context;
837 			u32 seqno;
838 			u32 head;
839 			u32 tail;
840 		} *requests, execlist[2];
841 
842 		struct drm_i915_error_waiter {
843 			char comm[TASK_COMM_LEN];
844 			pid_t pid;
845 			u32 seqno;
846 		} *waiters;
847 
848 		struct {
849 			u32 gfx_mode;
850 			union {
851 				u64 pdp[4];
852 				u32 pp_dir_base;
853 			};
854 		} vm_info;
855 
856 		pid_t pid;
857 		char comm[TASK_COMM_LEN];
858 	} engine[I915_NUM_ENGINES];
859 
860 	struct drm_i915_error_buffer {
861 		u32 size;
862 		u32 name;
863 		u32 rseqno[I915_NUM_ENGINES], wseqno;
864 		u64 gtt_offset;
865 		u32 read_domains;
866 		u32 write_domain;
867 		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
868 		u32 tiling:2;
869 		u32 dirty:1;
870 		u32 purgeable:1;
871 		u32 userptr:1;
872 		s32 engine:4;
873 		u32 cache_level:3;
874 	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
875 	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
876 	struct i915_address_space *active_vm[I915_NUM_ENGINES];
877 };
878 
879 enum i915_cache_level {
880 	I915_CACHE_NONE = 0,
881 	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
882 	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
883 			      caches, eg sampler/render caches, and the
884 			      large Last-Level-Cache. LLC is coherent with
885 			      the CPU, but L3 is only visible to the GPU. */
886 	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
887 };
888 
889 struct i915_ctx_hang_stats {
890 	/* This context had batch pending when hang was declared */
891 	unsigned batch_pending;
892 
893 	/* This context had batch active when hang was declared */
894 	unsigned batch_active;
895 
896 	/* Time when this context was last blamed for a GPU reset */
897 	unsigned long guilty_ts;
898 
899 	/* If the contexts causes a second GPU hang within this time,
900 	 * it is permanently banned from submitting any more work.
901 	 */
902 	unsigned long ban_period_seconds;
903 
904 	/* This context is banned from submitting more work */
905 	bool banned;
906 };
907 
908 /* This must match up with the value previously used for execbuf2.rsvd1. */
909 #define DEFAULT_CONTEXT_HANDLE 0
910 
911 /**
912  * struct i915_gem_context - as the name implies, represents a context.
913  * @ref: reference count.
914  * @user_handle: userspace tracking identity for this context.
915  * @remap_slice: l3 row remapping information.
916  * @flags: context specific flags:
917  *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
918  * @file_priv: filp associated with this context (NULL for global default
919  *	       context).
920  * @hang_stats: information about the role of this context in possible GPU
921  *		hangs.
922  * @ppgtt: virtual memory space used by this context.
923  * @legacy_hw_ctx: render context backing object and whether it is correctly
924  *                initialized (legacy ring submission mechanism only).
925  * @link: link in the global list of contexts.
926  *
927  * Contexts are memory images used by the hardware to store copies of their
928  * internal state.
929  */
930 struct i915_gem_context {
931 	struct kref ref;
932 	struct drm_i915_private *i915;
933 	struct drm_i915_file_private *file_priv;
934 	struct i915_hw_ppgtt *ppgtt;
935 	pid_t pid;
936 
937 	struct i915_ctx_hang_stats hang_stats;
938 
939 	unsigned long flags;
940 #define CONTEXT_NO_ZEROMAP		BIT(0)
941 #define CONTEXT_NO_ERROR_CAPTURE	BIT(1)
942 
943 	/* Unique identifier for this context, used by the hw for tracking */
944 	unsigned int hw_id;
945 	u32 user_handle;
946 
947 	u32 ggtt_alignment;
948 
949 	struct intel_context {
950 		struct i915_vma *state;
951 		struct intel_ring *ring;
952 		uint32_t *lrc_reg_state;
953 		u64 lrc_desc;
954 		int pin_count;
955 		bool initialised;
956 	} engine[I915_NUM_ENGINES];
957 	u32 ring_size;
958 	u32 desc_template;
959 	struct atomic_notifier_head status_notifier;
960 	bool execlists_force_single_submission;
961 
962 	struct list_head link;
963 
964 	u8 remap_slice;
965 	bool closed:1;
966 };
967 
968 enum fb_op_origin {
969 	ORIGIN_GTT,
970 	ORIGIN_CPU,
971 	ORIGIN_CS,
972 	ORIGIN_FLIP,
973 	ORIGIN_DIRTYFB,
974 };
975 
976 struct intel_fbc {
977 	/* This is always the inner lock when overlapping with struct_mutex and
978 	 * it's the outer lock when overlapping with stolen_lock. */
979 	struct lock lock;
980 	unsigned threshold;
981 	unsigned int possible_framebuffer_bits;
982 	unsigned int busy_bits;
983 	unsigned int visible_pipes_mask;
984 	struct intel_crtc *crtc;
985 
986 	struct drm_mm_node compressed_fb;
987 	struct drm_mm_node *compressed_llb;
988 
989 	bool false_color;
990 
991 	bool enabled;
992 	bool active;
993 
994 	bool underrun_detected;
995 	struct work_struct underrun_work;
996 
997 	struct intel_fbc_state_cache {
998 		struct {
999 			unsigned int mode_flags;
1000 			uint32_t hsw_bdw_pixel_rate;
1001 		} crtc;
1002 
1003 		struct {
1004 			unsigned int rotation;
1005 			int src_w;
1006 			int src_h;
1007 			bool visible;
1008 		} plane;
1009 
1010 		struct {
1011 			u64 ilk_ggtt_offset;
1012 			uint32_t pixel_format;
1013 			unsigned int stride;
1014 			int fence_reg;
1015 			unsigned int tiling_mode;
1016 		} fb;
1017 	} state_cache;
1018 
1019 	struct intel_fbc_reg_params {
1020 		struct {
1021 			enum i915_pipe pipe;
1022 			enum plane plane;
1023 			unsigned int fence_y_offset;
1024 		} crtc;
1025 
1026 		struct {
1027 			u64 ggtt_offset;
1028 			uint32_t pixel_format;
1029 			unsigned int stride;
1030 			int fence_reg;
1031 		} fb;
1032 
1033 		int cfb_size;
1034 	} params;
1035 
1036 	struct intel_fbc_work {
1037 		bool scheduled;
1038 		u32 scheduled_vblank;
1039 		struct work_struct work;
1040 	} work;
1041 
1042 	const char *no_fbc_reason;
1043 };
1044 
1045 /**
1046  * HIGH_RR is the highest eDP panel refresh rate read from EDID
1047  * LOW_RR is the lowest eDP panel refresh rate found from EDID
1048  * parsing for the same resolution.
1049  */
1050 enum drrs_refresh_rate_type {
1051 	DRRS_HIGH_RR,
1052 	DRRS_LOW_RR,
1053 	DRRS_MAX_RR, /* RR count */
1054 };
1055 
1056 enum drrs_support_type {
1057 	DRRS_NOT_SUPPORTED = 0,
1058 	STATIC_DRRS_SUPPORT = 1,
1059 	SEAMLESS_DRRS_SUPPORT = 2
1060 };
1061 
1062 struct intel_dp;
1063 struct i915_drrs {
1064 	struct lock mutex;
1065 	struct delayed_work work;
1066 	struct intel_dp *dp;
1067 	unsigned busy_frontbuffer_bits;
1068 	enum drrs_refresh_rate_type refresh_rate_type;
1069 	enum drrs_support_type type;
1070 };
1071 
1072 struct i915_psr {
1073 	struct lock lock;
1074 	bool sink_support;
1075 	bool source_ok;
1076 	struct intel_dp *enabled;
1077 	bool active;
1078 	struct delayed_work work;
1079 	unsigned busy_frontbuffer_bits;
1080 	bool psr2_support;
1081 	bool aux_frame_sync;
1082 	bool link_standby;
1083 };
1084 
1085 enum intel_pch {
1086 	PCH_NONE = 0,	/* No PCH present */
1087 	PCH_IBX,	/* Ibexpeak PCH */
1088 	PCH_CPT,	/* Cougarpoint PCH */
1089 	PCH_LPT,	/* Lynxpoint PCH */
1090 	PCH_SPT,        /* Sunrisepoint PCH */
1091 	PCH_KBP,        /* Kabypoint PCH */
1092 	PCH_NOP,
1093 };
1094 
1095 enum intel_sbi_destination {
1096 	SBI_ICLK,
1097 	SBI_MPHY,
1098 };
1099 
1100 #define QUIRK_PIPEA_FORCE (1<<0)
1101 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
1102 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
1103 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
1104 #define QUIRK_PIPEB_FORCE (1<<4)
1105 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
1106 
1107 struct intel_fbdev;
1108 struct intel_fbc_work;
1109 
1110 struct intel_gmbus {
1111 	struct i2c_adapter adapter;
1112 #define GMBUS_FORCE_BIT_RETRY (1U << 31)
1113 	u32 force_bit;
1114 	u32 reg0;
1115 	i915_reg_t gpio_reg;
1116 	struct i2c_algo_bit_data bit_algo;
1117 	struct drm_i915_private *dev_priv;
1118 };
1119 
1120 struct i915_suspend_saved_registers {
1121 	u32 saveDSPARB;
1122 	u32 saveFBC_CONTROL;
1123 	u32 saveCACHE_MODE_0;
1124 	u32 saveMI_ARB_STATE;
1125 	u32 saveSWF0[16];
1126 	u32 saveSWF1[16];
1127 	u32 saveSWF3[3];
1128 	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
1129 	u32 savePCH_PORT_HOTPLUG;
1130 	u16 saveGCDGMBUS;
1131 };
1132 
1133 struct vlv_s0ix_state {
1134 	/* GAM */
1135 	u32 wr_watermark;
1136 	u32 gfx_prio_ctrl;
1137 	u32 arb_mode;
1138 	u32 gfx_pend_tlb0;
1139 	u32 gfx_pend_tlb1;
1140 	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
1141 	u32 media_max_req_count;
1142 	u32 gfx_max_req_count;
1143 	u32 render_hwsp;
1144 	u32 ecochk;
1145 	u32 bsd_hwsp;
1146 	u32 blt_hwsp;
1147 	u32 tlb_rd_addr;
1148 
1149 	/* MBC */
1150 	u32 g3dctl;
1151 	u32 gsckgctl;
1152 	u32 mbctl;
1153 
1154 	/* GCP */
1155 	u32 ucgctl1;
1156 	u32 ucgctl3;
1157 	u32 rcgctl1;
1158 	u32 rcgctl2;
1159 	u32 rstctl;
1160 	u32 misccpctl;
1161 
1162 	/* GPM */
1163 	u32 gfxpause;
1164 	u32 rpdeuhwtc;
1165 	u32 rpdeuc;
1166 	u32 ecobus;
1167 	u32 pwrdwnupctl;
1168 	u32 rp_down_timeout;
1169 	u32 rp_deucsw;
1170 	u32 rcubmabdtmr;
1171 	u32 rcedata;
1172 	u32 spare2gh;
1173 
1174 	/* Display 1 CZ domain */
1175 	u32 gt_imr;
1176 	u32 gt_ier;
1177 	u32 pm_imr;
1178 	u32 pm_ier;
1179 	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
1180 
1181 	/* GT SA CZ domain */
1182 	u32 tilectl;
1183 	u32 gt_fifoctl;
1184 	u32 gtlc_wake_ctrl;
1185 	u32 gtlc_survive;
1186 	u32 pmwgicz;
1187 
1188 	/* Display 2 CZ domain */
1189 	u32 gu_ctl0;
1190 	u32 gu_ctl1;
1191 	u32 pcbr;
1192 	u32 clock_gate_dis2;
1193 };
1194 
1195 struct intel_rps_ei {
1196 	u32 cz_clock;
1197 	u32 render_c0;
1198 	u32 media_c0;
1199 };
1200 
1201 struct intel_gen6_power_mgmt {
1202 	/*
1203 	 * work, interrupts_enabled and pm_iir are protected by
1204 	 * dev_priv->irq_lock
1205 	 */
1206 	struct work_struct work;
1207 	bool interrupts_enabled;
1208 	u32 pm_iir;
1209 
1210 	/* PM interrupt bits that should never be masked */
1211 	u32 pm_intr_keep;
1212 
1213 	/* Frequencies are stored in potentially platform dependent multiples.
1214 	 * In other words, *_freq needs a platform-specific multiplier to be meaningful.
1215 	 * Soft limits are those which are used for the dynamic reclocking done
1216 	 * by the driver (raise frequencies under heavy loads, and lower for
1217 	 * lighter loads). Hard limits are those imposed by the hardware.
1218 	 *
1219 	 * A distinction is made for overclocking, which is never enabled by
1220 	 * default, and is considered to be above the hard limit if it's
1221 	 * possible at all.
1222 	 */
1223 	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
1224 	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
1225 	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
1226 	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
1227 	u8 min_freq;		/* AKA RPn. Minimum frequency */
1228 	u8 boost_freq;		/* Frequency to request when wait boosting */
1229 	u8 idle_freq;		/* Frequency to request when we are idle */
1230 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
1231 	u8 rp1_freq;		/* "less than" RP0 power/frequency */
1232 	u8 rp0_freq;		/* Non-overclocked max frequency. */
1233 	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
1234 
1235 	u8 up_threshold; /* Current %busy required to upclock */
1236 	u8 down_threshold; /* Current %busy required to downclock */
1237 
1238 	int last_adj;
1239 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
1240 
1241 	spinlock_t client_lock;
1242 	struct list_head clients;
1243 	bool client_boost;
1244 
1245 	bool enabled;
1246 	struct delayed_work autoenable_work;
1247 	unsigned boosts;
1248 
1249 	/* manual wa residency calculations */
1250 	struct intel_rps_ei up_ei, down_ei;
1251 
1252 	/*
1253 	 * Protects RPS/RC6 register access and PCU communication.
1254 	 * Must be taken after struct_mutex if nested. Note that
1255 	 * this lock may be held for long periods of time when
1256 	 * talking to hw - so only take it when talking to hw!
1257 	 */
1258 	struct lock hw_lock;
1259 };
1260 
1261 /* defined intel_pm.c */
1262 extern spinlock_t mchdev_lock;
1263 
1264 struct intel_ilk_power_mgmt {
1265 	u8 cur_delay;
1266 	u8 min_delay;
1267 	u8 max_delay;
1268 	u8 fmax;
1269 	u8 fstart;
1270 
1271 	u64 last_count1;
1272 	unsigned long last_time1;
1273 	unsigned long chipset_power;
1274 	u64 last_count2;
1275 	u64 last_time2;
1276 	unsigned long gfx_power;
1277 	u8 corr;
1278 
1279 	int c_m;
1280 	int r_t;
1281 };
1282 
1283 struct drm_i915_private;
1284 struct i915_power_well;
1285 
1286 struct i915_power_well_ops {
1287 	/*
1288 	 * Synchronize the well's hw state to match the current sw state, for
1289 	 * example enable/disable it based on the current refcount. Called
1290 	 * during driver init and resume time, possibly after first calling
1291 	 * the enable/disable handlers.
1292 	 */
1293 	void (*sync_hw)(struct drm_i915_private *dev_priv,
1294 			struct i915_power_well *power_well);
1295 	/*
1296 	 * Enable the well and resources that depend on it (for example
1297 	 * interrupts located on the well). Called after the 0->1 refcount
1298 	 * transition.
1299 	 */
1300 	void (*enable)(struct drm_i915_private *dev_priv,
1301 		       struct i915_power_well *power_well);
1302 	/*
1303 	 * Disable the well and resources that depend on it. Called after
1304 	 * the 1->0 refcount transition.
1305 	 */
1306 	void (*disable)(struct drm_i915_private *dev_priv,
1307 			struct i915_power_well *power_well);
1308 	/* Returns the hw enabled state. */
1309 	bool (*is_enabled)(struct drm_i915_private *dev_priv,
1310 			   struct i915_power_well *power_well);
1311 };
1312 
1313 /* Power well structure for haswell */
1314 struct i915_power_well {
1315 	const char *name;
1316 	bool always_on;
1317 	/* power well enable/disable usage count */
1318 	int count;
1319 	/* cached hw enabled state */
1320 	bool hw_enabled;
1321 	unsigned long domains;
1322 	unsigned long data;
1323 	const struct i915_power_well_ops *ops;
1324 };
1325 
1326 struct i915_power_domains {
1327 	/*
1328 	 * Power wells needed for initialization at driver init and suspend
1329 	 * time are on. They are kept on until after the first modeset.
1330 	 */
1331 	bool init_power_on;
1332 	bool initializing;
1333 	int power_well_count;
1334 
1335 	struct lock lock;
1336 	int domain_use_count[POWER_DOMAIN_NUM];
1337 	struct i915_power_well *power_wells;
1338 };
1339 
1340 #define MAX_L3_SLICES 2
1341 struct intel_l3_parity {
1342 	u32 *remap_info[MAX_L3_SLICES];
1343 	struct work_struct error_work;
1344 	int which_slice;
1345 };
1346 
1347 struct i915_gem_mm {
1348 	/** Memory allocator for GTT stolen memory */
1349 	struct drm_mm stolen;
1350 	/** Protects the usage of the GTT stolen memory allocator. This is
1351 	 * always the inner lock when overlapping with struct_mutex. */
1352 	struct lock stolen_lock;
1353 
1354 	/** List of all objects in gtt_space. Used to restore gtt
1355 	 * mappings on resume */
1356 	struct list_head bound_list;
1357 	/**
1358 	 * List of objects which are not bound to the GTT (thus
1359 	 * are idle and not used by the GPU) but still have
1360 	 * (presumably uncached) pages still attached.
1361 	 */
1362 	struct list_head unbound_list;
1363 
1364 	/** Usable portion of the GTT for GEM */
1365 	unsigned long stolen_base; /* limited to low memory (32-bit) */
1366 
1367 	/** PPGTT used for aliasing the PPGTT with the GTT */
1368 	struct i915_hw_ppgtt *aliasing_ppgtt;
1369 
1370 	struct notifier_block oom_notifier;
1371 	struct notifier_block vmap_notifier;
1372 	struct shrinker shrinker;
1373 
1374 	/** LRU list of objects with fence regs on them. */
1375 	struct list_head fence_list;
1376 
1377 	/**
1378 	 * Are we in a non-interruptible section of code like
1379 	 * modesetting?
1380 	 */
1381 	bool interruptible;
1382 
1383 	/* the indicator for dispatch video commands on two BSD rings */
1384 	atomic_t bsd_engine_dispatch_index;
1385 
1386 	/** Bit 6 swizzling required for X tiling */
1387 	uint32_t bit_6_swizzle_x;
1388 	/** Bit 6 swizzling required for Y tiling */
1389 	uint32_t bit_6_swizzle_y;
1390 
1391 	/* accounting, useful for userland debugging */
1392 	spinlock_t object_stat_lock;
1393 	size_t object_memory;
1394 	u32 object_count;
1395 };
1396 
1397 struct drm_i915_error_state_buf {
1398 	struct drm_i915_private *i915;
1399 	unsigned bytes;
1400 	unsigned size;
1401 	int err;
1402 	u8 *buf;
1403 	loff_t start;
1404 	loff_t pos;
1405 };
1406 
1407 struct i915_error_state_file_priv {
1408 	struct drm_device *dev;
1409 	struct drm_i915_error_state *error;
1410 };
1411 
1412 struct i915_gpu_error {
1413 	/* For hangcheck timer */
1414 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1415 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1416 	/* Hang gpu twice in this window and your context gets banned */
1417 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
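	/* i.e. DIV_ROUND_UP(8 * 1500, 1000) == 12 seconds with the period above */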
1418 
1419 	struct delayed_work hangcheck_work;
1420 
1421 	/* For reset and error_state handling. */
1422 	spinlock_t lock;
1423 	/* Protected by the above dev->gpu_error.lock. */
1424 	struct drm_i915_error_state *first_error;
1425 
1426 	unsigned long missed_irq_rings;
1427 
1428 	/**
1429 	 * State variable controlling the reset flow and count
1430 	 *
1431 	 * This is a counter which gets incremented when reset is triggered.
1432 	 *
1433 	 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
1434 	 * meaning that any waiters holding onto the struct_mutex should
1435 	 * relinquish the lock immediately in order for the reset to start.
1436 	 *
1437 	 * If reset is not completed successfully, the I915_WEDGED bit is
1438 	 * set meaning that hardware is terminally sour and there is no
1439 	 * recovery. All waiters on the reset_queue will be woken when
1440 	 * that happens.
1441 	 *
1442 	 * This counter is used by the wait_seqno code to notice that a reset
1443 	 * event happened and it needs to restart the entire ioctl (since most
1444 	 * likely the seqno it waited for won't ever signal anytime soon).
1445 	 *
1446 	 * This is important for lock-free wait paths, where no contended lock
1447 	 * naturally enforces the correct ordering between the bail-out of the
1448 	 * waiter and the gpu reset work code.
1449 	 */
1450 	unsigned long reset_count;
1451 
1452 	unsigned long flags;
1453 #define I915_RESET_IN_PROGRESS	0
1454 #define I915_WEDGED		(BITS_PER_LONG - 1)
1455 
1456 	/**
1457 	 * Waitqueue to signal when a hang is detected. Used for waiters
1458 	 * to release the struct_mutex so the reset can proceed.
1459 	 */
1460 	wait_queue_head_t wait_queue;
1461 
1462 	/**
1463 	 * Waitqueue to signal when the reset has completed. Used by clients
1464 	 * that wait for dev_priv->mm.wedged to settle.
1465 	 */
1466 	wait_queue_head_t reset_queue;
1467 
1468 	/* For missed irq/seqno simulation. */
1469 	unsigned long test_irq_rings;
1470 };
1471 
1472 enum modeset_restore {
1473 	MODESET_ON_LID_OPEN,
1474 	MODESET_DONE,
1475 	MODESET_SUSPENDED,
1476 };
1477 
1478 #define DP_AUX_A 0x40
1479 #define DP_AUX_B 0x10
1480 #define DP_AUX_C 0x20
1481 #define DP_AUX_D 0x30
1482 
1483 #define DDC_PIN_B  0x05
1484 #define DDC_PIN_C  0x04
1485 #define DDC_PIN_D  0x06
1486 
1487 struct ddi_vbt_port_info {
1488 	/*
1489 	 * This is an index in the HDMI/DVI DDI buffer translation table.
1490 	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1491 	 * populate this field.
1492 	 */
1493 #define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
1494 	uint8_t hdmi_level_shift;
1495 
1496 	uint8_t supports_dvi:1;
1497 	uint8_t supports_hdmi:1;
1498 	uint8_t supports_dp:1;
1499 
1500 	uint8_t alternate_aux_channel;
1501 	uint8_t alternate_ddc_pin;
1502 
1503 	uint8_t dp_boost_level;
1504 	uint8_t hdmi_boost_level;
1505 };
1506 
1507 enum psr_lines_to_wait {
1508 	PSR_0_LINES_TO_WAIT = 0,
1509 	PSR_1_LINE_TO_WAIT,
1510 	PSR_4_LINES_TO_WAIT,
1511 	PSR_8_LINES_TO_WAIT
1512 };
1513 
1514 struct intel_vbt_data {
1515 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1516 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1517 
1518 	/* Feature bits */
1519 	unsigned int int_tv_support:1;
1520 	unsigned int lvds_dither:1;
1521 	unsigned int lvds_vbt:1;
1522 	unsigned int int_crt_support:1;
1523 	unsigned int lvds_use_ssc:1;
1524 	unsigned int display_clock_mode:1;
1525 	unsigned int fdi_rx_polarity_inverted:1;
1526 	unsigned int panel_type:4;
1527 	int lvds_ssc_freq;
1528 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1529 
1530 	enum drrs_support_type drrs_type;
1531 
1532 	struct {
1533 		int rate;
1534 		int lanes;
1535 		int preemphasis;
1536 		int vswing;
1537 		bool low_vswing;
1538 		bool initialized;
1539 		bool support;
1540 		int bpp;
1541 		struct edp_power_seq pps;
1542 	} edp;
1543 
1544 	struct {
1545 		bool full_link;
1546 		bool require_aux_wakeup;
1547 		int idle_frames;
1548 		enum psr_lines_to_wait lines_to_wait;
1549 		int tp1_wakeup_time;
1550 		int tp2_tp3_wakeup_time;
1551 	} psr;
1552 
1553 	struct {
1554 		u16 pwm_freq_hz;
1555 		bool present;
1556 		bool active_low_pwm;
1557 		u8 min_brightness;	/* min_brightness/255 of max */
1558 		enum intel_backlight_type type;
1559 	} backlight;
1560 
1561 	/* MIPI DSI */
1562 	struct {
1563 		u16 panel_id;
1564 		struct mipi_config *config;
1565 		struct mipi_pps_data *pps;
1566 		u8 seq_version;
1567 		u32 size;
1568 		u8 *data;
1569 		const u8 *sequence[MIPI_SEQ_MAX];
1570 	} dsi;
1571 
1572 	int crt_ddc_pin;
1573 
1574 	int child_dev_num;
1575 	union child_device_config *child_dev;
1576 
1577 	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1578 	struct sdvo_device_mapping sdvo_mappings[2];
1579 };
1580 
1581 enum intel_ddb_partitioning {
1582 	INTEL_DDB_PART_1_2,
1583 	INTEL_DDB_PART_5_6, /* IVB+ */
1584 };
1585 
1586 struct intel_wm_level {
1587 	bool enable;
1588 	uint32_t pri_val;
1589 	uint32_t spr_val;
1590 	uint32_t cur_val;
1591 	uint32_t fbc_val;
1592 };
1593 
1594 struct ilk_wm_values {
1595 	uint32_t wm_pipe[3];
1596 	uint32_t wm_lp[3];
1597 	uint32_t wm_lp_spr[3];
1598 	uint32_t wm_linetime[3];
1599 	bool enable_fbc_wm;
1600 	enum intel_ddb_partitioning partitioning;
1601 };
1602 
1603 struct vlv_pipe_wm {
1604 	uint16_t primary;
1605 	uint16_t sprite[2];
1606 	uint8_t cursor;
1607 };
1608 
1609 struct vlv_sr_wm {
1610 	uint16_t plane;
1611 	uint8_t cursor;
1612 };
1613 
1614 struct vlv_wm_values {
1615 	struct vlv_pipe_wm pipe[3];
1616 	struct vlv_sr_wm sr;
1617 	struct {
1618 		uint8_t cursor;
1619 		uint8_t sprite[2];
1620 		uint8_t primary;
1621 	} ddl[3];
1622 	uint8_t level;
1623 	bool cxsr;
1624 };
1625 
1626 struct skl_ddb_entry {
1627 	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
1628 };
1629 
1630 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1631 {
1632 	return entry->end - entry->start;
1633 }
1634 
1635 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1636 				       const struct skl_ddb_entry *e2)
1637 {
1638 	if (e1->start == e2->start && e1->end == e2->end)
1639 		return true;
1640 
1641 	return false;
1642 }
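/*
 * Example (values invented): { .start = 0, .end = 16 } spans 16 blocks, and
 * because 'end' is exclusive it may abut { .start = 16, .end = 32 } without
 * the two allocations overlapping.
 */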
1643 
1644 struct skl_ddb_allocation {
1645 	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
1646 	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
1647 };
1648 
1649 struct skl_wm_values {
1650 	unsigned dirty_pipes;
1651 	struct skl_ddb_allocation ddb;
1652 };
1653 
1654 struct skl_wm_level {
1655 	bool plane_en;
1656 	uint16_t plane_res_b;
1657 	uint8_t plane_res_l;
1658 };
1659 
1660 /*
1661  * This struct helps tracking the state needed for runtime PM, which puts the
1662  * device in PCI D3 state. Notice that when this happens, nothing on the
1663  * graphics device works, even register access, so we don't get interrupts nor
1664  * anything else.
1665  *
1666  * Every piece of our code that needs to actually touch the hardware needs to
1667  * either call intel_runtime_pm_get or call intel_display_power_get with the
1668  * appropriate power domain.
1669  *
1670  * Our driver uses the autosuspend delay feature, which means we'll only really
1671  * suspend if we stay with zero refcount for a certain amount of time. The
1672  * default value is currently very conservative (see intel_runtime_pm_enable), but
1673  * it can be changed with the standard runtime PM files from sysfs.
1674  *
1675  * The irqs_enabled variable becomes false exactly after we disable the IRQs and
1676  * goes back to true exactly before we reenable the IRQs. We use this variable
1677  * to check if someone is trying to enable/disable IRQs while they're supposed
1678  * to be disabled. This shouldn't happen and we'll print some error messages in
1679  * case it happens.
1680  *
1681  * For more, read the Documentation/power/runtime_pm.txt.
1682  */
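/*
 * A minimal usage sketch (the get/put helpers are declared elsewhere in
 * the driver, not in this header):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... read/write registers ...
 *	intel_runtime_pm_put(dev_priv);
 */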
1683 struct i915_runtime_pm {
1684 	atomic_t wakeref_count;
1685 	atomic_t atomic_seq;
1686 	bool suspended;
1687 	bool irqs_enabled;
1688 };
1689 
1690 enum intel_pipe_crc_source {
1691 	INTEL_PIPE_CRC_SOURCE_NONE,
1692 	INTEL_PIPE_CRC_SOURCE_PLANE1,
1693 	INTEL_PIPE_CRC_SOURCE_PLANE2,
1694 	INTEL_PIPE_CRC_SOURCE_PF,
1695 	INTEL_PIPE_CRC_SOURCE_PIPE,
1696 	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
1697 	INTEL_PIPE_CRC_SOURCE_TV,
1698 	INTEL_PIPE_CRC_SOURCE_DP_B,
1699 	INTEL_PIPE_CRC_SOURCE_DP_C,
1700 	INTEL_PIPE_CRC_SOURCE_DP_D,
1701 	INTEL_PIPE_CRC_SOURCE_AUTO,
1702 	INTEL_PIPE_CRC_SOURCE_MAX,
1703 };
1704 
1705 struct intel_pipe_crc_entry {
1706 	uint32_t frame;
1707 	uint32_t crc[5];
1708 };
1709 
1710 #define INTEL_PIPE_CRC_ENTRIES_NR	128
1711 struct intel_pipe_crc {
1712 	spinlock_t lock;
1713 	bool opened;		/* exclusive access to the result file */
1714 	struct intel_pipe_crc_entry *entries;
1715 	enum intel_pipe_crc_source source;
1716 	int head, tail;
1717 	wait_queue_head_t wq;
1718 };
1719 
1720 struct i915_frontbuffer_tracking {
1721 	spinlock_t lock;
1722 
1723 	/*
1724 	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1725 	 * scheduled flips.
1726 	 */
1727 	unsigned busy_bits;
1728 	unsigned flip_bits;
1729 };
1730 
1731 struct i915_wa_reg {
1732 	i915_reg_t addr;
1733 	u32 value;
1734 	/* bitmask representing WA bits */
1735 	u32 mask;
1736 };
1737 
1738 /*
1739  * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
1740  * allowing it for RCS as we don't foresee any requirement of having
1741  * a whitelist for other engines. When it is really required for
1742  * other engines, the limit will need to be increased.
1743  */
1744 #define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
1745 
1746 struct i915_workarounds {
1747 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
1748 	u32 count;
1749 	u32 hw_whitelist_count[I915_NUM_ENGINES];
1750 };
1751 
1752 struct i915_virtual_gpu {
1753 	bool active;
1754 };
1755 
1756 /* used in computing the new watermarks state */
1757 struct intel_wm_config {
1758 	unsigned int num_pipes_active;
1759 	bool sprites_enabled;
1760 	bool sprites_scaled;
1761 };
1762 
1763 struct drm_i915_private {
1764 	struct drm_device drm;
1765 
1766 	struct kmem_cache *objects;
1767 	struct kmem_cache *vmas;
1768 	struct kmem_cache *requests;
1769 
1770 	const struct intel_device_info info;
1771 
1772 	int relative_constants_mode;
1773 
1774 	void __iomem *regs;
1775 
1776 	struct intel_uncore uncore;
1777 
1778 	struct i915_virtual_gpu vgpu;
1779 
1780 	struct intel_gvt *gvt;
1781 
1782 	struct intel_guc guc;
1783 
1784 	struct intel_csr csr;
1785 
1786 	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
1787 
1788 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
1789 	 * controller on different i2c buses. */
1790 	struct lock gmbus_mutex;
1791 
1792 	/**
1793 	 * Base address of the gmbus and gpio block.
1794 	 */
1795 	uint32_t gpio_mmio_base;
1796 
1797 	/* MMIO base address for MIPI regs */
1798 	uint32_t mipi_mmio_base;
1799 
1800 	uint32_t psr_mmio_base;
1801 
1802 	uint32_t pps_mmio_base;
1803 
1804 	wait_queue_head_t gmbus_wait_queue;
1805 
1806 	struct pci_dev *bridge_dev;
1807 	struct i915_gem_context *kernel_context;
1808 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
1809 	struct i915_vma *semaphore;
1810 	u32 next_seqno;
1811 
1812 	struct drm_dma_handle *status_page_dmah;
1813 	struct resource *mch_res;
1814 	int mch_res_rid;
1815 
1816 	/* protects the irq masks */
1817 	spinlock_t irq_lock;
1818 
1819 	/* protects the mmio flip data */
1820 	spinlock_t mmio_flip_lock;
1821 
1822 	bool display_irqs_enabled;
1823 
1824 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1825 	struct pm_qos_request pm_qos;
1826 
1827 	/* Sideband mailbox protection */
1828 	struct lock sb_lock;
1829 
1830 	/** Cached value of IMR to avoid reads in updating the bitfield */
1831 	union {
1832 		u32 irq_mask;
1833 		u32 de_irq_mask[I915_MAX_PIPES];
1834 	};
1835 	u32 gt_irq_mask;
1836 	u32 pm_irq_mask;
1837 	u32 pm_rps_events;
1838 	u32 pipestat_irq_mask[I915_MAX_PIPES];
1839 
1840 	struct i915_hotplug hotplug;
1841 	struct intel_fbc fbc;
1842 	struct i915_drrs drrs;
1843 	struct intel_opregion opregion;
1844 	struct intel_vbt_data vbt;
1845 
1846 	bool preserve_bios_swizzle;
1847 
1848 	/* overlay */
1849 	struct intel_overlay *overlay;
1850 
1851 	/* backlight registers and fields in struct intel_panel */
1852 	struct lock backlight_lock;
1853 
1854 	/* LVDS info */
1855 	bool no_aux_handshake;
1856 
1857 	/* protects panel power sequencer state */
1858 	struct lock pps_mutex;
1859 
1860 	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1861 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1862 
1863 	unsigned int fsb_freq, mem_freq, is_ddr3;
1864 	unsigned int skl_preferred_vco_freq;
1865 	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
1866 	unsigned int max_dotclk_freq;
1867 	unsigned int rawclk_freq;
1868 	unsigned int hpll_freq;
1869 	unsigned int czclk_freq;
1870 
1871 	struct {
1872 		unsigned int vco, ref;
1873 	} cdclk_pll;
1874 
1875 	/**
1876 	 * wq - Driver workqueue for GEM.
1877 	 *
1878 	 * NOTE: Work items scheduled here are not allowed to grab any modeset
1879 	 * locks, for otherwise the flushing done in the pageflip code will
1880 	 * result in deadlocks.
1881 	 */
1882 	struct workqueue_struct *wq;
1883 
1884 	/* Display functions */
1885 	struct drm_i915_display_funcs display;
1886 
1887 	/* PCH chipset type */
1888 	enum intel_pch pch_type;
1889 	unsigned short pch_id;
1890 
1891 	unsigned long quirks;
1892 
1893 	enum modeset_restore modeset_restore;
1894 	struct lock modeset_restore_lock;
1895 	struct drm_atomic_state *modeset_restore_state;
1896 	struct drm_modeset_acquire_ctx reset_ctx;
1897 
1898 	struct list_head vm_list; /* Global list of all address spaces */
1899 	struct i915_ggtt ggtt; /* VM representing the global address space */
1900 
1901 	struct i915_gem_mm mm;
1902 	DECLARE_HASHTABLE(mm_structs, 7);
1903 	struct lock mm_lock;
1904 
1905 	/* The hw wants to have a stable context identifier for the lifetime
1906 	 * of the context (for OA, PASID, faults, etc). This is limited
1907 	 * in execlists to 21 bits.
1908 	 */
1909 	struct ida context_hw_ida;
1910 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1911 
1912 	/* Kernel Modesetting */
1913 
1914 	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1915 	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1916 	wait_queue_head_t pending_flip_queue;
1917 
1918 #ifdef CONFIG_DEBUG_FS
1919 	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1920 #endif
1921 
1922 	/* dpll and cdclk state is protected by connection_mutex */
1923 	int num_shared_dpll;
1924 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1925 	const struct intel_dpll_mgr *dpll_mgr;
1926 
1927 	/*
1928 	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
1929 	 * Must be global rather than per dpll, because on some platforms
1930 	 * plls share registers.
1931 	 */
1932 	struct lock dpll_lock;
1933 
1934 	unsigned int active_crtcs;
1935 	unsigned int min_pixclk[I915_MAX_PIPES];
1936 
1937 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1938 
1939 	struct i915_workarounds workarounds;
1940 
1941 	struct i915_frontbuffer_tracking fb_tracking;
1942 
1943 	u16 orig_clock;
1944 
1945 	bool mchbar_need_disable;
1946 
1947 	struct intel_l3_parity l3_parity;
1948 
1949 	/* Cannot be determined by PCIID. You must always read a register. */
1950 	u32 edram_cap;
1951 
1952 	/* gen6+ rps state */
1953 	struct intel_gen6_power_mgmt rps;
1954 
1955 	/* ilk-only ips/rps state. Everything in here is protected by the global
1956 	 * mchdev_lock in intel_pm.c */
1957 	struct intel_ilk_power_mgmt ips;
1958 
1959 	struct i915_power_domains power_domains;
1960 
1961 	struct i915_psr psr;
1962 
1963 	struct i915_gpu_error gpu_error;
1964 
1965 	struct drm_i915_gem_object *vlv_pctx;
1966 
1967 #ifdef CONFIG_DRM_FBDEV_EMULATION
1968 	/* fbdev registered on this device */
1969 	struct intel_fbdev *fbdev;
1970 	struct work_struct fbdev_suspend_work;
1971 #endif
1972 
1973 	struct drm_property *broadcast_rgb_property;
1974 	struct drm_property *force_audio_property;
1975 
1976 	/* hda/i915 audio component */
1977 	struct i915_audio_component *audio_component;
1978 	bool audio_component_registered;
1979 	/** av_mutex - mutex for audio/video sync */
1983 	struct lock av_mutex;
1984 
1985 	uint32_t hw_context_size;
1986 	struct list_head context_list;
1987 
1988 	u32 fdi_rx_config;
1989 
1990 	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
1991 	u32 chv_phy_control;
1992 	/*
1993 	 * Shadows for CHV DPLL_MD regs to keep the state
1994 	 * checker somewhat working in the presence of hardware
1995 	 * crappiness (can't read out DPLL_MD for pipes B & C).
1996 	 */
1997 	u32 chv_dpll_md[I915_MAX_PIPES];
1998 	u32 bxt_phy_grc;
1999 
2000 	u32 suspend_count;
2001 	bool suspended_to_idle;
2002 	struct i915_suspend_saved_registers regfile;
2003 	struct vlv_s0ix_state vlv_s0ix_state;
2004 
2005 	enum {
2006 		I915_SAGV_UNKNOWN = 0,
2007 		I915_SAGV_DISABLED,
2008 		I915_SAGV_ENABLED,
2009 		I915_SAGV_NOT_CONTROLLED
2010 	} sagv_status;
2011 
2012 	struct {
2013 		/*
2014 		 * Raw watermark latency values:
2015 		 * in 0.1us units for WM0,
2016 		 * in 0.5us units for WM1+.
2017 		 */
2018 		/* primary */
2019 		uint16_t pri_latency[5];
2020 		/* sprite */
2021 		uint16_t spr_latency[5];
2022 		/* cursor */
2023 		uint16_t cur_latency[5];
2024 		/*
2025 		 * Raw watermark memory latency values
2026 		 * for SKL for all 8 levels
2027 		 * in 1us units.
2028 		 */
2029 		uint16_t skl_latency[8];
2030 
2031 		/*
2032 		 * The skl_wm_values structure is a bit too big for stack
2033 		 * allocation, so we keep the staging struct where we store
2034 		 * intermediate results here instead.
2035 		 */
2036 		struct skl_wm_values skl_results;
2037 
2038 		/* current hardware state */
2039 		union {
2040 			struct ilk_wm_values hw;
2041 			struct skl_wm_values skl_hw;
2042 			struct vlv_wm_values vlv;
2043 		};
2044 
2045 		uint8_t max_level;
2046 
2047 		/*
2048 		 * Should be held around atomic WM register writing; also
2049 		 * protects intel_crtc->wm.active and
2050 		 * cstate->wm.need_postvbl_update.
2051 		 */
2052 		struct lock wm_mutex;
2053 
2054 		/*
2055 		 * Set during HW readout of watermarks/DDB.  Some platforms
2056 		 * need to know when we're still using BIOS-provided values
2057 		 * (which we don't fully trust).
2058 		 */
2059 		bool distrust_bios_wm;
2060 	} wm;
2061 
2062 	struct i915_runtime_pm pm;
2063 
2064 	uint32_t bios_vgacntr;
2065 
2066 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2067 	struct {
2068 		void (*resume)(struct drm_i915_private *);
2069 		void (*cleanup_engine)(struct intel_engine_cs *engine);
2070 
2071 		/**
2072 		 * Is the GPU currently considered idle, or busy executing
2073 		 * userspace requests? Whilst idle, we allow runtime power
2074 		 * management to power down the hardware and display clocks.
2075 		 * In order to reduce the effect on performance, there
2076 		 * is a slight delay before we do so.
2077 		 */
2078 		unsigned int active_engines;
2079 		bool awake;
2080 
2081 		/**
2082 		 * We leave the user IRQ off as much as possible,
2083 		 * but this means that requests will finish and never
2084 		 * be retired once the system goes idle. Set a timer to
2085 		 * fire periodically while the ring is running. When it
2086 		 * fires, go retire requests.
2087 		 */
2088 		struct delayed_work retire_work;
2089 
2090 		/**
2091 		 * When we detect an idle GPU, we want to turn on
2092 		 * powersaving features. So once we see that there
2093 		 * are no more requests outstanding and no more
2094 		 * arrive within a small period of time, we fire
2095 		 * off the idle_work.
2096 		 */
2097 		struct delayed_work idle_work;
2098 	} gt;
2099 
2100 	/* perform PHY state sanity checks? */
2101 	bool chv_phy_assert[2];
2102 
2103 	/* Used to save the pipe-to-encoder mapping for audio */
2104 	struct intel_encoder *av_enc_map[I915_MAX_PIPES];
2105 
2106 	/*
2107 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2108 	 * will be rejected. Instead look for a better place.
2109 	 */
2110 };
2111 
2112 static inline struct drm_i915_private *to_i915(struct drm_device *dev)
2113 {
2114 	return container_of(dev, struct drm_i915_private, drm);
2115 }
2116 
2117 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
2118 {
2119 	return to_i915(dev_get_drvdata(kdev));
2120 }
2121 
2122 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2123 {
2124 	return container_of(guc, struct drm_i915_private, guc);
2125 }
2126 
2127 /* Simple iterator over all initialised engines */
2128 #define for_each_engine(engine__, dev_priv__, id__) \
2129 	for ((id__) = 0; \
2130 	     (id__) < I915_NUM_ENGINES; \
2131 	     (id__)++) \
2132 		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
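/*
 * Illustrative sketch, not part of this header: walking every initialised
 * engine. The local variable names below are hypothetical.
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *
 *	for_each_engine(engine, dev_priv, id)
 *		intel_engine_init_hangcheck(engine);
 */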
2133 
2134 #define __mask_next_bit(mask) ({					\
2135 	int __idx = ffs(mask) - 1;					\
2136 	mask &= ~BIT(__idx);						\
2137 	__idx;								\
2138 })
2139 
2140 /* Iterator over subset of engines selected by mask */
2141 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2142 	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
2143 	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
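/*
 * Illustrative sketch, not part of this header: visiting only the engines
 * selected by a caller-supplied mask (e.g. an engine_mask passed to error
 * handling). __mask_next_bit() consumes the lowest set bit of the mutable
 * temporary on each iteration; the local names are hypothetical.
 *
 *	struct intel_engine_cs *engine;
 *	unsigned int tmp;
 *
 *	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 *		intel_engine_init_hangcheck(engine);
 */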
2144 
2145 enum hdmi_force_audio {
2146 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
2147 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
2148 	HDMI_AUDIO_AUTO,		/* trust EDID */
2149 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
2150 };
2151 
2152 #define I915_GTT_OFFSET_NONE ((u32)-1)
2153 
2154 struct drm_i915_gem_object_ops {
2155 	unsigned int flags;
2156 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
2157 
2158 	/* Interface between the GEM object and its backing storage.
2159 	 * get_pages() is called once prior to the use of the associated set
2160 	 * of pages before binding them into the GTT, and put_pages() is
2161 	 * called after we no longer need them. As we expect there to be an
2162 	 * associated cost with migrating pages between the backing storage
2163 	 * and making them available for the GPU (e.g. clflush), we may hold
2164 	 * onto the pages after they are no longer referenced by the GPU
2165 	 * in case they may be used again shortly (for example migrating the
2166 	 * pages to a different memory domain within the GTT). put_pages()
2167 	 * will therefore most likely be called when the object itself is
2168 	 * being released or under memory pressure (where we attempt to
2169 	 * reap pages for the shrinker).
2170 	 */
2171 	int (*get_pages)(struct drm_i915_gem_object *);
2172 	void (*put_pages)(struct drm_i915_gem_object *);
2173 
2174 	int (*dmabuf_export)(struct drm_i915_gem_object *);
2175 	void (*release)(struct drm_i915_gem_object *);
2176 };
2177 
2178 /*
2179  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
2180  * considered to be the frontbuffer for the given plane interface-wise. This
2181  * doesn't mean that the hw necessarily already scans it out, but that any
2182  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2183  *
2184  * We have one bit per pipe and per scanout plane type.
2185  */
2186 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2187 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
2188 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2189 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2190 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
2191 	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2192 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2193 	(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2194 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
2195 	(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2196 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
2197 	(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
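/*
 * Worked example of the bit layout above: with 8 bits per pipe, the cursor
 * bit for pipe B (pipe == 1) is INTEL_FRONTBUFFER_CURSOR(1) ==
 * 1 << (1 + 8 * 1), i.e. bit 9, while INTEL_FRONTBUFFER_ALL_MASK(1)
 * covers bits 8-15.
 */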
2198 
2199 struct drm_i915_gem_object {
2200 	struct drm_gem_object base;
2201 
2202 	const struct drm_i915_gem_object_ops *ops;
2203 
2204 	/** List of VMAs backed by this object */
2205 	struct list_head vma_list;
2206 
2207 	/** Stolen memory for this object, instead of being backed by shmem. */
2208 	struct drm_mm_node *stolen;
2209 	struct list_head global_list;
2210 
2211 	/** Used in execbuf to temporarily hold a ref */
2212 	struct list_head obj_exec_link;
2213 
2214 	struct list_head batch_pool_link;
2215 
2216 	unsigned long flags;
2217 	/**
2218 	 * This is set if the object is on the active lists (has pending
2219 	 * rendering and so a non-zero seqno), and is not set if it is on
2220 	 * the inactive (ready to be unbound) list.
2221 	 */
2222 #define I915_BO_ACTIVE_SHIFT 0
2223 #define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
2224 #define __I915_BO_ACTIVE(bo) \
2225 	((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
2226 
2227 	/**
2228 	 * This is set if the object has been written to since last bound
2229 	 * to the GTT
2230 	 */
2231 	unsigned int dirty:1;
2232 
2233 	/**
2234 	 * Advice: are the backing pages purgeable?
2235 	 */
2236 	unsigned int madv:2;
2237 
2238 	/**
2239 	 * Whether the current gtt mapping needs to be mappable (and isn't just
2240 	 * mappable by accident). Track pin and fault separately for a more
2241 	 * accurate mappable working set.
2242 	 */
2243 	unsigned int fault_mappable:1;
2244 
2245 	/*
2246 	 * Is the object to be mapped as read-only to the GPU?
2247 	 * Only honoured if the hardware has the relevant pte bit.
2248 	 */
2249 	unsigned long gt_ro:1;
2250 	unsigned int cache_level:3;
2251 	unsigned int cache_dirty:1;
2252 
2253 	atomic_t frontbuffer_bits;
2254 	unsigned int frontbuffer_ggtt_origin; /* write once */
2255 
2256 	/** Current tiling stride for the object, if it's tiled. */
2257 	unsigned int tiling_and_stride;
2258 #define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
2259 #define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
2260 #define STRIDE_MASK (~TILING_MASK)
2261 
2262 	/** Count of VMA actually bound by this object */
2263 	unsigned int bind_count;
2264 	unsigned int pin_display;
2265 
2266 	struct sg_table *pages;
2267 	int pages_pin_count;
2268 	struct get_page {
2269 		struct scatterlist *sg;
2270 		int last;
2271 	} get_page;
2272 	void *mapping;
2273 
2274 	/** Breadcrumb of last rendering to the buffer.
2275 	 * There can only be one writer, but we allow for multiple readers.
2276 	 * If there is a writer, that necessarily implies that all other
2277 	 * read requests are complete - but we may only be lazily clearing
2278 	 * the read requests. A read request is naturally the most recent
2279 	 * request on a ring, so we may have two different write and read
2280 	 * requests on one ring where the write request is older than the
2281 	 * read request. This allows for the CPU to read from an active
2282 	 * buffer by only waiting for the write to complete.
2283 	 */
2284 	struct i915_gem_active last_read[I915_NUM_ENGINES];
2285 	struct i915_gem_active last_write;
2286 
2287 	/** References from framebuffers, locks out tiling changes. */
2288 	unsigned long framebuffer_references;
2289 
2290 	/** Record of address bit 17 of each page at last unbind. */
2291 	unsigned long *bit_17;
2292 
2293 	struct i915_gem_userptr {
2294 		uintptr_t ptr;
2295 		unsigned read_only :1;
2296 		unsigned workers :4;
2297 #define I915_GEM_USERPTR_MAX_WORKERS 15
2298 
2299 		struct i915_mm_struct *mm;
2300 		struct i915_mmu_object *mmu_object;
2301 		struct work_struct *work;
2302 	} userptr;
2303 
2304 	/** for phys allocated objects */
2305 	struct drm_dma_handle *phys_handle;
2306 };
2307 
2308 static inline struct drm_i915_gem_object *
2309 to_intel_bo(struct drm_gem_object *gem)
2310 {
2311 	/* Assert that to_intel_bo(NULL) == NULL */
2312 	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
2313 
2314 	return container_of(gem, struct drm_i915_gem_object, base);
2315 }
2316 
2317 static inline struct drm_i915_gem_object *
2318 i915_gem_object_lookup(struct drm_file *file, u32 handle)
2319 {
2320 	return to_intel_bo(drm_gem_object_lookup(file, handle));
2321 }
2322 
2323 __attribute__((nonnull))
2324 static inline struct drm_i915_gem_object *
2325 i915_gem_object_get(struct drm_i915_gem_object *obj)
2326 {
2327 	drm_gem_object_reference(&obj->base);
2328 	return obj;
2329 }
2330 
2331 __attribute__((nonnull))
2332 static inline void
2333 i915_gem_object_put(struct drm_i915_gem_object *obj)
2334 {
2335 	drm_gem_object_unreference(&obj->base);
2336 }
2337 
2338 __attribute__((nonnull))
2339 static inline void
2340 i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
2341 {
2342 	drm_gem_object_unreference_unlocked(&obj->base);
2343 }
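/*
 * Illustrative sketch, not part of this header: the usual lookup/put
 * pairing. i915_gem_object_lookup() returns a referenced object or NULL;
 * the reference must be dropped again, via the _unlocked variant when
 * struct_mutex is not held. Local names are hypothetical.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... operate on obj ...
 *	i915_gem_object_put_unlocked(obj);
 */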
2344 
2345 static inline bool
2346 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
2347 {
2348 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
2349 }
2350 
2351 static inline unsigned long
2352 i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
2353 {
2354 	return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
2355 }
2356 
2357 static inline bool
2358 i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
2359 {
2360 	return i915_gem_object_get_active(obj);
2361 }
2362 
2363 static inline void
2364 i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
2365 {
2366 	obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
2367 }
2368 
2369 static inline void
2370 i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
2371 {
2372 	obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
2373 }
2374 
2375 static inline bool
2376 i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
2377 				  int engine)
2378 {
2379 	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
2380 }
2381 
2382 static inline unsigned int
2383 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
2384 {
2385 	return obj->tiling_and_stride & TILING_MASK;
2386 }
2387 
2388 static inline bool
2389 i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
2390 {
2391 	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
2392 }
2393 
2394 static inline unsigned int
2395 i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
2396 {
2397 	return obj->tiling_and_stride & STRIDE_MASK;
2398 }
2399 
2400 static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
2401 {
2402 	i915_gem_object_get(vma->obj);
2403 	return vma;
2404 }
2405 
2406 static inline void i915_vma_put(struct i915_vma *vma)
2407 {
2408 	lockdep_assert_held(&vma->vm->dev->struct_mutex);
2409 	i915_gem_object_put(vma->obj);
2410 }
2411 
2412 /*
2413  * Optimised SGL iterator for GEM objects
2414  */
2415 static __always_inline struct sgt_iter {
2416 	struct scatterlist *sgp;
2417 	union {
2418 		unsigned long pfn;
2419 		dma_addr_t dma;
2420 	};
2421 	unsigned int curr;
2422 	unsigned int max;
2423 } __sgt_iter(struct scatterlist *sgl, bool dma) {
2424 	struct sgt_iter s = { .sgp = sgl };
2425 
2426 	if (s.sgp) {
2427 		s.max = s.curr = s.sgp->offset;
2428 		s.max += s.sgp->length;
2429 		if (dma)
2430 			s.dma = sg_dma_address(s.sgp);
2431 		else
2432 			s.pfn = page_to_pfn(sg_page(s.sgp));
2433 	}
2434 
2435 	return s;
2436 }
2437 
2438 /**
2439  * __sg_next - return the next scatterlist entry in a list
2440  * @sg:		The current sg entry
2441  *
2442  * Description:
2443  *   If the entry is the last, return NULL; otherwise, step to the next
2444  *   element in the array (@sg@+1). If that's a chain pointer, follow it;
2445  *   otherwise just return the pointer to the current element.
2446  **/
2447 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2448 {
2449 #ifdef CONFIG_DEBUG_SG
2450 	BUG_ON(sg->sg_magic != SG_MAGIC);
2451 #endif
2452 	return sg_is_last(sg) ? NULL :
2453 		likely(!sg_is_chain(++sg)) ? sg :
2454 		sg_chain_ptr(sg);
2455 }
2456 
2457 /**
2458  * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2459  * @__dmap:	DMA address (output)
2460  * @__iter:	'struct sgt_iter' (iterator state, internal)
2461  * @__sgt:	sg_table to iterate over (input)
2462  */
2463 #define for_each_sgt_dma(__dmap, __iter, __sgt)				\
2464 	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
2465 	     ((__dmap) = (__iter).dma + (__iter).curr);			\
2466 	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
2467 	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
2468 
2469 /**
2470  * for_each_sgt_page - iterate over the pages of the given sg_table
2471  * @__pp:	page pointer (output)
2472  * @__iter:	'struct sgt_iter' (iterator state, internal)
2473  * @__sgt:	sg_table to iterate over (input)
2474  */
2475 #define for_each_sgt_page(__pp, __iter, __sgt)				\
2476 	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
2477 	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
2478 	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2479 	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
2480 	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
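/*
 * Illustrative sketch, not part of this header: flushing each backing page
 * of an object with the iterator above. The iterator state lives in a
 * caller-provided struct sgt_iter; local names are hypothetical.
 *
 *	struct sgt_iter sgt_iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, sgt_iter, obj->pages)
 *		drm_clflush_pages(&page, 1);
 */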
2481 
2482 /*
2483  * A command that requires special handling by the command parser.
2484  */
2485 struct drm_i915_cmd_descriptor {
2486 	/*
2487 	 * Flags describing how the command parser processes the command.
2488 	 *
2489 	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
2490 	 *                 a length mask if not set
2491 	 * CMD_DESC_SKIP: The command is allowed but does not follow the
2492 	 *                standard length encoding for the opcode range in
2493 	 *                which it falls
2494 	 * CMD_DESC_REJECT: The command is never allowed
2495 	 * CMD_DESC_REGISTER: The command should be checked against the
2496 	 *                    register whitelist for the appropriate ring
2497 	 * CMD_DESC_MASTER: The command is allowed if the submitting process
2498 	 *                  is the DRM master
2499 	 */
2500 	u32 flags;
2501 #define CMD_DESC_FIXED    (1<<0)
2502 #define CMD_DESC_SKIP     (1<<1)
2503 #define CMD_DESC_REJECT   (1<<2)
2504 #define CMD_DESC_REGISTER (1<<3)
2505 #define CMD_DESC_BITMASK  (1<<4)
2506 #define CMD_DESC_MASTER   (1<<5)
2507 
2508 	/*
2509 	 * The command's unique identification bits and the bitmask to get them.
2510 	 * This isn't strictly the opcode field as defined in the spec and may
2511 	 * also include type, subtype, and/or subop fields.
2512 	 */
2513 	struct {
2514 		u32 value;
2515 		u32 mask;
2516 	} cmd;
2517 
2518 	/*
2519 	 * The command's length. The command is either fixed length (i.e. does
2520 	 * not include a length field) or has a length field mask. The flag
2521 	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
2522 	 * a length mask. All command entries in a command table must include
2523 	 * length information.
2524 	 */
2525 	union {
2526 		u32 fixed;
2527 		u32 mask;
2528 	} length;
2529 
2530 	/*
2531 	 * Describes where to find a register address in the command to check
2532 	 * against the ring's register whitelist. Only valid if flags has the
2533 	 * CMD_DESC_REGISTER bit set.
2534 	 *
2535 	 * A non-zero step value implies that the command may access multiple
2536 	 * registers in sequence (e.g. LRI), in that case step gives the
2537 	 * distance in dwords between individual offset fields.
2538 	 */
2539 	struct {
2540 		u32 offset;
2541 		u32 mask;
2542 		u32 step;
2543 	} reg;
2544 
2545 #define MAX_CMD_DESC_BITMASKS 3
2546 	/*
2547 	 * Describes command checks where a particular dword is masked and
2548 	 * compared against an expected value. If the command does not match
2549 	 * the expected value, the parser rejects it. Only valid if flags has
2550 	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
2551 	 * are valid.
2552 	 *
2553 	 * If the check specifies a non-zero condition_mask then the parser
2554 	 * only performs the check when the bits specified by condition_mask
2555 	 * are non-zero.
2556 	 */
2557 	struct {
2558 		u32 offset;
2559 		u32 mask;
2560 		u32 expected;
2561 		u32 condition_offset;
2562 		u32 condition_mask;
2563 	} bits[MAX_CMD_DESC_BITMASKS];
2564 };
2565 
2566 /*
2567  * A table of commands requiring special handling by the command parser.
2568  *
2569  * Each engine has an array of tables. Each table consists of an array of
2570  * command descriptors, which must be sorted with command opcodes in
2571  * ascending order.
2572  */
2573 struct drm_i915_cmd_table {
2574 	const struct drm_i915_cmd_descriptor *table;
2575 	int count;
2576 };
2577 
2578 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
2579 #define __I915__(p) ({ \
2580 	struct drm_i915_private *__p; \
2581 	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
2582 		__p = (struct drm_i915_private *)p; \
2583 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
2584 		__p = to_i915((struct drm_device *)p); \
2585 	else \
2586 		BUILD_BUG(); \
2587 	__p; \
2588 })
2589 #define INTEL_INFO(p)	(&__I915__(p)->info)
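/*
 * Illustrative usage, sketch only: thanks to __I915__(), the feature
 * macros accept either a struct drm_device * or a struct drm_i915_private *,
 * resolved at compile time.
 *
 *	INTEL_INFO(dev)->gen		(dev is a struct drm_device *)
 *	INTEL_INFO(dev_priv)->gen	(dev_priv is a drm_i915_private *)
 */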
2590 
2591 #define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
2592 #define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
2593 
2594 #define REVID_FOREVER		0xff
2595 #define INTEL_REVID(p)	(__I915__(p)->drm.pdev->revision)
2596 
2597 #define GEN_FOREVER (0)
2598 /*
2599  * Returns true if Gen is in inclusive range [Start, End].
2600  *
2601  * Use GEN_FOREVER for an unbound start and/or end.
2602  */
2603 #define IS_GEN(dev_priv, s, e) ({ \
2604 	unsigned int __s = (s), __e = (e); \
2605 	BUILD_BUG_ON(!__builtin_constant_p(s)); \
2606 	BUILD_BUG_ON(!__builtin_constant_p(e)); \
2607 	if ((__s) != GEN_FOREVER) \
2608 		__s = (s) - 1; \
2609 	if ((__e) == GEN_FOREVER) \
2610 		__e = BITS_PER_LONG - 1; \
2611 	else \
2612 		__e = (e) - 1; \
2613 	!!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
2614 })
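/*
 * Illustrative usage, sketch only: the whole range test reduces to one
 * mask check against info.gen_mask.
 *
 *	IS_GEN(dev_priv, 4, 6)			(gen4, gen5 or gen6)
 *	IS_GEN(dev_priv, 8, GEN_FOREVER)	(gen8 onwards)
 */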
2615 
2616 /*
2617  * Return true if revision is in range [since,until] inclusive.
2618  *
2619  * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2620  */
2621 #define IS_REVID(p, since, until) \
2622 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2623 
2624 #define IS_I830(dev_priv)	(INTEL_DEVID(dev_priv) == 0x3577)
2625 #define IS_845G(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2562)
2626 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
2627 #define IS_I865G(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2572)
2628 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
2629 #define IS_I915GM(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2592)
2630 #define IS_I945G(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2772)
2631 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
2632 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
2633 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
2634 #define IS_GM45(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2A42)
2635 #define IS_G4X(dev_priv)	((dev_priv)->info.is_g4x)
2636 #define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
2637 #define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
2638 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
2639 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
2640 #define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
2641 #define IS_IVYBRIDGE(dev_priv)	((dev_priv)->info.is_ivybridge)
2642 #define IS_IVB_GT1(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0156 || \
2643 				 INTEL_DEVID(dev_priv) == 0x0152 || \
2644 				 INTEL_DEVID(dev_priv) == 0x015a)
2645 #define IS_VALLEYVIEW(dev_priv)	((dev_priv)->info.is_valleyview)
2646 #define IS_CHERRYVIEW(dev_priv)	((dev_priv)->info.is_cherryview)
2647 #define IS_HASWELL(dev_priv)	((dev_priv)->info.is_haswell)
2648 #define IS_BROADWELL(dev_priv)	((dev_priv)->info.is_broadwell)
2649 #define IS_SKYLAKE(dev_priv)	((dev_priv)->info.is_skylake)
2650 #define IS_BROXTON(dev_priv)	((dev_priv)->info.is_broxton)
2651 #define IS_KABYLAKE(dev_priv)	((dev_priv)->info.is_kabylake)
2652 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
2653 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2654 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2655 #define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
2656 				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
2657 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
2658 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
2659 /* ULX machines are also considered ULT. */
2660 #define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
2661 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
2662 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
2663 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2664 #define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
2665 				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
2666 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
2667 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2668 /* ULX machines are also considered ULT. */
2669 #define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
2670 				 INTEL_DEVID(dev_priv) == 0x0A1E)
2671 #define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
2672 				 INTEL_DEVID(dev_priv) == 0x1913 || \
2673 				 INTEL_DEVID(dev_priv) == 0x1916 || \
2674 				 INTEL_DEVID(dev_priv) == 0x1921 || \
2675 				 INTEL_DEVID(dev_priv) == 0x1926)
2676 #define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
2677 				 INTEL_DEVID(dev_priv) == 0x1915 || \
2678 				 INTEL_DEVID(dev_priv) == 0x191E)
2679 #define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
2680 				 INTEL_DEVID(dev_priv) == 0x5913 || \
2681 				 INTEL_DEVID(dev_priv) == 0x5916 || \
2682 				 INTEL_DEVID(dev_priv) == 0x5921 || \
2683 				 INTEL_DEVID(dev_priv) == 0x5926)
2684 #define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
2685 				 INTEL_DEVID(dev_priv) == 0x5915 || \
2686 				 INTEL_DEVID(dev_priv) == 0x591E)
2687 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2688 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2689 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2690 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
2691 
2692 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2693 
2694 #define SKL_REVID_A0		0x0
2695 #define SKL_REVID_B0		0x1
2696 #define SKL_REVID_C0		0x2
2697 #define SKL_REVID_D0		0x3
2698 #define SKL_REVID_E0		0x4
2699 #define SKL_REVID_F0		0x5
2700 #define SKL_REVID_G0		0x6
2701 #define SKL_REVID_H0		0x7
2702 
2703 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
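/*
 * Illustrative usage, sketch only: gate a stepping-bound workaround on
 * Skylake steppings up to and including E0.
 *
 *	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
 *		... apply workaround ...
 */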
2704 
2705 #define BXT_REVID_A0		0x0
2706 #define BXT_REVID_A1		0x1
2707 #define BXT_REVID_B0		0x3
2708 #define BXT_REVID_C0		0x9
2709 
2710 #define IS_BXT_REVID(dev_priv, since, until) \
2711 	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
2712 
2713 #define KBL_REVID_A0		0x0
2714 #define KBL_REVID_B0		0x1
2715 #define KBL_REVID_C0		0x2
2716 #define KBL_REVID_D0		0x3
2717 #define KBL_REVID_E0		0x4
2718 
2719 #define IS_KBL_REVID(dev_priv, since, until) \
2720 	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2721 
2722 /*
2723  * The genX designation typically refers to the render engine, so render
2724  * capability related checks should use IS_GEN, while display and other checks
2725  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2726  * chips, etc.).
2727  */
2728 #define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
2729 #define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
2730 #define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
2731 #define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
2732 #define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
2733 #define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
2734 #define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
2735 #define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
2736 
2737 #define ENGINE_MASK(id)	BIT(id)
2738 #define RENDER_RING	ENGINE_MASK(RCS)
2739 #define BSD_RING	ENGINE_MASK(VCS)
2740 #define BLT_RING	ENGINE_MASK(BCS)
2741 #define VEBOX_RING	ENGINE_MASK(VECS)
2742 #define BSD2_RING	ENGINE_MASK(VCS2)
2743 #define ALL_ENGINES	(~0)
2744 
2745 #define HAS_ENGINE(dev_priv, id) \
2746 	(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
2747 
2748 #define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
2749 #define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
2750 #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
2751 #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
2752 
2753 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
2754 #define HAS_SNOOP(dev)		(INTEL_INFO(dev)->has_snoop)
2755 #define HAS_EDRAM(dev)		(!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
2756 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
2757 				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2758 #define HWS_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->hws_needs_physical)
2759 
2760 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->has_hw_contexts)
2761 #define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->has_logical_ring_contexts)
2762 #define USES_PPGTT(dev)		(i915.enable_ppgtt)
2763 #define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt >= 2)
2764 #define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)
2765 
2766 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
2767 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
2768 
2769 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
2770 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_845G(dev_priv))
2771 
2772 /* WaRsDisableCoarsePowerGating:skl,bxt */
2773 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2774 	(IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
2775 	 IS_SKL_GT3(dev_priv) || \
2776 	 IS_SKL_GT4(dev_priv))
2777 
2778 /*
2779  * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
2780  * even when in MSI mode. This results in spurious interrupt warnings if the
2781  * legacy irq no. is shared with another device. The kernel then disables that
2782  * interrupt source and so prevents the other device from working properly.
2783  */
2784 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
2785 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
2786 
2787 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2788  * rows, which changed the alignment requirements and fence programming.
2789  */
2790 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
2791 					 !(IS_I915G(dev_priv) || \
2792 					 IS_I915GM(dev_priv)))
2793 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
2794 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
2795 
2796 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
2797 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
2798 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
2799 
2800 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
2801 
2802 #define HAS_DP_MST(dev)	(INTEL_INFO(dev)->has_dp_mst)
2803 
2804 #define HAS_DDI(dev_priv)	((dev_priv)->info.has_ddi)
2805 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
2806 #define HAS_PSR(dev)		(INTEL_INFO(dev)->has_psr)
2807 #define HAS_RC6(dev)		(INTEL_INFO(dev)->has_rc6)
2808 #define HAS_RC6p(dev)		(INTEL_INFO(dev)->has_rc6p)
2809 
2810 #define HAS_CSR(dev)	(INTEL_INFO(dev)->has_csr)
2811 
2812 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
2813 /*
2814  * For now, anything with a GuC requires uCode loading, and then supports
2815  * command submission once loaded. But these are logically independent
2816  * properties, so we have separate macros to test them.
2817  */
2818 #define HAS_GUC(dev)		(INTEL_INFO(dev)->has_guc)
2819 #define HAS_GUC_UCODE(dev)	(HAS_GUC(dev))
2820 #define HAS_GUC_SCHED(dev)	(HAS_GUC(dev))
2821 
2822 #define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
2823 
2824 #define HAS_POOLED_EU(dev)	(INTEL_INFO(dev)->has_pooled_eu)
2825 
2826 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
2827 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2828 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
2829 #define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
2830 #define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
2831 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
2832 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
2833 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
2834 #define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA200
2835 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
2836 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
2837 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
2838 
2839 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2840 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2841 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2842 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
2843 #define HAS_PCH_LPT_LP(dev_priv) \
2844 	((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
2845 #define HAS_PCH_LPT_H(dev_priv) \
2846 	((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
2847 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2848 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2849 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2850 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
2851 
2852 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
2853 
2854 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
2855 
2856 /* DPF == dynamic parity feature */
2857 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
2858 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2859 				 2 : HAS_L3_DPF(dev_priv))
2860 
2861 #define GT_FREQUENCY_MULTIPLIER 50
2862 #define GEN9_FREQ_SCALER 3
2863 
2864 #include "i915_trace.h"
2865 
2866 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2867 {
2868 #ifdef CONFIG_INTEL_IOMMU
2869 	if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
2870 		return true;
2871 #endif
2872 	return false;
2873 }
2874 
2875 extern int i915_suspend_switcheroo(device_t kdev);
2876 extern int i915_resume_switcheroo(struct drm_device *dev);
2877 
2878 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2879 				int enable_ppgtt);
2880 
2881 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
2882 
2883 /* i915_drv.c */
2884 void __printf(3, 4)
2885 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2886 	      const char *fmt, ...);
2887 
2888 #define i915_report_error(dev_priv, fmt, ...)				   \
2889 	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2890 
2891 #ifdef CONFIG_COMPAT
2892 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2893 			      unsigned long arg);
2894 #endif
2895 extern const struct dev_pm_ops i915_pm_ops;
2896 
2897 extern int i915_driver_load(struct pci_dev *pdev,
2898 			    const struct pci_device_id *ent);
2899 extern void i915_driver_unload(struct drm_device *dev);
2900 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2901 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2902 extern void i915_reset(struct drm_i915_private *dev_priv);
2903 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
2904 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2905 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2906 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2907 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2908 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2909 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2910 
2911 /* intel_hotplug.c */
2912 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2913 			   u32 pin_mask, u32 long_mask);
2914 void intel_hpd_init(struct drm_i915_private *dev_priv);
2915 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2916 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2917 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
2918 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2919 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2920 
2921 /* i915_irq.c */
2922 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2923 {
2924 	unsigned long delay;
2925 
2926 	if (unlikely(!i915.enable_hangcheck))
2927 		return;
2928 
2929 	/* Don't continually defer the hangcheck so that it is always run at
2930 	 * least once after work has been scheduled on any ring. Otherwise,
2931 	 * we will ignore a hung ring if a second ring is kept busy.
2932 	 */
2933 
2934 	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2935 	queue_delayed_work(system_long_wq,
2936 			   &dev_priv->gpu_error.hangcheck_work, delay);
2937 }
2938 
2939 __printf(3, 4)
2940 void i915_handle_error(struct drm_i915_private *dev_priv,
2941 		       u32 engine_mask,
2942 		       const char *fmt, ...);
2943 
2944 extern void intel_irq_init(struct drm_i915_private *dev_priv);
2945 int intel_irq_install(struct drm_i915_private *dev_priv);
2946 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2947 
2948 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
2949 extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
2950 					bool restore_forcewake);
2951 extern void intel_uncore_init(struct drm_i915_private *dev_priv);
2952 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
2953 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
2954 extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
2955 extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
2956 					 bool restore);
2957 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2958 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2959 				enum forcewake_domains domains);
2960 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
2961 				enum forcewake_domains domains);
2962 /* Like above but the caller must manage the uncore.lock itself.
2963  * Must be used with I915_READ_FW and friends.
2964  */
2965 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
2966 					enum forcewake_domains domains);
2967 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
2968 					enum forcewake_domains domains);
2969 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2970 
2971 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2972 
2973 int intel_wait_for_register(struct drm_i915_private *dev_priv,
2974 			    i915_reg_t reg,
2975 			    const u32 mask,
2976 			    const u32 value,
2977 			    const unsigned long timeout_ms);
2978 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
2979 			       i915_reg_t reg,
2980 			       const u32 mask,
2981 			       const u32 value,
2982 			       const unsigned long timeout_ms);
2983 
2984 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2985 {
2986 	return dev_priv->gvt;
2987 }
2988 
2989 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2990 {
2991 	return dev_priv->vgpu.active;
2992 }
2993 
2994 void
2995 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
2996 		     u32 status_mask);
2997 
2998 void
2999 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
3000 		      u32 status_mask);
3001 
3002 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
3003 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
3004 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
3005 				   uint32_t mask,
3006 				   uint32_t bits);
3007 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
3008 			    uint32_t interrupt_mask,
3009 			    uint32_t enabled_irq_mask);
3010 static inline void
3011 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3012 {
3013 	ilk_update_display_irq(dev_priv, bits, bits);
3014 }
3015 static inline void
3016 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3017 {
3018 	ilk_update_display_irq(dev_priv, bits, 0);
3019 }
3020 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
3021 			 enum i915_pipe pipe,
3022 			 uint32_t interrupt_mask,
3023 			 uint32_t enabled_irq_mask);
3024 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
3025 				       enum i915_pipe pipe, uint32_t bits)
3026 {
3027 	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
3028 }
3029 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
3030 					enum i915_pipe pipe, uint32_t bits)
3031 {
3032 	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
3033 }
3034 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
3035 				  uint32_t interrupt_mask,
3036 				  uint32_t enabled_irq_mask);
3037 static inline void
3038 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3039 {
3040 	ibx_display_interrupt_update(dev_priv, bits, bits);
3041 }
3042 static inline void
3043 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3044 {
3045 	ibx_display_interrupt_update(dev_priv, bits, 0);
3046 }
3047 
3048 /* i915_gem.c */
3049 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3050 			  struct drm_file *file_priv);
3051 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3052 			 struct drm_file *file_priv);
3053 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3054 			  struct drm_file *file_priv);
3055 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3056 			struct drm_file *file_priv);
3057 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3058 			struct drm_file *file_priv);
3059 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3060 			      struct drm_file *file_priv);
3061 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3062 			     struct drm_file *file_priv);
3063 int i915_gem_execbuffer(struct drm_device *dev, void *data,
3064 			struct drm_file *file_priv);
3065 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
3066 			 struct drm_file *file_priv);
3067 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3068 			struct drm_file *file_priv);
3069 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3070 			       struct drm_file *file);
3071 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3072 			       struct drm_file *file);
3073 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3074 			    struct drm_file *file_priv);
3075 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3076 			   struct drm_file *file_priv);
3077 int i915_gem_set_tiling(struct drm_device *dev, void *data,
3078 			struct drm_file *file_priv);
3079 int i915_gem_get_tiling(struct drm_device *dev, void *data,
3080 			struct drm_file *file_priv);
3081 void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
3082 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3083 			   struct drm_file *file);
3084 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3085 				struct drm_file *file_priv);
3086 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3087 			struct drm_file *file_priv);
3088 void i915_gem_load_init(struct drm_device *dev);
3089 void i915_gem_load_cleanup(struct drm_device *dev);
3090 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3091 int i915_gem_freeze(struct drm_i915_private *dev_priv);
3092 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3093 
3094 void *i915_gem_object_alloc(struct drm_device *dev);
3095 void i915_gem_object_free(struct drm_i915_gem_object *obj);
3096 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3097 			 const struct drm_i915_gem_object_ops *ops);
3098 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3099 						  size_t size);
3100 struct drm_i915_gem_object *i915_gem_object_create_from_data(
3101 		struct drm_device *dev, const void *data, size_t size);
3102 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
3103 void i915_gem_free_object(struct drm_gem_object *obj);
3104 
3105 struct i915_vma * __must_check
3106 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3107 			 const struct i915_ggtt_view *view,
3108 			 u64 size,
3109 			 u64 alignment,
3110 			 u64 flags);
3111 
3112 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3113 		  u32 flags);
3114 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
3115 int __must_check i915_vma_unbind(struct i915_vma *vma);
3116 void i915_vma_close(struct i915_vma *vma);
3117 void i915_vma_destroy(struct i915_vma *vma);
3118 
3119 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
3120 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
3121 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
3122 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
3123 
3124 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
3125 
3126 static inline int __sg_page_count(struct scatterlist *sg)
3127 {
3128 	return sg->length >> PAGE_SHIFT;
3129 }
3130 
3131 struct page *
3132 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
3133 
3134 static inline dma_addr_t
3135 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
3136 {
3137 	if (n < obj->get_page.last) {
3138 		obj->get_page.sg = obj->pages->sgl;
3139 		obj->get_page.last = 0;
3140 	}
3141 
3142 	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
3143 		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
3144 		if (unlikely(sg_is_chain(obj->get_page.sg)))
3145 			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
3146 	}
3147 
3148 	return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
3149 }
3150 
3151 static inline struct page *
3152 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
3153 {
3154 	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
3155 		return NULL;
3156 
3157 	if (n < obj->get_page.last) {
3158 		obj->get_page.sg = obj->pages->sgl;
3159 		obj->get_page.last = 0;
3160 	}
3161 
3162 	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
3163 		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
3164 		if (unlikely(sg_is_chain(obj->get_page.sg)))
3165 			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
3166 	}
3167 
3168 	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
3169 }
3170 
3171 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3172 {
3173 	GEM_BUG_ON(obj->pages == NULL);
3174 	obj->pages_pin_count++;
3175 }
3176 
3177 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3178 {
3179 	GEM_BUG_ON(obj->pages_pin_count == 0);
3180 	obj->pages_pin_count--;
3181 	GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
3182 }
3183 
3184 enum i915_map_type {
3185 	I915_MAP_WB = 0,
3186 	I915_MAP_WC,
3187 };
3188 
3189 /**
3190  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
3191  * @obj - the object to map into kernel address space
3192  * @type - the type of mapping, used to select pgprot_t
3193  *
3194  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
3195  * pages and then returns a contiguous mapping of the backing storage into
3196  * the kernel address space. Based on the @type of mapping, the PTE will be
3197  * set to either WriteBack or WriteCombine (via pgprot_t).
3198  *
3199  * The caller must hold the struct_mutex, and is responsible for calling
3200  * i915_gem_object_unpin_map() when the mapping is no longer required.
3201  *
3202  * Returns the pointer through which to access the mapped object, or an
3203  * ERR_PTR() on error.
3204  */
3205 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
3206 					   enum i915_map_type type);
3207 
3208 /**
3209  * i915_gem_object_unpin_map - releases an earlier mapping
3210  * @obj - the object to unmap
3211  *
3212  * After pinning the object and mapping its pages, once you are finished
3213  * with your access, call i915_gem_object_unpin_map() to release the pin
3214  * on the mapping. Once the pin count reaches zero, that mapping may be
3215  * removed.
3216  *
3217  * The caller must hold the struct_mutex.
3218  */
3219 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
3220 {
3221 	lockdep_assert_held(&obj->base.dev->struct_mutex);
3222 	i915_gem_object_unpin_pages(obj);
3223 }
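/*
 * Illustrative sketch, not part of this header: a pin_map/unpin_map pair
 * under struct_mutex, following the ERR_PTR() convention documented above.
 * The data/size variables are hypothetical.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_unpin_map(obj);
 */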
3224 
3225 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3226 				    unsigned int *needs_clflush);
3227 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3228 				     unsigned int *needs_clflush);
3229 #define CLFLUSH_BEFORE 0x1
3230 #define CLFLUSH_AFTER 0x2
3231 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
3232 
3233 static inline void
3234 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
3235 {
3236 	i915_gem_object_unpin_pages(obj);
3237 }
3238 
3239 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
3240 void i915_vma_move_to_active(struct i915_vma *vma,
3241 			     struct drm_i915_gem_request *req,
3242 			     unsigned int flags);
3243 int i915_gem_dumb_create(struct drm_file *file_priv,
3244 			 struct drm_device *dev,
3245 			 struct drm_mode_create_dumb *args);
3246 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3247 		      uint32_t handle, uint64_t *offset);
3248 int i915_gem_mmap_gtt_version(void);
3249 
3250 void i915_gem_track_fb(struct drm_i915_gem_object *old,
3251 		       struct drm_i915_gem_object *new,
3252 		       unsigned frontbuffer_bits);
3253 
3254 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
3255 
3256 struct drm_i915_gem_request *
3257 i915_gem_find_active_request(struct intel_engine_cs *engine);
3258 
3259 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
3260 
3261 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
3262 {
3263 	return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
3264 }
3265 
3266 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
3267 {
3268 	return unlikely(test_bit(I915_WEDGED, &error->flags));
3269 }
3270 
3271 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
3272 {
3273 	return i915_reset_in_progress(error) | i915_terminally_wedged(error);
3274 }
3275 
3276 static inline u32 i915_reset_count(struct i915_gpu_error *error)
3277 {
3278 	return READ_ONCE(error->reset_count);
3279 }
3280 
3281 void i915_gem_reset(struct drm_i915_private *dev_priv);
3282 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3283 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
3284 int __must_check i915_gem_init(struct drm_device *dev);
3285 int __must_check i915_gem_init_hw(struct drm_device *dev);
3286 void i915_gem_init_swizzling(struct drm_device *dev);
3287 void i915_gem_cleanup_engines(struct drm_device *dev);
3288 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
3289 					unsigned int flags);
3290 int __must_check i915_gem_suspend(struct drm_device *dev);
3291 void i915_gem_resume(struct drm_device *dev);
3292 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres);
3293 int __must_check
3294 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
3295 			       bool readonly);
3296 int __must_check
3297 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
3298 				  bool write);
3299 int __must_check
3300 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
3301 struct i915_vma * __must_check
3302 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3303 				     u32 alignment,
3304 				     const struct i915_ggtt_view *view);
3305 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
3306 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
3307 				int align);
3308 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
3309 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
3310 
3311 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
3312 			   int tiling_mode);
3313 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
3314 				int tiling_mode, bool fenced);
3315 
3316 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3317 				    enum i915_cache_level cache_level);
3318 
3319 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3320 				struct dma_buf *dma_buf);
3321 
3322 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3323 				struct drm_gem_object *gem_obj, int flags);
3324 
3325 struct i915_vma *
3326 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
3327 		     struct i915_address_space *vm,
3328 		     const struct i915_ggtt_view *view);
3329 
3330 struct i915_vma *
3331 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3332 				  struct i915_address_space *vm,
3333 				  const struct i915_ggtt_view *view);
3334 
3335 static inline struct i915_hw_ppgtt *
3336 i915_vm_to_ppgtt(struct i915_address_space *vm)
3337 {
3338 	return container_of(vm, struct i915_hw_ppgtt, base);
3339 }
3340 
3341 static inline struct i915_vma *
3342 i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
3343 			const struct i915_ggtt_view *view)
3344 {
3345 	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
3346 }
3347 
3348 static inline unsigned long
3349 i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
3350 			    const struct i915_ggtt_view *view)
3351 {
3352 	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
3353 }
3354 
3355 /* i915_gem_fence.c */
3356 int __must_check i915_vma_get_fence(struct i915_vma *vma);
3357 int __must_check i915_vma_put_fence(struct i915_vma *vma);
3358 
3359 /**
3360  * i915_vma_pin_fence - pin fencing state
3361  * @vma: vma to pin fencing for
3362  *
3363  * This pins the fencing state (whether tiled or untiled) to make sure the
3364  * vma (and its object) is ready to be used as a scanout target. Fencing
3365  * status must be synchronized first by calling i915_vma_get_fence().
3366  *
3367  * The resulting fence pin reference must be released again with
3368  * i915_vma_unpin_fence().
3369  *
3370  * Returns:
3371  *
3372  * True if the vma has a fence, false otherwise.
3373  */
3374 static inline bool
3375 i915_vma_pin_fence(struct i915_vma *vma)
3376 {
3377 	if (vma->fence) {
3378 		vma->fence->pin_count++;
3379 		return true;
3380 	}
3381 	return false;
3382 }
3383 
3384 /**
3385  * i915_vma_unpin_fence - unpin fencing state
3386  * @vma: vma to unpin fencing for
3387  *
3388  * This releases the fence pin reference acquired through
3389  * i915_vma_pin_fence. It will handle both objects with and without an
3390  * attached fence correctly; callers do not need to distinguish the two cases.
3391  */
3392 static inline void
3393 i915_vma_unpin_fence(struct i915_vma *vma)
3394 {
3395 	if (vma->fence) {
3396 		GEM_BUG_ON(vma->fence->pin_count <= 0);
3397 		vma->fence->pin_count--;
3398 	}
3399 }
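/*
 * Added usage sketch (compiled out, not part of the driver): the expected
 * pairing of the two helpers above for a scanout vma. example_fence_scanout()
 * is a hypothetical function; it assumes the caller holds whatever locks
 * i915_vma_get_fence() requires.
 */
#if 0
static int example_fence_scanout(struct i915_vma *vma)
{
	int ret;

	/* Synchronize fencing state first; this may assign a new fence. */
	ret = i915_vma_get_fence(vma);
	if (ret)
		return ret;

	i915_vma_pin_fence(vma);	/* pins fence state, tiled or not */

	/* ... program scanout via the (possibly fenced) GGTT mapping ... */

	i915_vma_unpin_fence(vma);	/* balanced even without a fence */
	return 0;
}
#endif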
3400 
3401 void i915_gem_restore_fences(struct drm_device *dev);
3402 
3403 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
3404 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
3405 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
3406 
3407 /* i915_gem_context.c */
3408 int __must_check i915_gem_context_init(struct drm_device *dev);
3409 void i915_gem_context_lost(struct drm_i915_private *dev_priv);
3410 void i915_gem_context_fini(struct drm_device *dev);
3411 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
3412 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
3413 int i915_switch_context(struct drm_i915_gem_request *req);
3414 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
3415 void i915_gem_context_free(struct kref *ctx_ref);
3416 struct drm_i915_gem_object *
3417 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
3418 struct i915_gem_context *
3419 i915_gem_context_create_gvt(struct drm_device *dev);
3420 
3421 static inline struct i915_gem_context *
3422 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3423 {
3424 	struct i915_gem_context *ctx;
3425 
3426 	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
3427 
3428 	ctx = idr_find(&file_priv->context_idr, id);
3429 	if (!ctx)
3430 		return ERR_PTR(-ENOENT);
3431 
3432 	return ctx;
3433 }
3434 
3435 static inline struct i915_gem_context *
3436 i915_gem_context_get(struct i915_gem_context *ctx)
3437 {
3438 	kref_get(&ctx->ref);
3439 	return ctx;
3440 }
3441 
3442 static inline void i915_gem_context_put(struct i915_gem_context *ctx)
3443 {
3444 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
3445 	kref_put(&ctx->ref, i915_gem_context_free);
3446 }
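/*
 * Added usage sketch (compiled out, illustrative only): resolving a user
 * context handle and holding a reference across its use. example_use_context()
 * is hypothetical; note that lookup and put both assert that
 * dev->struct_mutex is held.
 */
#if 0
static int example_use_context(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file_priv, id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -ENOENT for an unknown handle */

	ctx = i915_gem_context_get(ctx);	/* hold a kref across use */
	/* ... operate on ctx ... */
	i915_gem_context_put(ctx);

	return 0;
}
#endif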
3447 
3448 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
3449 {
3450 	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
3451 }
3452 
3453 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
3454 				  struct drm_file *file);
3455 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
3456 				   struct drm_file *file);
3457 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
3458 				    struct drm_file *file_priv);
3459 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
3460 				    struct drm_file *file_priv);
3461 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
3462 				       struct drm_file *file);
3463 
3464 /* i915_gem_evict.c */
3465 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
3466 					  u64 min_size, u64 alignment,
3467 					  unsigned cache_level,
3468 					  u64 start, u64 end,
3469 					  unsigned flags);
3470 int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
3471 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
3472 
3473 /* belongs in i915_gem_gtt.h */
3474 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
3475 {
3476 	wmb();
3477 	if (INTEL_GEN(dev_priv) < 6)
3478 		intel_gtt_chipset_flush();
3479 }
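/*
 * The wmb() above orders any prior CPU writes before the flush; only
 * pre-gen6 chipsets need the explicit intel_gtt_chipset_flush() to make
 * those writes visible to the GPU.
 */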
3480 
3481 /* i915_gem_stolen.c */
3482 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3483 				struct drm_mm_node *node, u64 size,
3484 				unsigned alignment);
3485 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3486 					 struct drm_mm_node *node, u64 size,
3487 					 unsigned alignment, u64 start,
3488 					 u64 end);
3489 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3490 				 struct drm_mm_node *node);
3491 int i915_gem_init_stolen(struct drm_device *dev);
3492 void i915_gem_cleanup_stolen(struct drm_device *dev);
3493 struct drm_i915_gem_object *
3494 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
3495 struct drm_i915_gem_object *
3496 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
3497 					       u32 stolen_offset,
3498 					       u32 gtt_offset,
3499 					       u32 size);
3500 
3501 /* i915_gem_shrinker.c */
3502 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
3503 			      unsigned long target,
3504 			      unsigned flags);
3505 #define I915_SHRINK_PURGEABLE 0x1
3506 #define I915_SHRINK_UNBOUND 0x2
3507 #define I915_SHRINK_BOUND 0x4
3508 #define I915_SHRINK_ACTIVE 0x8
3509 #define I915_SHRINK_VMAPS 0x10
3510 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3511 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
3512 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
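/*
 * Added usage sketch (compiled out): a shrink request combines the scan
 * flags above. The target (in pages) and flag choice here are illustrative,
 * not a statement of actual driver policy.
 */
#if 0
	/* Try to reclaim up to 128 pages, purgeable objects only. */
	i915_gem_shrink(dev_priv, 128,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
#endif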
3513 
3514 
3515 /* i915_gem_tiling.c */
3516 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
3517 {
3518 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3519 
3520 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3521 		i915_gem_object_is_tiled(obj);
3522 }
3523 
3524 /* i915_debugfs.c */
3525 #ifdef CONFIG_DEBUG_FS
3526 int i915_debugfs_register(struct drm_i915_private *dev_priv);
3527 void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
3528 int i915_debugfs_connector_add(struct drm_connector *connector);
3529 void intel_display_crc_init(struct drm_i915_private *dev_priv);
3530 #else
3531 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
3532 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
3533 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3534 { return 0; }
3535 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
3536 #endif
3537 
3538 /* i915_gpu_error.c */
3539 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
3540 
3541 __printf(2, 3)
3542 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
3543 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3544 			    const struct i915_error_state_file_priv *error);
3545 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
3546 			      struct drm_i915_private *i915,
3547 			      size_t count, loff_t pos);
3548 static inline void i915_error_state_buf_release(
3549 	struct drm_i915_error_state_buf *eb)
3550 {
3551 	kfree(eb->buf);
3552 }
3553 void i915_capture_error_state(struct drm_i915_private *dev_priv,
3554 			      u32 engine_mask,
3555 			      const char *error_msg);
3556 void i915_error_state_get(struct drm_device *dev,
3557 			  struct i915_error_state_file_priv *error_priv);
3558 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
3559 void i915_destroy_error_state(struct drm_device *dev);
3560 
3561 #else
3562 
3563 static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
3564 					    u32 engine_mask,
3565 					    const char *error_msg)
3566 {
3567 }
3568 
3569 static inline void i915_destroy_error_state(struct drm_device *dev)
3570 {
3571 }
3572 
3573 #endif
3574 
3575 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3576 
3577 /* i915_cmd_parser.c */
3578 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3579 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
3580 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
3581 bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
3582 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
3583 			    struct drm_i915_gem_object *batch_obj,
3584 			    struct drm_i915_gem_object *shadow_batch_obj,
3585 			    u32 batch_start_offset,
3586 			    u32 batch_len,
3587 			    bool is_master);
3588 
3589 /* i915_suspend.c */
3590 extern int i915_save_state(struct drm_device *dev);
3591 extern int i915_restore_state(struct drm_device *dev);
3592 
3593 /* i915_sysfs.c */
3594 void i915_setup_sysfs(struct drm_i915_private *dev_priv);
3595 void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
3596 
3597 /* intel_i2c.c */
3598 extern int intel_setup_gmbus(struct drm_device *dev);
3599 extern void intel_teardown_gmbus(struct drm_device *dev);
3600 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
3601 				     unsigned int pin);
3602 
3603 extern struct i2c_adapter *
3604 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
3605 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3606 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
3607 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
3608 {
3609 	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
3610 }
3611 extern void intel_i2c_reset(struct drm_device *dev);
3612 
3613 /* intel_bios.c */
3614 int intel_bios_init(struct drm_i915_private *dev_priv);
3615 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3616 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3617 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3618 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
3619 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3620 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3621 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
3622 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3623 				     enum port port);
3624 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3625 				enum port port);
3626 
3627 /* intel_opregion.c */
3628 #ifdef CONFIG_ACPI
3629 extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
3630 extern void intel_opregion_register(struct drm_i915_private *dev_priv);
3631 extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
3632 extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
3633 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
3634 					 bool enable);
3635 extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
3636 					 pci_power_t state);
3637 extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
3638 #else
3639 static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
3640 static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
3641 static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
3642 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
3643 {
3644 }
3645 static inline int
3646 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
3647 {
3648 	return 0;
3649 }
3650 static inline int
3651 intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
3652 {
3653 	return 0;
3654 }
3655 static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
3656 {
3657 	return -ENODEV;
3658 }
3659 #endif
3660 
3661 /* intel_acpi.c */
3662 #ifdef CONFIG_ACPI
3663 extern void intel_register_dsm_handler(void);
3664 extern void intel_unregister_dsm_handler(void);
3665 #else
3666 static inline void intel_register_dsm_handler(void) { return; }
3667 static inline void intel_unregister_dsm_handler(void) { return; }
3668 #endif /* CONFIG_ACPI */
3669 
3670 /* intel_device_info.c */
3671 static inline struct intel_device_info *
3672 mkwrite_device_info(struct drm_i915_private *dev_priv)
3673 {
3674 	return (struct intel_device_info *)&dev_priv->info;
3675 }
3676 
3677 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
3678 void intel_device_info_dump(struct drm_i915_private *dev_priv);
3679 
3680 /* modesetting */
3681 extern void intel_modeset_init_hw(struct drm_device *dev);
3682 extern void intel_modeset_init(struct drm_device *dev);
3683 extern void intel_modeset_gem_init(struct drm_device *dev);
3684 extern void intel_modeset_cleanup(struct drm_device *dev);
3685 extern int intel_connector_register(struct drm_connector *);
3686 extern void intel_connector_unregister(struct drm_connector *);
3687 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
3688 extern void intel_display_resume(struct drm_device *dev);
3689 extern void i915_redisable_vga(struct drm_device *dev);
3690 extern void i915_redisable_vga_power_on(struct drm_device *dev);
3691 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3692 extern void intel_init_pch_refclk(struct drm_device *dev);
3693 extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3694 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3695 				  bool enable);
3696 
3697 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3698 			struct drm_file *file);
3699 
3700 /* overlay */
3701 extern struct intel_overlay_error_state *
3702 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3703 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3704 					    struct intel_overlay_error_state *error);
3705 
3706 extern struct intel_display_error_state *
3707 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3708 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3709 					    struct drm_device *dev,
3710 					    struct intel_display_error_state *error);
3711 
3712 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3713 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
3714 
3715 /* intel_sideband.c */
3716 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3717 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3718 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3719 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
3720 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
3721 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3722 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3723 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3724 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3725 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3726 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3727 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
3728 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
3729 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3730 		   enum intel_sbi_destination destination);
3731 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3732 		     enum intel_sbi_destination destination);
3733 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3734 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3735 
3736 /* intel_dpio_phy.c */
3737 void chv_set_phy_signal_level(struct intel_encoder *encoder,
3738 			      u32 deemph_reg_value, u32 margin_reg_value,
3739 			      bool uniq_trans_scale);
3740 void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3741 			      bool reset);
3742 void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
3743 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3744 void chv_phy_release_cl2_override(struct intel_encoder *encoder);
3745 void chv_phy_post_pll_disable(struct intel_encoder *encoder);
3746 
3747 void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3748 			      u32 demph_reg_value, u32 preemph_reg_value,
3749 			      u32 uniqtranscale_reg_value, u32 tx3_demph);
3750 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
3751 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3752 void vlv_phy_reset_lanes(struct intel_encoder *encoder);
3753 
3754 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3755 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3756 
3757 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
3758 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
3759 
3760 #define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
3761 #define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
3762 #define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
3763 #define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
3764 
3765 #define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
3766 #define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
3767 #define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
3768 #define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
3769 
3770 /* Be very careful with read/write 64-bit values. On 32-bit machines, they
3771  * will be implemented as two 32-bit writes in an arbitrary order with
3772  * an arbitrary delay between them. This can cause the hardware to
3773  * act upon the intermediate value, possibly leading to corruption and
3774  * machine death. For this reason we do not support I915_WRITE64, or
3775  * dev_priv->uncore.funcs.mmio_writeq.
3776  *
3777  * When reading a 64-bit value as two 32-bit values, the delay may cause
3778  * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
3779  * occasionally a 64-bit register does not actually support a full readq
3780  * and must be read using two 32-bit reads.
3781  *
3782  * You have been warned.
3783  */
3784 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3785 
3786 #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
3787 	u32 upper, lower, old_upper, loop = 0;				\
3788 	upper = I915_READ(upper_reg);					\
3789 	do {								\
3790 		old_upper = upper;					\
3791 		lower = I915_READ(lower_reg);				\
3792 		upper = I915_READ(upper_reg);				\
3793 	} while (upper != old_upper && loop++ < 2);			\
3794 	(u64)upper << 32 | lower; })
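/*
 * Added usage sketch (compiled out): reading a 64-bit value split across two
 * 32-bit registers. EXAMPLE_LO/EXAMPLE_HI are placeholder names; the macro
 * re-reads the upper half until it is stable so a carry between the two
 * reads cannot return a torn value.
 */
#if 0
	u64 ts = I915_READ64_2x32(EXAMPLE_LO, EXAMPLE_HI);
#endif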
3795 
3796 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
3797 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
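/*
 * Added usage sketch (compiled out): a posting read flushes a posted MMIO
 * write before the CPU relies on its side effect. EXAMPLE_IMR is a
 * placeholder register name.
 */
#if 0
	I915_WRITE(EXAMPLE_IMR, mask);
	POSTING_READ(EXAMPLE_IMR);	/* write has reached hw before we go on */
#endif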
3798 
3799 #define __raw_read(x, s) \
3800 static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
3801 					     i915_reg_t reg) \
3802 { \
3803 	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
3804 }
3805 
3806 #define __raw_write(x, s) \
3807 static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
3808 				       i915_reg_t reg, uint##x##_t val) \
3809 { \
3810 	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
3811 }
3812 __raw_read(8, b)
3813 __raw_read(16, w)
3814 __raw_read(32, l)
3815 __raw_read(64, q)
3816 
3817 __raw_write(8, b)
3818 __raw_write(16, w)
3819 __raw_write(32, l)
3820 __raw_write(64, q)
3821 
3822 #undef __raw_read
3823 #undef __raw_write
3824 
3825 /* These are untraced mmio-accessors that are only valid for use inside
3826  * critical sections of IRQ handlers where forcewake is explicitly
3827  * controlled.
3828  * Think twice, and think again, before using these.
3829  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
3830  * intel_uncore_forcewake_irqunlock().
3831  */
3832 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
3833 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
3834 #define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
3835 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
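/*
 * Added usage sketch (compiled out), following the note above: raw _FW
 * access only between the irqlock/irqunlock pair. The lock-function
 * signatures are assumed here and EXAMPLE_REG is a placeholder.
 */
#if 0
	intel_uncore_forcewake_irqlock(dev_priv, FORCEWAKE_ALL);
	I915_WRITE_FW(EXAMPLE_REG, val);
	POSTING_READ_FW(EXAMPLE_REG);
	intel_uncore_forcewake_irqunlock(dev_priv, FORCEWAKE_ALL);
#endif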
3836 
3837 /* "Broadcast RGB" property */
3838 #define INTEL_BROADCAST_RGB_AUTO 0
3839 #define INTEL_BROADCAST_RGB_FULL 1
3840 #define INTEL_BROADCAST_RGB_LIMITED 2
3841 
3842 static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
3843 {
3844 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3845 		return VLV_VGACNTRL;
3846 	else if (INTEL_GEN(dev_priv) >= 5)
3847 		return CPU_VGACNTRL;
3848 	else
3849 		return VGACNTRL;
3850 }
3851 
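/*
 * The "+ 1" in the conversions below rounds up by one tick: a timeout of N
 * jiffies can expire up to one tick early because the current tick is
 * already in progress, so the extra jiffy guarantees callers wait at least
 * the requested time.
 */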
3852 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3853 {
3854 	unsigned long j = msecs_to_jiffies(m);
3855 
3856 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3857 }
3858 
3859 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
3860 {
3861 	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
3862 }
3863 
3864 static inline unsigned long
3865 timespec_to_jiffies_timeout(const struct timespec *value)
3866 {
3867 	unsigned long j = timespec_to_jiffies(value);
3868 
3869 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3870 }
3871 
3872 /*
3873  * If you need to wait X milliseconds between events A and B, but event B
3874  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
3875  * when event A happened, then just before event B you call this function and
3876  * pass the timestamp as the first argument, and X as the second argument.
3877  */
3878 static inline void
3879 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
3880 {
3881 	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
3882 
3883 	/*
3884 	 * Don't re-read the value of "jiffies" every time since it may change
3885 	 * behind our back and break the math.
3886 	 */
3887 	tmp_jiffies = jiffies;
3888 	target_jiffies = timestamp_jiffies +
3889 			 msecs_to_jiffies_timeout(to_wait_ms);
3890 
3891 	if (time_after(target_jiffies, tmp_jiffies)) {
3892 		remaining_jiffies = target_jiffies - tmp_jiffies;
3893 		while (remaining_jiffies)
3894 			remaining_jiffies =
3895 			    schedule_timeout_uninterruptible(remaining_jiffies);
3896 	}
3897 }
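/*
 * Added usage sketch (compiled out) of the helper above; the 10ms figure
 * and variable name are illustrative only.
 */
#if 0
	unsigned long panel_off_jiffies;

	/* event A */
	panel_off_jiffies = jiffies;

	/* ... unrelated work may happen here ... */

	/* just before event B: sleep only for whatever time remains */
	wait_remaining_ms_from_jiffies(panel_off_jiffies, 10);
#endif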
3898 
3899 static inline bool
3900 __i915_request_irq_complete(struct drm_i915_gem_request *req)
3901 {
3902 	struct intel_engine_cs *engine = req->engine;
3903 
3904 	/* Before we do the heavier coherent read of the seqno,
3905 	 * check the value (hopefully) in the CPU cacheline.
3906 	 */
3907 	if (i915_gem_request_completed(req))
3908 		return true;
3909 
3910 	/* Ensure our read of the seqno is coherent so that we
3911 	 * do not "miss an interrupt" (i.e. if this is the last
3912 	 * request and the seqno write from the GPU is not visible
3913 	 * by the time the interrupt fires, we will see that the
3914 	 * request is incomplete and go back to sleep awaiting
3915 	 * another interrupt that will never come.)
3916 	 *
3917 	 * Strictly, we only need to do this once after an interrupt,
3918 	 * but it is easier and safer to do it every time the waiter
3919 	 * is woken.
3920 	 */
3921 	if (engine->irq_seqno_barrier &&
3922 	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
3923 	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
3924 		struct task_struct *tsk;
3925 
3926 		/* The ordering of irq_posted versus applying the barrier
3927 		 * is crucial. The clearing of the current irq_posted must
3928 		 * be visible before we perform the barrier operation,
3929 		 * such that if a subsequent interrupt arrives, irq_posted
3930 		 * is reasserted and our task rewoken (which causes us to
3931 		 * do another __i915_request_irq_complete() immediately
3932 		 * and reapply the barrier). Conversely, if the clear
3933 		 * occurs after the barrier, then an interrupt that arrived
3934 		 * whilst we waited on the barrier would not trigger a
3935 		 * barrier on the next pass, and the read may not see the
3936 		 * seqno update.
3937 		 */
3938 		engine->irq_seqno_barrier(engine);
3939 
3940 		/* If we consume the irq, but we are no longer the bottom-half,
3941 		 * the real bottom-half may not have serialised its own
3942 		 * seqno check with the irq-barrier (i.e. it may have inspected
3943 		 * the seqno before we believe it coherent, since it sees
3944 		 * irq_posted == false while we are still running).
3945 		 */
3946 		rcu_read_lock();
3947 		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
3948 		if (tsk && tsk != current)
3949 			/* Note that if the bottom-half is changed as we
3950 			 * are sending the wake-up, the new bottom-half will
3951 			 * be woken by whoever made the change. We only have
3952 			 * to worry about the case where we steal the irq-posted
3953 			 * for ourselves.
3954 			 */
3955 			wake_up_process(tsk);
3956 		rcu_read_unlock();
3957 
3958 		if (i915_gem_request_completed(req))
3959 			return true;
3960 	}
3961 
3962 	return false;
3963 }
3964 
3965 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
3966 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
3967 
3968 /* i915_mm.c */
3969 int remap_io_mapping(struct vm_area_struct *vma,
3970 		     unsigned long addr, unsigned long pfn, unsigned long size,
3971 		     struct io_mapping *iomap);
3972 
3973 #define ptr_mask_bits(ptr) ({						\
3974 	unsigned long __v = (unsigned long)(ptr);			\
3975 	(typeof(ptr))(__v & PAGE_MASK);					\
3976 })
3977 
3978 #define ptr_unpack_bits(ptr, bits) ({					\
3979 	unsigned long __v = (unsigned long)(ptr);			\
3980 	(bits) = __v & ~PAGE_MASK;					\
3981 	(typeof(ptr))(__v & PAGE_MASK);					\
3982 })
3983 
3984 #define ptr_pack_bits(ptr, bits)					\
3985 	((typeof(ptr))((unsigned long)(ptr) | (bits)))
3986 
3987 #define fetch_and_zero(ptr) ({						\
3988 	typeof(*ptr) __T = *(ptr);					\
3989 	*(ptr) = (typeof(*ptr))0;					\
3990 	__T;								\
3991 })
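/*
 * Added usage sketch (compiled out): the helpers above stash small flags in
 * the low, page-alignment bits of a pointer. The 0x2 flag value and
 * page_aligned_ptr are illustrative.
 */
#if 0
	unsigned long bits;
	void *packed, *ptr;

	packed = ptr_pack_bits(page_aligned_ptr, 0x2);	/* low bits carry flags */
	ptr = ptr_unpack_bits(packed, bits);		/* bits == 0x2 */
	ptr = ptr_mask_bits(packed);			/* flags stripped */

	/* fetch_and_zero() reads a slot and clears it in one expression. */
	ptr = fetch_and_zero(&ptr);
#endif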
3992 
3993 #endif
3994