1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29 
30 #ifndef _I915_DRV_H_
31 #define _I915_DRV_H_
32 
33 #include <uapi/drm/i915_drm.h>
34 #include <uapi/drm/drm_fourcc.h>
35 
36 #include <linux/io-mapping.h>
37 #include <linux/i2c.h>
38 #include <linux/i2c-algo-bit.h>
39 #include <linux/backlight.h>
40 #include <linux/hashtable.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/kref.h>
43 #include <linux/pm_qos.h>
44 #include <linux/reservation.h>
45 #include <linux/shmem_fs.h>
46 
47 #include <drm/drmP.h>
48 #include <drm/intel-gtt.h>
49 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
50 #include <drm/drm_gem.h>
51 #include <drm/drm_auth.h>
52 #include <drm/drm_cache.h>
53 
54 #include "i915_params.h"
55 #include "i915_reg.h"
56 #include "i915_utils.h"
57 
58 #include "intel_bios.h"
59 #include "intel_dpll_mgr.h"
60 #include "intel_uc.h"
61 #include "intel_lrc.h"
62 #include "intel_ringbuffer.h"
63 
64 #include "i915_gem.h"
65 #include "i915_gem_context.h"
66 #include "i915_gem_fence_reg.h"
67 #include "i915_gem_object.h"
68 #include "i915_gem_gtt.h"
69 #include "i915_gem_render_state.h"
70 #include "i915_gem_request.h"
71 #include "i915_gem_timeline.h"
72 
73 #include "i915_vma.h"
74 
75 #include "intel_gvt.h"
76 
77 /* General customization:
78  */
79 
80 #define DRIVER_NAME		"i915"
81 #define DRIVER_DESC		"Intel Graphics"
82 #define DRIVER_DATE		"20170403"
83 #define DRIVER_TIMESTAMP	1491198738
84 
85 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
86  * WARN_ON()) for hw state sanity checks that catch unexpected conditions
87  * which may not necessarily be a user visible problem.  This will either
88  * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam, to
89  * enable distros and users to tailor their preferred amount of i915 abrt
90  * spam.
91  */
92 #define I915_STATE_WARN(condition, format...) ({			\
93 	int __ret_warn_on = !!(condition);				\
94 	if (unlikely(__ret_warn_on))					\
95 		if (!WARN(i915.verbose_state_checks, format))		\
96 			DRM_ERROR(format);				\
97 	unlikely(__ret_warn_on);					\
98 })
99 
100 #define I915_STATE_WARN_ON(x)						\
101 	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
102 
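/*
 * A usage sketch for the macros above (a hypothetical state check, not code
 * from this driver). Like WARN(), I915_STATE_WARN() evaluates to the
 * condition, so a caller can log and bail out in one step:
 *
 *	if (I915_STATE_WARN(cur_state != expected,
 *			    "pipe %c state mismatch (expected %s, found %s)\n",
 *			    pipe_name(pipe), onoff(expected), onoff(cur_state)))
 *		return;
 */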
103 bool __i915_inject_load_failure(const char *func, int line);
104 #define i915_inject_load_failure() \
105 	__i915_inject_load_failure(__func__, __LINE__)
106 
107 typedef struct {
108 	uint32_t val;
109 } uint_fixed_16_16_t;
110 
111 #define FP_16_16_MAX ({ \
112 	uint_fixed_16_16_t fp; \
113 	fp.val = UINT_MAX; \
114 	fp; \
115 })
116 
117 static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
118 {
119 	uint_fixed_16_16_t fp;
120 
121 	WARN_ON(val >> 16);
122 
123 	fp.val = val << 16;
124 	return fp;
125 }
126 
127 static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
128 {
129 	return DIV_ROUND_UP(fp.val, 1 << 16);
130 }
131 
132 static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
133 {
134 	return fp.val >> 16;
135 }
136 
137 static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
138 						 uint_fixed_16_16_t min2)
139 {
140 	uint_fixed_16_16_t min;
141 
142 	min.val = min(min1.val, min2.val);
143 	return min;
144 }
145 
146 static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
147 						 uint_fixed_16_16_t max2)
148 {
149 	uint_fixed_16_16_t max;
150 
151 	max.val = max(max1.val, max2.val);
152 	return max;
153 }
154 
155 static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val,
156 							  uint32_t d)
157 {
158 	uint_fixed_16_16_t fp, res;
159 
160 	fp = u32_to_fixed_16_16(val);
161 	res.val = DIV_ROUND_UP(fp.val, d);
162 	return res;
163 }
164 
165 static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val,
166 							      uint32_t d)
167 {
168 	uint_fixed_16_16_t res;
169 	uint64_t interm_val;
170 
171 	interm_val = (uint64_t)val << 16;
172 	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
173 	WARN_ON(interm_val >> 32);
174 	res.val = (uint32_t) interm_val;
175 
176 	return res;
177 }
178 
179 static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
180 						     uint_fixed_16_16_t mul)
181 {
182 	uint64_t intermediate_val;
183 	uint_fixed_16_16_t fp;
184 
185 	intermediate_val = (uint64_t) val * mul.val;
186 	WARN_ON(intermediate_val >> 32);
187 	fp.val = (uint32_t) intermediate_val;
188 	return fp;
189 }
190 
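/*
 * A worked example of the 16.16 fixed-point helpers above (the numbers are
 * illustrative only): how many source pixels a 100-pixel destination span
 * reads back when downscaling a 4096-wide plane to 1920.
 *
 *	uint_fixed_16_16_t ratio = fixed_16_16_div_round_up(4096, 1920);
 *	uint32_t span = fixed_16_16_to_u32_round_up(
 *				mul_u32_fixed_16_16(100, ratio));
 *
 * ratio.val is DIV_ROUND_UP(4096 << 16, 1920) == 0x22223 (~2.1333), and
 * span rounds 100 * 2.1333 up to 214.
 */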
191 static inline const char *yesno(bool v)
192 {
193 	return v ? "yes" : "no";
194 }
195 
196 static inline const char *onoff(bool v)
197 {
198 	return v ? "on" : "off";
199 }
200 
201 static inline const char *enableddisabled(bool v)
202 {
203 	return v ? "enabled" : "disabled";
204 }
205 
206 enum i915_pipe {
207 	INVALID_PIPE = -1,
208 	PIPE_A = 0,
209 	PIPE_B,
210 	PIPE_C,
211 	_PIPE_EDP,
212 	I915_MAX_PIPES = _PIPE_EDP
213 };
214 #define pipe_name(p) ((p) + 'A')
215 
216 enum transcoder {
217 	TRANSCODER_A = 0,
218 	TRANSCODER_B,
219 	TRANSCODER_C,
220 	TRANSCODER_EDP,
221 	TRANSCODER_DSI_A,
222 	TRANSCODER_DSI_C,
223 	I915_MAX_TRANSCODERS
224 };
225 
226 static inline const char *transcoder_name(enum transcoder transcoder)
227 {
228 	switch (transcoder) {
229 	case TRANSCODER_A:
230 		return "A";
231 	case TRANSCODER_B:
232 		return "B";
233 	case TRANSCODER_C:
234 		return "C";
235 	case TRANSCODER_EDP:
236 		return "EDP";
237 	case TRANSCODER_DSI_A:
238 		return "DSI A";
239 	case TRANSCODER_DSI_C:
240 		return "DSI C";
241 	default:
242 		return "<invalid>";
243 	}
244 }
245 
246 static inline bool transcoder_is_dsi(enum transcoder transcoder)
247 {
248 	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
249 }
250 
251 /*
252  * Global legacy plane identifier. Valid only for primary/sprite
253  * planes on pre-g4x, and only for primary planes on g4x+.
254  */
255 enum plane {
256 	PLANE_A,
257 	PLANE_B,
258 	PLANE_C,
259 };
260 #define plane_name(p) ((p) + 'A')
261 
262 #define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
263 
264 /*
265  * Per-pipe plane identifier.
266  * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
267  * number of planes per CRTC.  Not all platforms really have this many planes,
268  * which means some arrays of size I915_MAX_PLANES may have unused entries
269  * between the topmost sprite plane and the cursor plane.
270  *
271  * This is expected to be passed to various register macros
272  * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
273  */
274 enum plane_id {
275 	PLANE_PRIMARY,
276 	PLANE_SPRITE0,
277 	PLANE_SPRITE1,
278 	PLANE_SPRITE2,
279 	PLANE_CURSOR,
280 	I915_MAX_PLANES,
281 };
282 
283 #define for_each_plane_id_on_crtc(__crtc, __p) \
284 	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
285 		for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
286 
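/*
 * Iteration sketch (a hypothetical debug loop; assumes an intel_crtc whose
 * plane_ids_mask has already been populated):
 *
 *	enum plane_id id;
 *
 *	for_each_plane_id_on_crtc(crtc, id)
 *		DRM_DEBUG_KMS("plane %d live on pipe %c\n",
 *			      id, pipe_name(crtc->pipe));
 */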
287 enum port {
288 	PORT_NONE = -1,
289 	PORT_A = 0,
290 	PORT_B,
291 	PORT_C,
292 	PORT_D,
293 	PORT_E,
294 	I915_MAX_PORTS
295 };
296 #define port_name(p) ((p) + 'A')
297 
298 #define I915_NUM_PHYS_VLV 2
299 
300 enum dpio_channel {
301 	DPIO_CH0,
302 	DPIO_CH1
303 };
304 
305 enum dpio_phy {
306 	DPIO_PHY0,
307 	DPIO_PHY1,
308 	DPIO_PHY2,
309 };
310 
311 enum intel_display_power_domain {
312 	POWER_DOMAIN_PIPE_A,
313 	POWER_DOMAIN_PIPE_B,
314 	POWER_DOMAIN_PIPE_C,
315 	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
316 	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
317 	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
318 	POWER_DOMAIN_TRANSCODER_A,
319 	POWER_DOMAIN_TRANSCODER_B,
320 	POWER_DOMAIN_TRANSCODER_C,
321 	POWER_DOMAIN_TRANSCODER_EDP,
322 	POWER_DOMAIN_TRANSCODER_DSI_A,
323 	POWER_DOMAIN_TRANSCODER_DSI_C,
324 	POWER_DOMAIN_PORT_DDI_A_LANES,
325 	POWER_DOMAIN_PORT_DDI_B_LANES,
326 	POWER_DOMAIN_PORT_DDI_C_LANES,
327 	POWER_DOMAIN_PORT_DDI_D_LANES,
328 	POWER_DOMAIN_PORT_DDI_E_LANES,
329 	POWER_DOMAIN_PORT_DDI_A_IO,
330 	POWER_DOMAIN_PORT_DDI_B_IO,
331 	POWER_DOMAIN_PORT_DDI_C_IO,
332 	POWER_DOMAIN_PORT_DDI_D_IO,
333 	POWER_DOMAIN_PORT_DDI_E_IO,
334 	POWER_DOMAIN_PORT_DSI,
335 	POWER_DOMAIN_PORT_CRT,
336 	POWER_DOMAIN_PORT_OTHER,
337 	POWER_DOMAIN_VGA,
338 	POWER_DOMAIN_AUDIO,
339 	POWER_DOMAIN_PLLS,
340 	POWER_DOMAIN_AUX_A,
341 	POWER_DOMAIN_AUX_B,
342 	POWER_DOMAIN_AUX_C,
343 	POWER_DOMAIN_AUX_D,
344 	POWER_DOMAIN_GMBUS,
345 	POWER_DOMAIN_MODESET,
346 	POWER_DOMAIN_INIT,
347 
348 	POWER_DOMAIN_NUM,
349 };
350 
351 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
352 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
353 		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
354 #define POWER_DOMAIN_TRANSCODER(tran) \
355 	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
356 	 (tran) + POWER_DOMAIN_TRANSCODER_A)
357 
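/*
 * For example, POWER_DOMAIN_TRANSCODER(TRANSCODER_B) evaluates to
 * POWER_DOMAIN_TRANSCODER_B; TRANSCODER_EDP is special-cased because the
 * eDP transcoder sits outside the contiguous A/B/C run of enum values.
 */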
358 enum hpd_pin {
359 	HPD_NONE = 0,
360 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
361 	HPD_CRT,
362 	HPD_SDVO_B,
363 	HPD_SDVO_C,
364 	HPD_PORT_A,
365 	HPD_PORT_B,
366 	HPD_PORT_C,
367 	HPD_PORT_D,
368 	HPD_PORT_E,
369 	HPD_NUM_PINS
370 };
371 
372 #define for_each_hpd_pin(__pin) \
373 	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
374 
375 #define HPD_STORM_DEFAULT_THRESHOLD 5
376 
377 struct i915_hotplug {
378 	struct work_struct hotplug_work;
379 
380 	struct {
381 		unsigned long last_jiffies;
382 		int count;
383 		enum {
384 			HPD_ENABLED = 0,
385 			HPD_DISABLED = 1,
386 			HPD_MARK_DISABLED = 2
387 		} state;
388 	} stats[HPD_NUM_PINS];
389 	u32 event_bits;
390 	struct delayed_work reenable_work;
391 
392 	struct intel_digital_port *irq_port[I915_MAX_PORTS];
393 	u32 long_port_mask;
394 	u32 short_port_mask;
395 	struct work_struct dig_port_work;
396 
397 	struct work_struct poll_init_work;
398 	bool poll_enabled;
399 
400 	unsigned int hpd_storm_threshold;
401 
402 	/*
403 	 * If we get an HPD irq from DP and an HPD irq from non-DP,
404 	 * the non-DP HPD could block the workqueue on acquiring the
405 	 * mode config mutex, which userspace may already hold. However,
406 	 * userspace is waiting on the DP workqueue to run, which is
407 	 * blocked behind the non-DP one.
408 	 */
409 	struct workqueue_struct *dp_wq;
410 };
411 
412 #define I915_GEM_GPU_DOMAINS \
413 	(I915_GEM_DOMAIN_RENDER | \
414 	 I915_GEM_DOMAIN_SAMPLER | \
415 	 I915_GEM_DOMAIN_COMMAND | \
416 	 I915_GEM_DOMAIN_INSTRUCTION | \
417 	 I915_GEM_DOMAIN_VERTEX)
418 
419 #define for_each_pipe(__dev_priv, __p) \
420 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
421 #define for_each_pipe_masked(__dev_priv, __p, __mask) \
422 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
423 		for_each_if ((__mask) & (1 << (__p)))
424 #define for_each_universal_plane(__dev_priv, __pipe, __p)		\
425 	for ((__p) = 0;							\
426 	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
427 	     (__p)++)
428 #define for_each_sprite(__dev_priv, __p, __s)				\
429 	for ((__s) = 0;							\
430 	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
431 	     (__s)++)
432 
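/*
 * Iteration sketch for the pipe walkers above (setup_pipe() is a
 * hypothetical helper; dev_priv comes from the caller):
 *
 *	enum i915_pipe pipe;
 *
 *	for_each_pipe_masked(dev_priv, pipe, BIT(PIPE_B) | BIT(PIPE_C))
 *		setup_pipe(dev_priv, pipe);
 *
 * for_each_pipe() is the unfiltered variant, visiting all
 * INTEL_INFO(dev_priv)->num_pipes pipes in order.
 */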
433 #define for_each_port_masked(__port, __ports_mask) \
434 	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
435 		for_each_if ((__ports_mask) & (1 << (__port)))
436 
437 #define for_each_crtc(dev, crtc) \
438 	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
439 
440 #define for_each_intel_plane(dev, intel_plane) \
441 	list_for_each_entry(intel_plane,			\
442 			    &(dev)->mode_config.plane_list,	\
443 			    base.head)
444 
445 #define for_each_intel_plane_mask(dev, intel_plane, plane_mask)		\
446 	list_for_each_entry(intel_plane,				\
447 			    &(dev)->mode_config.plane_list,		\
448 			    base.head)					\
449 		for_each_if ((plane_mask) &				\
450 			     (1 << drm_plane_index(&intel_plane->base)))
451 
452 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
453 	list_for_each_entry(intel_plane,				\
454 			    &(dev)->mode_config.plane_list,		\
455 			    base.head)					\
456 		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
457 
458 #define for_each_intel_crtc(dev, intel_crtc)				\
459 	list_for_each_entry(intel_crtc,					\
460 			    &(dev)->mode_config.crtc_list,		\
461 			    base.head)
462 
463 #define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)		\
464 	list_for_each_entry(intel_crtc,					\
465 			    &(dev)->mode_config.crtc_list,		\
466 			    base.head)					\
467 		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
468 
469 #define for_each_intel_encoder(dev, intel_encoder)		\
470 	list_for_each_entry(intel_encoder,			\
471 			    &(dev)->mode_config.encoder_list,	\
472 			    base.head)
473 
474 #define for_each_intel_connector_iter(intel_connector, iter) \
475 	while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
476 
477 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
478 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
479 		for_each_if ((intel_encoder)->base.crtc == (__crtc))
480 
481 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
482 	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
483 		for_each_if ((intel_connector)->base.encoder == (__encoder))
484 
485 #define for_each_power_domain(domain, mask)				\
486 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
487 		for_each_if (BIT_ULL(domain) & (mask))
488 
489 #define for_each_power_well(__dev_priv, __power_well)				\
490 	for ((__power_well) = (__dev_priv)->power_domains.power_wells;	\
491 	     (__power_well) - (__dev_priv)->power_domains.power_wells <	\
492 		(__dev_priv)->power_domains.power_well_count;		\
493 	     (__power_well)++)
494 
495 #define for_each_power_well_rev(__dev_priv, __power_well)			\
496 	for ((__power_well) = (__dev_priv)->power_domains.power_wells +		\
497 			      (__dev_priv)->power_domains.power_well_count - 1;	\
498 	     (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;	\
499 	     (__power_well)--)
500 
501 #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)	\
502 	for_each_power_well(__dev_priv, __power_well)				\
503 		for_each_if ((__power_well)->domains & (__domain_mask))
504 
505 #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
506 	for_each_power_well_rev(__dev_priv, __power_well)		        \
507 		for_each_if ((__power_well)->domains & (__domain_mask))
508 
509 #define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
510 	for ((__i) = 0; \
511 	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
512 		     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
513 		      (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
514 	     (__i)++) \
515 		for_each_if (plane_state)
516 
517 struct drm_i915_private;
518 struct i915_mm_struct;
519 struct i915_mmu_object;
520 
521 struct drm_i915_file_private {
522 	struct drm_i915_private *dev_priv;
523 	struct drm_file *file;
524 
525 	struct {
526 		spinlock_t lock;
527 		struct list_head request_list;
528 /* 20ms is a fairly arbitrary limit (greater than the average frame time)
529  * chosen to prevent the CPU getting more than a frame ahead of the GPU
530  * (when using lax throttling for the frontbuffer). We also use it to
531  * offer free GPU waitboosts for severely congested workloads.
532  */
533 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
534 	} mm;
535 	struct idr context_idr;
536 
537 	struct intel_rps_client {
538 		struct list_head link;
539 		unsigned boosts;
540 	} rps;
541 
542 	unsigned int bsd_engine;
543 
544 /* A client can have at most 3 contexts banned before it is
545  * barred from creating new contexts. As one context ban needs
546  * 4 consecutive hangs, and more if there is progress in between,
547  * this is a last-resort stopgap measure to limit a badly
548  * behaving client's access to the GPU.
549  */
550 #define I915_MAX_CLIENT_CONTEXT_BANS 3
551 	int context_bans;
552 };
553 
554 /* Used by dp and fdi links */
555 struct intel_link_m_n {
556 	uint32_t	tu;
557 	uint32_t	gmch_m;
558 	uint32_t	gmch_n;
559 	uint32_t	link_m;
560 	uint32_t	link_n;
561 };
562 
563 void intel_link_compute_m_n(int bpp, int nlanes,
564 			    int pixel_clock, int link_clock,
565 			    struct intel_link_m_n *m_n,
566 			    bool reduce_m_n);
567 
568 /* Interface history:
569  *
570  * 1.1: Original.
571  * 1.2: Add Power Management
572  * 1.3: Add vblank support
573  * 1.4: Fix cmdbuffer path, add heap destroy
574  * 1.5: Add vblank pipe configuration
575  * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
576  *      - Support vertical blank on secondary display pipe
577  */
578 #define DRIVER_MAJOR		1
579 #define DRIVER_MINOR		6
580 #define DRIVER_PATCHLEVEL	0
581 
582 struct opregion_header;
583 struct opregion_acpi;
584 struct opregion_swsci;
585 struct opregion_asle;
586 
587 struct intel_opregion {
588 	struct opregion_header *header;
589 	struct opregion_acpi *acpi;
590 	struct opregion_swsci *swsci;
591 	u32 swsci_gbda_sub_functions;
592 	u32 swsci_sbcb_sub_functions;
593 	struct opregion_asle *asle;
594 	void *rvda;
595 	const void *vbt;
596 	u32 vbt_size;
597 	u32 *lid_state;
598 	struct work_struct asle_work;
599 };
600 #define OPREGION_SIZE            (8*1024)
601 
602 struct intel_overlay;
603 struct intel_overlay_error_state;
604 
605 struct sdvo_device_mapping {
606 	u8 initialized;
607 	u8 dvo_port;
608 	u8 slave_addr;
609 	u8 dvo_wiring;
610 	u8 i2c_pin;
611 	u8 ddc_pin;
612 };
613 
614 struct intel_connector;
615 struct intel_encoder;
616 struct intel_atomic_state;
617 struct intel_crtc_state;
618 struct intel_initial_plane_config;
619 struct intel_crtc;
620 struct intel_limit;
621 struct dpll;
622 struct intel_cdclk_state;
623 
624 struct drm_i915_display_funcs {
625 	void (*get_cdclk)(struct drm_i915_private *dev_priv,
626 			  struct intel_cdclk_state *cdclk_state);
627 	void (*set_cdclk)(struct drm_i915_private *dev_priv,
628 			  const struct intel_cdclk_state *cdclk_state);
629 	int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
630 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
631 	int (*compute_intermediate_wm)(struct drm_device *dev,
632 				       struct intel_crtc *intel_crtc,
633 				       struct intel_crtc_state *newstate);
634 	void (*initial_watermarks)(struct intel_atomic_state *state,
635 				   struct intel_crtc_state *cstate);
636 	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
637 					 struct intel_crtc_state *cstate);
638 	void (*optimize_watermarks)(struct intel_atomic_state *state,
639 				    struct intel_crtc_state *cstate);
640 	int (*compute_global_watermarks)(struct drm_atomic_state *state);
641 	void (*update_wm)(struct intel_crtc *crtc);
642 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
643 	/* Returns the active state of the crtc, and if the crtc is active,
644 	 * fills out the pipe-config with the hw state. */
645 	bool (*get_pipe_config)(struct intel_crtc *,
646 				struct intel_crtc_state *);
647 	void (*get_initial_plane_config)(struct intel_crtc *,
648 					 struct intel_initial_plane_config *);
649 	int (*crtc_compute_clock)(struct intel_crtc *crtc,
650 				  struct intel_crtc_state *crtc_state);
651 	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
652 			    struct drm_atomic_state *old_state);
653 	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
654 			     struct drm_atomic_state *old_state);
655 	void (*update_crtcs)(struct drm_atomic_state *state,
656 			     unsigned int *crtc_vblank_mask);
657 	void (*audio_codec_enable)(struct drm_connector *connector,
658 				   struct intel_encoder *encoder,
659 				   const struct drm_display_mode *adjusted_mode);
660 	void (*audio_codec_disable)(struct intel_encoder *encoder);
661 	void (*fdi_link_train)(struct intel_crtc *crtc,
662 			       const struct intel_crtc_state *crtc_state);
663 	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
664 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
665 			  struct drm_framebuffer *fb,
666 			  struct drm_i915_gem_object *obj,
667 			  struct drm_i915_gem_request *req,
668 			  uint32_t flags);
669 	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
670 	/* clock updates for mode set */
671 	/* cursor updates */
672 	/* render clock increase/decrease */
673 	/* display clock increase/decrease */
674 	/* pll clock increase/decrease */
675 
676 	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
677 	void (*load_luts)(struct drm_crtc_state *crtc_state);
678 };
679 
680 enum forcewake_domain_id {
681 	FW_DOMAIN_ID_RENDER = 0,
682 	FW_DOMAIN_ID_BLITTER,
683 	FW_DOMAIN_ID_MEDIA,
684 
685 	FW_DOMAIN_ID_COUNT
686 };
687 
688 enum forcewake_domains {
689 	FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
690 	FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
691 	FORCEWAKE_MEDIA	= BIT(FW_DOMAIN_ID_MEDIA),
692 	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
693 			 FORCEWAKE_BLITTER |
694 			 FORCEWAKE_MEDIA)
695 };
696 
697 #define FW_REG_READ  (1)
698 #define FW_REG_WRITE (2)
699 
700 enum decoupled_power_domain {
701 	GEN9_DECOUPLED_PD_BLITTER = 0,
702 	GEN9_DECOUPLED_PD_RENDER,
703 	GEN9_DECOUPLED_PD_MEDIA,
704 	GEN9_DECOUPLED_PD_ALL
705 };
706 
707 enum decoupled_ops {
708 	GEN9_DECOUPLED_OP_WRITE = 0,
709 	GEN9_DECOUPLED_OP_READ
710 };
711 
712 enum forcewake_domains
713 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
714 			       i915_reg_t reg, unsigned int op);
715 
716 struct intel_uncore_funcs {
717 	void (*force_wake_get)(struct drm_i915_private *dev_priv,
718 			       enum forcewake_domains domains);
719 	void (*force_wake_put)(struct drm_i915_private *dev_priv,
720 			       enum forcewake_domains domains);
721 
722 	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv,
723 			       i915_reg_t r, bool trace);
724 	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
725 			       i915_reg_t r, bool trace);
726 	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
727 			       i915_reg_t r, bool trace);
728 	u64      (*mmio_readq)(struct drm_i915_private *dev_priv,
729 			       i915_reg_t r, bool trace);
730 
731 	void (*mmio_writeb)(struct drm_i915_private *dev_priv,
732 			    i915_reg_t r, uint8_t val, bool trace);
733 	void (*mmio_writew)(struct drm_i915_private *dev_priv,
734 			    i915_reg_t r, uint16_t val, bool trace);
735 	void (*mmio_writel)(struct drm_i915_private *dev_priv,
736 			    i915_reg_t r, uint32_t val, bool trace);
737 };
738 
739 struct intel_forcewake_range {
740 	u32 start;
741 	u32 end;
742 
743 	enum forcewake_domains domains;
744 };
745 
746 struct intel_uncore {
747 	spinlock_t lock; /** lock is also taken in irq contexts. */
748 
749 	const struct intel_forcewake_range *fw_domains_table;
750 	unsigned int fw_domains_table_entries;
751 
752 	struct notifier_block pmic_bus_access_nb;
753 	struct intel_uncore_funcs funcs;
754 
755 	unsigned fifo_count;
756 
757 	enum forcewake_domains fw_domains;
758 	enum forcewake_domains fw_domains_active;
759 
760 	u32 fw_set;
761 	u32 fw_clear;
762 	u32 fw_reset;
763 
764 	struct intel_uncore_forcewake_domain {
765 		enum forcewake_domain_id id;
766 		enum forcewake_domains mask;
767 		unsigned wake_count;
768 		struct hrtimer timer;
769 		i915_reg_t reg_set;
770 		i915_reg_t reg_ack;
771 	} fw_domain[FW_DOMAIN_ID_COUNT];
772 
773 	int unclaimed_mmio_check;
774 };
775 
776 #define __mask_next_bit(mask) ({					\
777 	int __idx = ffs(mask) - 1;					\
778 	mask &= ~BIT(__idx);						\
779 	__idx;								\
780 })
781 
782 /* Iterate over initialised fw domains */
783 #define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
784 	for (tmp__ = (mask__); \
785 	     tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
786 
787 #define for_each_fw_domain(domain__, dev_priv__, tmp__) \
788 	for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
789 
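/*
 * __mask_next_bit() destructively pops the lowest set bit from the mask, so
 * the loop body runs exactly once per selected domain. A sketch, where
 * wait_for_domain_ack() stands in for the per-domain work intel_uncore.c
 * actually does:
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	unsigned int tmp;
 *
 *	for_each_fw_domain_masked(domain, FORCEWAKE_RENDER | FORCEWAKE_MEDIA,
 *				  dev_priv, tmp)
 *		wait_for_domain_ack(dev_priv, domain);
 */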
790 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
791 #define CSR_VERSION_MAJOR(version)	((version) >> 16)
792 #define CSR_VERSION_MINOR(version)	((version) & 0xffff)
793 
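/* For example, CSR_VERSION(1, 23) packs to 0x00010017, and the halves unpack
 * again via CSR_VERSION_MAJOR()/CSR_VERSION_MINOR(). */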
794 struct intel_csr {
795 	struct work_struct work;
796 	const char *fw_path;
797 	uint32_t *dmc_payload;
798 	uint32_t dmc_fw_size;
799 	uint32_t version;
800 	uint32_t mmio_count;
801 	i915_reg_t mmioaddr[8];
802 	uint32_t mmiodata[8];
803 	uint32_t dc_state;
804 	uint32_t allowed_dc_mask;
805 };
806 
807 #define DEV_INFO_FOR_EACH_FLAG(func) \
808 	func(is_mobile); \
809 	func(is_lp); \
810 	func(is_alpha_support); \
811 	/* Keep has_* in alphabetical order */ \
812 	func(has_64bit_reloc); \
813 	func(has_aliasing_ppgtt); \
814 	func(has_csr); \
815 	func(has_ddi); \
816 	func(has_decoupled_mmio); \
817 	func(has_dp_mst); \
818 	func(has_fbc); \
819 	func(has_fpga_dbg); \
820 	func(has_full_ppgtt); \
821 	func(has_full_48bit_ppgtt); \
822 	func(has_gmbus_irq); \
823 	func(has_gmch_display); \
824 	func(has_guc); \
825 	func(has_hotplug); \
826 	func(has_hw_contexts); \
827 	func(has_l3_dpf); \
828 	func(has_llc); \
829 	func(has_logical_ring_contexts); \
830 	func(has_overlay); \
831 	func(has_pipe_cxsr); \
832 	func(has_pooled_eu); \
833 	func(has_psr); \
834 	func(has_rc6); \
835 	func(has_rc6p); \
836 	func(has_resource_streamer); \
837 	func(has_runtime_pm); \
838 	func(has_snoop); \
839 	func(unfenced_needs_alignment); \
840 	func(cursor_needs_physical); \
841 	func(hws_needs_physical); \
842 	func(overlay_needs_physical); \
843 	func(supports_tv);
844 
845 struct sseu_dev_info {
846 	u8 slice_mask;
847 	u8 subslice_mask;
848 	u8 eu_total;
849 	u8 eu_per_subslice;
850 	u8 min_eu_in_pool;
851 	/* For each slice, a bitfield of the subslice(s) that have 7 EUs. */
852 	u8 subslice_7eu[3];
853 	u8 has_slice_pg:1;
854 	u8 has_subslice_pg:1;
855 	u8 has_eu_pg:1;
856 };
857 
858 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
859 {
860 	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
861 }
862 
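/* For example, slice_mask == 0x3 with subslice_mask == 0x7 describes two
 * slices of three subslices each, so sseu_subslice_total() returns 6. */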
863 /* Keep in gen based order, and chronological order within a gen */
864 enum intel_platform {
865 	INTEL_PLATFORM_UNINITIALIZED = 0,
866 	INTEL_I830,
867 	INTEL_I845G,
868 	INTEL_I85X,
869 	INTEL_I865G,
870 	INTEL_I915G,
871 	INTEL_I915GM,
872 	INTEL_I945G,
873 	INTEL_I945GM,
874 	INTEL_G33,
875 	INTEL_PINEVIEW,
876 	INTEL_I965G,
877 	INTEL_I965GM,
878 	INTEL_G45,
879 	INTEL_GM45,
880 	INTEL_IRONLAKE,
881 	INTEL_SANDYBRIDGE,
882 	INTEL_IVYBRIDGE,
883 	INTEL_VALLEYVIEW,
884 	INTEL_HASWELL,
885 	INTEL_BROADWELL,
886 	INTEL_CHERRYVIEW,
887 	INTEL_SKYLAKE,
888 	INTEL_BROXTON,
889 	INTEL_KABYLAKE,
890 	INTEL_GEMINILAKE,
891 	INTEL_MAX_PLATFORMS
892 };
893 
894 struct intel_device_info {
895 	u32 display_mmio_offset;
896 	u16 device_id;
897 	u8 num_pipes;
898 	u8 num_sprites[I915_MAX_PIPES];
899 	u8 num_scalers[I915_MAX_PIPES];
900 	u8 gen;
901 	u16 gen_mask;
902 	enum intel_platform platform;
903 	u8 ring_mask; /* Rings supported by the HW */
904 	u8 num_rings;
905 #define DEFINE_FLAG(name) u8 name:1
906 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
907 #undef DEFINE_FLAG
908 	u16 ddb_size; /* in blocks */
909 	/* Register offsets for the various display pipes and transcoders */
910 	int pipe_offsets[I915_MAX_TRANSCODERS];
911 	int trans_offsets[I915_MAX_TRANSCODERS];
912 	int palette_offsets[I915_MAX_PIPES];
913 	int cursor_offsets[I915_MAX_PIPES];
914 
915 	/* Slice/subslice/EU info */
916 	struct sseu_dev_info sseu;
917 
918 	struct color_luts {
919 		u16 degamma_lut_size;
920 		u16 gamma_lut_size;
921 	} color;
922 };
923 
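/*
 * DEV_INFO_FOR_EACH_FLAG() is an X-macro: the flag list is written once and
 * expanded with different per-flag macros, such as DEFINE_FLAG() in struct
 * intel_device_info above. A debug dump can reuse the same list, roughly
 * along these lines (a sketch; the real consumer lives in i915_debugfs.c):
 *
 *	#define PRINT_FLAG(name) \
 *		seq_printf(m, #name ": %s\n", yesno(info->name))
 *	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 *	#undef PRINT_FLAG
 */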
924 struct intel_display_error_state;
925 
926 struct i915_gpu_state {
927 	struct kref ref;
928 	struct timeval time;
929 	struct timeval boottime;
930 	struct timeval uptime;
931 
932 	struct drm_i915_private *i915;
933 
934 	char error_msg[128];
935 	bool simulated;
936 	bool awake;
937 	bool wakelock;
938 	bool suspended;
939 	int iommu;
940 	u32 reset_count;
941 	u32 suspend_count;
942 	struct intel_device_info device_info;
943 	struct i915_params params;
944 
945 	/* Generic register state */
946 	u32 eir;
947 	u32 pgtbl_er;
948 	u32 ier;
949 	u32 gtier[4], ngtier;
950 	u32 ccid;
951 	u32 derrmr;
952 	u32 forcewake;
953 	u32 error; /* gen6+ */
954 	u32 err_int; /* gen7 */
955 	u32 fault_data0; /* gen8, gen9 */
956 	u32 fault_data1; /* gen8, gen9 */
957 	u32 done_reg;
958 	u32 gac_eco;
959 	u32 gam_ecochk;
960 	u32 gab_ctl;
961 	u32 gfx_mode;
962 
963 	u32 nfence;
964 	u64 fence[I915_MAX_NUM_FENCES];
965 	struct intel_overlay_error_state *overlay;
966 	struct intel_display_error_state *display;
967 	struct drm_i915_error_object *semaphore;
968 	struct drm_i915_error_object *guc_log;
969 
970 	struct drm_i915_error_engine {
971 		int engine_id;
972 		/* Software tracked state */
973 		bool waiting;
974 		int num_waiters;
975 		unsigned long hangcheck_timestamp;
976 		bool hangcheck_stalled;
977 		enum intel_engine_hangcheck_action hangcheck_action;
978 		struct i915_address_space *vm;
979 		int num_requests;
980 
981 		/* position of active request inside the ring */
982 		u32 rq_head, rq_post, rq_tail;
983 
984 		/* our own tracking of ring head and tail */
985 		u32 cpu_ring_head;
986 		u32 cpu_ring_tail;
987 
988 		u32 last_seqno;
989 
990 		/* Register state */
991 		u32 start;
992 		u32 tail;
993 		u32 head;
994 		u32 ctl;
995 		u32 mode;
996 		u32 hws;
997 		u32 ipeir;
998 		u32 ipehr;
999 		u32 bbstate;
1000 		u32 instpm;
1001 		u32 instps;
1002 		u32 seqno;
1003 		u64 bbaddr;
1004 		u64 acthd;
1005 		u32 fault_reg;
1006 		u64 faddr;
1007 		u32 rc_psmi; /* sleep state */
1008 		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
1009 		struct intel_instdone instdone;
1010 
1011 		struct drm_i915_error_context {
1012 			char comm[TASK_COMM_LEN];
1013 			pid_t pid;
1014 			u32 handle;
1015 			u32 hw_id;
1016 			int ban_score;
1017 			int active;
1018 			int guilty;
1019 		} context;
1020 
1021 		struct drm_i915_error_object {
1022 			u64 gtt_offset;
1023 			u64 gtt_size;
1024 			int page_count;
1025 			int unused;
1026 			u32 *pages[0];
1027 		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
1028 
1029 		struct drm_i915_error_object *wa_ctx;
1030 
1031 		struct drm_i915_error_request {
1032 			long jiffies;
1033 			pid_t pid;
1034 			u32 context;
1035 			int ban_score;
1036 			u32 seqno;
1037 			u32 head;
1038 			u32 tail;
1039 		} *requests, execlist[2];
1040 
1041 		struct drm_i915_error_waiter {
1042 			char comm[TASK_COMM_LEN];
1043 			pid_t pid;
1044 			u32 seqno;
1045 		} *waiters;
1046 
1047 		struct {
1048 			u32 gfx_mode;
1049 			union {
1050 				u64 pdp[4];
1051 				u32 pp_dir_base;
1052 			};
1053 		} vm_info;
1054 	} engine[I915_NUM_ENGINES];
1055 
1056 	struct drm_i915_error_buffer {
1057 		u32 size;
1058 		u32 name;
1059 		u32 rseqno[I915_NUM_ENGINES], wseqno;
1060 		u64 gtt_offset;
1061 		u32 read_domains;
1062 		u32 write_domain;
1063 		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
1064 		u32 tiling:2;
1065 		u32 dirty:1;
1066 		u32 purgeable:1;
1067 		u32 userptr:1;
1068 		s32 engine:4;
1069 		u32 cache_level:3;
1070 	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
1071 	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
1072 	struct i915_address_space *active_vm[I915_NUM_ENGINES];
1073 };
1074 
1075 enum i915_cache_level {
1076 	I915_CACHE_NONE = 0,
1077 	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
1078 	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
1079 			      caches, eg sampler/render caches, and the
1080 			      large Last-Level-Cache. LLC is coherent with
1081 			      the CPU, but L3 is only visible to the GPU. */
1082 	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
1083 };
1084 
1085 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
1086 
1087 enum fb_op_origin {
1088 	ORIGIN_GTT,
1089 	ORIGIN_CPU,
1090 	ORIGIN_CS,
1091 	ORIGIN_FLIP,
1092 	ORIGIN_DIRTYFB,
1093 };
1094 
1095 struct intel_fbc {
1096 	/* This is always the inner lock when overlapping with struct_mutex and
1097 	 * it's the outer lock when overlapping with stolen_lock. */
1098 	struct lock lock;
1099 	unsigned threshold;
1100 	unsigned int possible_framebuffer_bits;
1101 	unsigned int busy_bits;
1102 	unsigned int visible_pipes_mask;
1103 	struct intel_crtc *crtc;
1104 
1105 	struct drm_mm_node compressed_fb;
1106 	struct drm_mm_node *compressed_llb;
1107 
1108 	bool false_color;
1109 
1110 	bool enabled;
1111 	bool active;
1112 
1113 	bool underrun_detected;
1114 	struct work_struct underrun_work;
1115 
1116 	struct intel_fbc_state_cache {
1117 		struct i915_vma *vma;
1118 
1119 		struct {
1120 			unsigned int mode_flags;
1121 			uint32_t hsw_bdw_pixel_rate;
1122 		} crtc;
1123 
1124 		struct {
1125 			unsigned int rotation;
1126 			int src_w;
1127 			int src_h;
1128 			bool visible;
1129 		} plane;
1130 
1131 		struct {
1132 			const struct drm_format_info *format;
1133 			unsigned int stride;
1134 		} fb;
1135 	} state_cache;
1136 
1137 	struct intel_fbc_reg_params {
1138 		struct i915_vma *vma;
1139 
1140 		struct {
1141 			enum i915_pipe pipe;
1142 			enum plane plane;
1143 			unsigned int fence_y_offset;
1144 		} crtc;
1145 
1146 		struct {
1147 			const struct drm_format_info *format;
1148 			unsigned int stride;
1149 		} fb;
1150 
1151 		int cfb_size;
1152 	} params;
1153 
1154 	struct intel_fbc_work {
1155 		bool scheduled;
1156 		u32 scheduled_vblank;
1157 		struct work_struct work;
1158 	} work;
1159 
1160 	const char *no_fbc_reason;
1161 };
1162 
1163 /*
1164  * HIGH_RR is the highest eDP panel refresh rate read from EDID.
1165  * LOW_RR is the lowest eDP panel refresh rate found from EDID
1166  * parsing for the same resolution.
1167  */
1168 enum drrs_refresh_rate_type {
1169 	DRRS_HIGH_RR,
1170 	DRRS_LOW_RR,
1171 	DRRS_MAX_RR, /* RR count */
1172 };
1173 
1174 enum drrs_support_type {
1175 	DRRS_NOT_SUPPORTED = 0,
1176 	STATIC_DRRS_SUPPORT = 1,
1177 	SEAMLESS_DRRS_SUPPORT = 2
1178 };
1179 
1180 struct intel_dp;
1181 struct i915_drrs {
1182 	struct lock mutex;
1183 	struct delayed_work work;
1184 	struct intel_dp *dp;
1185 	unsigned busy_frontbuffer_bits;
1186 	enum drrs_refresh_rate_type refresh_rate_type;
1187 	enum drrs_support_type type;
1188 };
1189 
1190 struct i915_psr {
1191 	struct lock lock;
1192 	bool sink_support;
1193 	bool source_ok;
1194 	struct intel_dp *enabled;
1195 	bool active;
1196 	struct delayed_work work;
1197 	unsigned busy_frontbuffer_bits;
1198 	bool psr2_support;
1199 	bool aux_frame_sync;
1200 	bool link_standby;
1201 	bool y_cord_support;
1202 	bool colorimetry_support;
1203 	bool alpm;
1204 };
1205 
1206 enum intel_pch {
1207 	PCH_NONE = 0,	/* No PCH present */
1208 	PCH_IBX,	/* Ibexpeak PCH */
1209 	PCH_CPT,	/* Cougarpoint PCH */
1210 	PCH_LPT,	/* Lynxpoint PCH */
1211 	PCH_SPT,        /* Sunrisepoint PCH */
1212 	PCH_KBP,        /* Kabypoint PCH */
1213 	PCH_NOP,
1214 };
1215 
1216 enum intel_sbi_destination {
1217 	SBI_ICLK,
1218 	SBI_MPHY,
1219 };
1220 
1221 #define QUIRK_PIPEA_FORCE (1<<0)
1222 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
1223 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
1224 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
1225 #define QUIRK_PIPEB_FORCE (1<<4)
1226 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
1227 
1228 struct intel_fbdev;
1229 struct intel_fbc_work;
1230 
1231 struct intel_gmbus {
1232 	struct i2c_adapter adapter;
1233 #define GMBUS_FORCE_BIT_RETRY (1U << 31)
1234 	u32 force_bit;
1235 	u32 reg0;
1236 	i915_reg_t gpio_reg;
1237 	struct i2c_algo_bit_data bit_algo;
1238 	struct drm_i915_private *dev_priv;
1239 };
1240 
1241 struct i915_suspend_saved_registers {
1242 	u32 saveDSPARB;
1243 	u32 saveFBC_CONTROL;
1244 	u32 saveCACHE_MODE_0;
1245 	u32 saveMI_ARB_STATE;
1246 	u32 saveSWF0[16];
1247 	u32 saveSWF1[16];
1248 	u32 saveSWF3[3];
1249 	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
1250 	u32 savePCH_PORT_HOTPLUG;
1251 	u16 saveGCDGMBUS;
1252 };
1253 
1254 struct vlv_s0ix_state {
1255 	/* GAM */
1256 	u32 wr_watermark;
1257 	u32 gfx_prio_ctrl;
1258 	u32 arb_mode;
1259 	u32 gfx_pend_tlb0;
1260 	u32 gfx_pend_tlb1;
1261 	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
1262 	u32 media_max_req_count;
1263 	u32 gfx_max_req_count;
1264 	u32 render_hwsp;
1265 	u32 ecochk;
1266 	u32 bsd_hwsp;
1267 	u32 blt_hwsp;
1268 	u32 tlb_rd_addr;
1269 
1270 	/* MBC */
1271 	u32 g3dctl;
1272 	u32 gsckgctl;
1273 	u32 mbctl;
1274 
1275 	/* GCP */
1276 	u32 ucgctl1;
1277 	u32 ucgctl3;
1278 	u32 rcgctl1;
1279 	u32 rcgctl2;
1280 	u32 rstctl;
1281 	u32 misccpctl;
1282 
1283 	/* GPM */
1284 	u32 gfxpause;
1285 	u32 rpdeuhwtc;
1286 	u32 rpdeuc;
1287 	u32 ecobus;
1288 	u32 pwrdwnupctl;
1289 	u32 rp_down_timeout;
1290 	u32 rp_deucsw;
1291 	u32 rcubmabdtmr;
1292 	u32 rcedata;
1293 	u32 spare2gh;
1294 
1295 	/* Display 1 CZ domain */
1296 	u32 gt_imr;
1297 	u32 gt_ier;
1298 	u32 pm_imr;
1299 	u32 pm_ier;
1300 	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
1301 
1302 	/* GT SA CZ domain */
1303 	u32 tilectl;
1304 	u32 gt_fifoctl;
1305 	u32 gtlc_wake_ctrl;
1306 	u32 gtlc_survive;
1307 	u32 pmwgicz;
1308 
1309 	/* Display 2 CZ domain */
1310 	u32 gu_ctl0;
1311 	u32 gu_ctl1;
1312 	u32 pcbr;
1313 	u32 clock_gate_dis2;
1314 };
1315 
1316 struct intel_rps_ei {
1317 	ktime_t ktime;
1318 	u32 render_c0;
1319 	u32 media_c0;
1320 };
1321 
1322 struct intel_gen6_power_mgmt {
1323 	/*
1324 	 * work, interrupts_enabled and pm_iir are protected by
1325 	 * dev_priv->irq_lock
1326 	 */
1327 	struct work_struct work;
1328 	bool interrupts_enabled;
1329 	u32 pm_iir;
1330 
1331 	/* PM interrupt bits that should never be masked */
1332 	u32 pm_intrmsk_mbz;
1333 
1334 	/* Frequencies are stored in potentially platform dependent multiples.
1335 	 * In other words, *_freq needs to be multiplied by X to be interesting.
1336 	 * Soft limits are those which are used for the dynamic reclocking done
1337 	 * by the driver (raise frequencies under heavy loads, and lower for
1338 	 * lighter loads). Hard limits are those imposed by the hardware.
1339 	 *
1340 	 * A distinction is made for overclocking, which is never enabled by
1341 	 * default, and is considered to be above the hard limit if it's
1342 	 * possible at all.
1343 	 */
1344 	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
1345 	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
1346 	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
1347 	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
1348 	u8 min_freq;		/* AKA RPn. Minimum frequency */
1349 	u8 boost_freq;		/* Frequency to request when wait boosting */
1350 	u8 idle_freq;		/* Frequency to request when we are idle */
1351 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
1352 	u8 rp1_freq;		/* "less than" RP0 power/frequency */
1353 	u8 rp0_freq;		/* Non-overclocked max frequency. */
1354 	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
1355 
1356 	u8 up_threshold; /* Current %busy required to upclock */
1357 	u8 down_threshold; /* Current %busy required to downclock */
1358 
1359 	int last_adj;
1360 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
1361 
1362 	spinlock_t client_lock;
1363 	struct list_head clients;
1364 	bool client_boost;
1365 
1366 	bool enabled;
1367 	struct delayed_work autoenable_work;
1368 	unsigned boosts;
1369 
1370 	/* manual wa residency calculations */
1371 	struct intel_rps_ei ei;
1372 
1373 	/*
1374 	 * Protects RPS/RC6 register access and PCU communication.
1375 	 * Must be taken after struct_mutex if nested. Note that
1376 	 * this lock may be held for long periods of time when
1377 	 * talking to hw - so only take it when talking to hw!
1378 	 */
1379 	struct lock hw_lock;
1380 };
1381 
1382 /* defined intel_pm.c */
1383 extern spinlock_t mchdev_lock;
1384 
1385 struct intel_ilk_power_mgmt {
1386 	u8 cur_delay;
1387 	u8 min_delay;
1388 	u8 max_delay;
1389 	u8 fmax;
1390 	u8 fstart;
1391 
1392 	u64 last_count1;
1393 	unsigned long last_time1;
1394 	unsigned long chipset_power;
1395 	u64 last_count2;
1396 	u64 last_time2;
1397 	unsigned long gfx_power;
1398 	u8 corr;
1399 
1400 	int c_m;
1401 	int r_t;
1402 };
1403 
1404 struct drm_i915_private;
1405 struct i915_power_well;
1406 
1407 struct i915_power_well_ops {
1408 	/*
1409 	 * Synchronize the well's hw state to match the current sw state, for
1410 	 * example enable/disable it based on the current refcount. Called
1411 	 * during driver init and resume time, possibly after first calling
1412 	 * the enable/disable handlers.
1413 	 */
1414 	void (*sync_hw)(struct drm_i915_private *dev_priv,
1415 			struct i915_power_well *power_well);
1416 	/*
1417 	 * Enable the well and resources that depend on it (for example
1418 	 * interrupts located on the well). Called after the 0->1 refcount
1419 	 * transition.
1420 	 */
1421 	void (*enable)(struct drm_i915_private *dev_priv,
1422 		       struct i915_power_well *power_well);
1423 	/*
1424 	 * Disable the well and resources that depend on it. Called after
1425 	 * the 1->0 refcount transition.
1426 	 */
1427 	void (*disable)(struct drm_i915_private *dev_priv,
1428 			struct i915_power_well *power_well);
1429 	/* Returns the hw enabled state. */
1430 	bool (*is_enabled)(struct drm_i915_private *dev_priv,
1431 			   struct i915_power_well *power_well);
1432 };
1433 
1434 /* Power well structure for haswell */
1435 struct i915_power_well {
1436 	const char *name;
1437 	bool always_on;
1438 	/* power well enable/disable usage count */
1439 	int count;
1440 	/* cached hw enabled state */
1441 	bool hw_enabled;
1442 	u64 domains;
1443 	/* unique identifier for this power well */
1444 	unsigned long id;
1445 	/*
1446 	 * Arbitrary data associated with this power well. Platform and power
1447 	 * well specific.
1448 	 */
1449 	unsigned long data;
1450 	const struct i915_power_well_ops *ops;
1451 };
1452 
1453 struct i915_power_domains {
1454 	/*
1455 	 * Power wells needed for initialization at driver init and suspend
1456 	 * time are on. They are kept on until after the first modeset.
1457 	 */
1458 	bool init_power_on;
1459 	bool initializing;
1460 	int power_well_count;
1461 
1462 	struct lock lock;
1463 	int domain_use_count[POWER_DOMAIN_NUM];
1464 	struct i915_power_well *power_wells;
1465 };
1466 
1467 #define MAX_L3_SLICES 2
1468 struct intel_l3_parity {
1469 	u32 *remap_info[MAX_L3_SLICES];
1470 	struct work_struct error_work;
1471 	int which_slice;
1472 };
1473 
1474 struct i915_gem_mm {
1475 	/** Memory allocator for GTT stolen memory */
1476 	struct drm_mm stolen;
1477 	/** Protects the usage of the GTT stolen memory allocator. This is
1478 	 * always the inner lock when overlapping with struct_mutex. */
1479 	struct lock stolen_lock;
1480 
1481 	/** List of all objects in gtt_space. Used to restore gtt
1482 	 * mappings on resume */
1483 	struct list_head bound_list;
1484 	/**
1485 	 * List of objects which are not bound to the GTT (thus
1486 	 * are idle and not used by the GPU). These objects may or may
1487 	 * not actually have any pages attached.
1488 	 */
1489 	struct list_head unbound_list;
1490 
1491 	/** List of all objects in gtt_space, currently mmapped by userspace.
1492 	 * All objects within this list must also be on bound_list.
1493 	 */
1494 	struct list_head userfault_list;
1495 
1496 	/**
1497 	 * List of objects which are pending destruction.
1498 	 */
1499 	struct llist_head free_list;
1500 	struct work_struct free_work;
1501 
1502 	/** Usable portion of the GTT for GEM */
1503 	dma_addr_t stolen_base; /* limited to low memory (32-bit) */
1504 
1505 	/** PPGTT used for aliasing the PPGTT with the GTT */
1506 	struct i915_hw_ppgtt *aliasing_ppgtt;
1507 
1508 	struct notifier_block oom_notifier;
1509 	struct notifier_block vmap_notifier;
1510 	struct shrinker shrinker;
1511 
1512 	/** LRU list of objects with fence regs on them. */
1513 	struct list_head fence_list;
1514 
1515 	/**
1516 	 * Are we in a non-interruptible section of code like
1517 	 * modesetting?
1518 	 */
1519 	bool interruptible;
1520 
1521 	/* the indicator for dispatching video commands on two BSD rings */
1522 	atomic_t bsd_engine_dispatch_index;
1523 
1524 	/** Bit 6 swizzling required for X tiling */
1525 	uint32_t bit_6_swizzle_x;
1526 	/** Bit 6 swizzling required for Y tiling */
1527 	uint32_t bit_6_swizzle_y;
1528 
1529 	/* accounting, useful for userland debugging */
1530 	spinlock_t object_stat_lock;
1531 	u64 object_memory;
1532 	u32 object_count;
1533 };
1534 
1535 struct drm_i915_error_state_buf {
1536 	struct drm_i915_private *i915;
1537 	unsigned bytes;
1538 	unsigned size;
1539 	int err;
1540 	u8 *buf;
1541 	loff_t start;
1542 	loff_t pos;
1543 };
1544 
1545 #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
1546 #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
1547 
1548 #define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
1549 #define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
1550 
1551 struct i915_gpu_error {
1552 	/* For hangcheck timer */
1553 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1554 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1555 
1556 	struct delayed_work hangcheck_work;
1557 
1558 	/* For reset and error_state handling. */
1559 	spinlock_t lock;
1560 	/* Protected by the above dev->gpu_error.lock. */
1561 	struct i915_gpu_state *first_error;
1562 
1563 	unsigned long missed_irq_rings;
1564 
1565 	/**
1566 	 * State variable controlling the reset flow and count
1567 	 *
1568 	 * This is a counter which gets incremented when reset is triggered.
1569 	 *
1570 	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
1571 	 * meaning that any waiters holding onto the struct_mutex should
1572 	 * relinquish the lock immediately in order for the reset to start.
1573 	 *
1574 	 * If reset is not completed successfully, the I915_WEDGED bit is
1575 	 * set meaning that hardware is terminally sour and there is no
1576 	 * recovery. All waiters on the reset_queue will be woken when
1577 	 * that happens.
1578 	 *
1579 	 * This counter is used by the wait_seqno code to notice that reset
1580 	 * event happened and it needs to restart the entire ioctl (since most
1581 	 * likely the seqno it waited for won't ever signal anytime soon).
1582 	 *
1583 	 * This is important for lock-free wait paths, where no contended lock
1584 	 * naturally enforces the correct ordering between the bail-out of the
1585 	 * waiter and the gpu reset work code.
1586 	 */
1587 	unsigned long reset_count;
1588 
1589 	/**
1590 	 * flags: Control various stages of the GPU reset
1591 	 *
1592 	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
1593 	 * other users acquiring the struct_mutex. To do this we set the
1594 	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
1595 	 * and then check for that bit before acquiring the struct_mutex (in
1596 	 * i915_mutex_lock_interruptible()). I915_RESET_BACKOFF serves a
1597 	 * secondary role in preventing two concurrent global reset attempts.
1598 	 *
1599 	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
1600 	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
1601 	 * but it may be held by some long running waiter (that we cannot
1602 	 * interrupt without causing trouble). Once we are ready to do the GPU
1603 	 * reset, we set the I915_RESET_HANDOFF bit and wake up any waiters. If
1604 	 * they already hold the struct_mutex and want to participate they can
1605 	 * inspect the bit and do the reset directly, otherwise the worker
1606 	 * waits for the struct_mutex.
1607 	 *
1608 	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
1609 	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
1610 	 * i915_gem_request_alloc(), this bit is checked and the sequence
1611 	 * aborted (with -EIO reported to userspace) if set.
1612 	 */
1613 	unsigned long flags;
1614 #define I915_RESET_BACKOFF	0
1615 #define I915_RESET_HANDOFF	1
1616 #define I915_WEDGED		(BITS_PER_LONG - 1)
1617 
1618 	/**
1619 	 * Waitqueue to signal when a hang is detected. Used for waiters
1620 	 * to release the struct_mutex for the reset to proceed.
1621 	 */
1622 	wait_queue_head_t wait_queue;
1623 
1624 	/**
1625 	 * Waitqueue to signal when the reset has completed. Used by clients
1626 	 * that wait for dev_priv->mm.wedged to settle.
1627 	 */
1628 	wait_queue_head_t reset_queue;
1629 
1630 	/* For missed irq/seqno simulation. */
1631 	unsigned long test_irq_rings;
1632 };
1633 
1634 enum modeset_restore {
1635 	MODESET_ON_LID_OPEN,
1636 	MODESET_DONE,
1637 	MODESET_SUSPENDED,
1638 };
1639 
1640 #define DP_AUX_A 0x40
1641 #define DP_AUX_B 0x10
1642 #define DP_AUX_C 0x20
1643 #define DP_AUX_D 0x30
1644 
1645 #define DDC_PIN_B  0x05
1646 #define DDC_PIN_C  0x04
1647 #define DDC_PIN_D  0x06
1648 
1649 struct ddi_vbt_port_info {
1650 	/*
1651 	 * This is an index in the HDMI/DVI DDI buffer translation table.
1652 	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1653 	 * populate this field.
1654 	 */
1655 #define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
1656 	uint8_t hdmi_level_shift;
1657 
1658 	uint8_t supports_dvi:1;
1659 	uint8_t supports_hdmi:1;
1660 	uint8_t supports_dp:1;
1661 	uint8_t supports_edp:1;
1662 
1663 	uint8_t alternate_aux_channel;
1664 	uint8_t alternate_ddc_pin;
1665 
1666 	uint8_t dp_boost_level;
1667 	uint8_t hdmi_boost_level;
1668 };
1669 
1670 enum psr_lines_to_wait {
1671 	PSR_0_LINES_TO_WAIT = 0,
1672 	PSR_1_LINE_TO_WAIT,
1673 	PSR_4_LINES_TO_WAIT,
1674 	PSR_8_LINES_TO_WAIT
1675 };
1676 
1677 struct intel_vbt_data {
1678 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1679 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1680 
1681 	/* Feature bits */
1682 	unsigned int int_tv_support:1;
1683 	unsigned int lvds_dither:1;
1684 	unsigned int lvds_vbt:1;
1685 	unsigned int int_crt_support:1;
1686 	unsigned int lvds_use_ssc:1;
1687 	unsigned int display_clock_mode:1;
1688 	unsigned int fdi_rx_polarity_inverted:1;
1689 	unsigned int panel_type:4;
1690 	int lvds_ssc_freq;
1691 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1692 
1693 	enum drrs_support_type drrs_type;
1694 
1695 	struct {
1696 		int rate;
1697 		int lanes;
1698 		int preemphasis;
1699 		int vswing;
1700 		bool low_vswing;
1701 		bool initialized;
1702 		bool support;
1703 		int bpp;
1704 		struct edp_power_seq pps;
1705 	} edp;
1706 
1707 	struct {
1708 		bool full_link;
1709 		bool require_aux_wakeup;
1710 		int idle_frames;
1711 		enum psr_lines_to_wait lines_to_wait;
1712 		int tp1_wakeup_time;
1713 		int tp2_tp3_wakeup_time;
1714 	} psr;
1715 
1716 	struct {
1717 		u16 pwm_freq_hz;
1718 		bool present;
1719 		bool active_low_pwm;
1720 		u8 min_brightness;	/* min_brightness/255 of max */
1721 		u8 controller;		/* brightness controller number */
1722 		enum intel_backlight_type type;
1723 	} backlight;
1724 
1725 	/* MIPI DSI */
1726 	struct {
1727 		u16 panel_id;
1728 		struct mipi_config *config;
1729 		struct mipi_pps_data *pps;
1730 		u8 seq_version;
1731 		u32 size;
1732 		u8 *data;
1733 		const u8 *sequence[MIPI_SEQ_MAX];
1734 	} dsi;
1735 
1736 	int crt_ddc_pin;
1737 
1738 	int child_dev_num;
1739 	union child_device_config *child_dev;
1740 
1741 	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1742 	struct sdvo_device_mapping sdvo_mappings[2];
1743 };
1744 
1745 enum intel_ddb_partitioning {
1746 	INTEL_DDB_PART_1_2,
1747 	INTEL_DDB_PART_5_6, /* IVB+ */
1748 };
1749 
1750 struct intel_wm_level {
1751 	bool enable;
1752 	uint32_t pri_val;
1753 	uint32_t spr_val;
1754 	uint32_t cur_val;
1755 	uint32_t fbc_val;
1756 };
1757 
1758 struct ilk_wm_values {
1759 	uint32_t wm_pipe[3];
1760 	uint32_t wm_lp[3];
1761 	uint32_t wm_lp_spr[3];
1762 	uint32_t wm_linetime[3];
1763 	bool enable_fbc_wm;
1764 	enum intel_ddb_partitioning partitioning;
1765 };
1766 
1767 struct vlv_pipe_wm {
1768 	uint16_t plane[I915_MAX_PLANES];
1769 };
1770 
1771 struct vlv_sr_wm {
1772 	uint16_t plane;
1773 	uint16_t cursor;
1774 };
1775 
1776 struct vlv_wm_ddl_values {
1777 	uint8_t plane[I915_MAX_PLANES];
1778 };
1779 
1780 struct vlv_wm_values {
1781 	struct vlv_pipe_wm pipe[3];
1782 	struct vlv_sr_wm sr;
1783 	struct vlv_wm_ddl_values ddl[3];
1784 	uint8_t level;
1785 	bool cxsr;
1786 };
1787 
1788 struct skl_ddb_entry {
1789 	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
1790 };
1791 
1792 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1793 {
1794 	return entry->end - entry->start;
1795 }
1796 
1797 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1798 				       const struct skl_ddb_entry *e2)
1799 {
1800 	if (e1->start == e2->start && e1->end == e2->end)
1801 		return true;
1802 
1803 	return false;
1804 }
1805 
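/* For example, an entry of { .start = 0, .end = 160 } covers DDB blocks
 * 0..159 ('end' is exclusive), so skl_ddb_entry_size() returns 160. */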
1806 struct skl_ddb_allocation {
1807 	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
1808 	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
1809 };
1810 
1811 struct skl_wm_values {
1812 	unsigned dirty_pipes;
1813 	struct skl_ddb_allocation ddb;
1814 };
1815 
1816 struct skl_wm_level {
1817 	bool plane_en;
1818 	uint16_t plane_res_b;
1819 	uint8_t plane_res_l;
1820 };
1821 
1822 /*
1823  * This struct helps tracking the state needed for runtime PM, which puts the
1824  * This struct helps track the state needed for runtime PM, which puts the
1825  * device in PCI D3 state. Notice that when this happens, nothing on the
1826  * graphics device works, not even register access, so we don't get interrupts or
1827  *
1828  * Every piece of our code that needs to actually touch the hardware needs to
1829  * either call intel_runtime_pm_get or call intel_display_power_get with the
1830  * appropriate power domain.
1831  *
1832  * Our driver uses the autosuspend delay feature, which means we'll only really
1833  * suspend if we stay with zero refcount for a certain amount of time. The
1834  * default value is currently very conservative (see intel_runtime_pm_enable), but
1835  * it can be changed with the standard runtime PM files from sysfs.
1836  *
1837  * The irqs_enabled variable becomes false exactly after we disable the IRQs and
1838  * goes back to true exactly before we reenable the IRQs. We use this variable
1839  * to check if someone is trying to enable/disable IRQs while they're supposed
1840  * to be disabled. This shouldn't happen and we'll print some error messages in
1841  * case it happens.
1842  *
1843  * For more, read the Documentation/power/runtime_pm.txt.
1844  */
1845 struct i915_runtime_pm {
1846 	atomic_t wakeref_count;
1847 	bool suspended;
1848 	bool irqs_enabled;
1849 };
1850 
1851 enum intel_pipe_crc_source {
1852 	INTEL_PIPE_CRC_SOURCE_NONE,
1853 	INTEL_PIPE_CRC_SOURCE_PLANE1,
1854 	INTEL_PIPE_CRC_SOURCE_PLANE2,
1855 	INTEL_PIPE_CRC_SOURCE_PF,
1856 	INTEL_PIPE_CRC_SOURCE_PIPE,
1857 	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
1858 	INTEL_PIPE_CRC_SOURCE_TV,
1859 	INTEL_PIPE_CRC_SOURCE_DP_B,
1860 	INTEL_PIPE_CRC_SOURCE_DP_C,
1861 	INTEL_PIPE_CRC_SOURCE_DP_D,
1862 	INTEL_PIPE_CRC_SOURCE_AUTO,
1863 	INTEL_PIPE_CRC_SOURCE_MAX,
1864 };
1865 
1866 struct intel_pipe_crc_entry {
1867 	uint32_t frame;
1868 	uint32_t crc[5];
1869 };
1870 
1871 #define INTEL_PIPE_CRC_ENTRIES_NR	128
1872 struct intel_pipe_crc {
1873 	spinlock_t lock;
1874 	bool opened;		/* exclusive access to the result file */
1875 	struct intel_pipe_crc_entry *entries;
1876 	enum intel_pipe_crc_source source;
1877 	int head, tail;
1878 	wait_queue_head_t wq;
1879 	int skipped;
1880 };
1881 
1882 struct i915_frontbuffer_tracking {
1883 	spinlock_t lock;
1884 
1885 	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1887 	 * scheduled flips.
1888 	 */
1889 	unsigned busy_bits;
1890 	unsigned flip_bits;
1891 };
1892 
1893 struct i915_wa_reg {
1894 	i915_reg_t addr;
1895 	u32 value;
1896 	/* bitmask representing WA bits */
1897 	u32 mask;
1898 };
1899 
1900 /*
 * RING_MAX_NONPRIV_SLOTS is per-engine, but at this point we only allow
 * it for RCS as we don't foresee any requirement of having a whitelist
 * for other engines. If it is ever required for other engines, the
 * limit will need to be increased.
1905  */
1906 #define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
1907 
1908 struct i915_workarounds {
1909 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
1910 	u32 count;
1911 	u32 hw_whitelist_count[I915_NUM_ENGINES];
1912 };
1913 
1914 struct i915_virtual_gpu {
1915 	bool active;
1916 };
1917 
1918 /* used in computing the new watermarks state */
1919 struct intel_wm_config {
1920 	unsigned int num_pipes_active;
1921 	bool sprites_enabled;
1922 	bool sprites_scaled;
1923 };
1924 
1925 struct i915_oa_format {
1926 	u32 format;
1927 	int size;
1928 };
1929 
1930 struct i915_oa_reg {
1931 	i915_reg_t addr;
1932 	u32 value;
1933 };
1934 
1935 struct i915_perf_stream;
1936 
1937 /**
1938  * struct i915_perf_stream_ops - the OPs to support a specific stream type
1939  */
1940 struct i915_perf_stream_ops {
1941 	/**
1942 	 * @enable: Enables the collection of HW samples, either in response to
1943 	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
1944 	 * without `I915_PERF_FLAG_DISABLED`.
1945 	 */
1946 	void (*enable)(struct i915_perf_stream *stream);
1947 
1948 	/**
1949 	 * @disable: Disables the collection of HW samples, either in response
1950 	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
1951 	 * the stream.
1952 	 */
1953 	void (*disable)(struct i915_perf_stream *stream);
1954 
1955 	/**
1956 	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
1957 	 * once there is something ready to read() for the stream
1958 	 */
1959 	void (*poll_wait)(struct i915_perf_stream *stream,
1960 			  struct file *file,
1961 			  poll_table *wait);
1962 
1963 	/**
1964 	 * @wait_unlocked: For handling a blocking read, wait until there is
	 * something ready to read() for the stream. E.g. wait on the same
1966 	 * wait queue that would be passed to poll_wait().
1967 	 */
1968 	int (*wait_unlocked)(struct i915_perf_stream *stream);
1969 
1970 	/**
1971 	 * @read: Copy buffered metrics as records to userspace
	 * **buf**: the userspace destination buffer
	 * **count**: the number of bytes to copy, requested by userspace
	 * **offset**: zero at the start of the read, updated as the read
	 * proceeds; it represents how many bytes have been copied so far and
	 * the buffer offset for copying the next record.
1977 	 *
1978 	 * Copy as many buffered i915 perf samples and records for this stream
1979 	 * to userspace as will fit in the given buffer.
1980 	 *
	 * Only write complete records; return -%ENOSPC if there isn't room
1982 	 * for a complete record.
1983 	 *
1984 	 * Return any error condition that results in a short read such as
1985 	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
1986 	 * returning to userspace.
1987 	 */
1988 	int (*read)(struct i915_perf_stream *stream,
1989 		    char __user *buf,
1990 		    size_t count,
1991 		    size_t *offset);
1992 
1993 	/**
1994 	 * @destroy: Cleanup any stream specific resources.
1995 	 *
1996 	 * The stream will always be disabled before this is called.
1997 	 */
1998 	void (*destroy)(struct i915_perf_stream *stream);
1999 };
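
/*
 * Illustrative sketch of a conforming &i915_perf_stream_ops->read()
 * loop; have_next_record(), record and record_size are hypothetical
 * stand-ins for the stream's buffering, and -ENOSPC is returned even
 * mid-read since the caller may squash it when bytes were already
 * copied:
 *
 *	while (have_next_record(stream)) {
 *		if (*offset + record_size > count)
 *			return -ENOSPC;
 *		if (copy_to_user(buf + *offset, record, record_size))
 *			return -EFAULT;
 *		*offset += record_size;
 *	}
 *	return 0;
 */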
2000 
2001 /**
2002  * struct i915_perf_stream - state for a single open stream FD
2003  */
2004 struct i915_perf_stream {
2005 	/**
2006 	 * @dev_priv: i915 drm device
2007 	 */
2008 	struct drm_i915_private *dev_priv;
2009 
2010 	/**
2011 	 * @link: Links the stream into ``&drm_i915_private->streams``
2012 	 */
2013 	struct list_head link;
2014 
2015 	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, which determine the contents
	 * of a single sample as read() by userspace.
2019 	 */
2020 	u32 sample_flags;
2021 
2022 	/**
2023 	 * @sample_size: Considering the configured contents of a sample
2024 	 * combined with the required header size, this is the total size
2025 	 * of a single sample record.
2026 	 */
2027 	int sample_size;
2028 
2029 	/**
	 * @ctx: %NULL if measuring system-wide across all contexts, otherwise
	 * the specific context being monitored.
2032 	 */
2033 	struct i915_gem_context *ctx;
2034 
2035 	/**
2036 	 * @enabled: Whether the stream is currently enabled, considering
2037 	 * whether the stream was opened in a disabled state and based
2038 	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
2039 	 */
2040 	bool enabled;
2041 
2042 	/**
2043 	 * @ops: The callbacks providing the implementation of this specific
2044 	 * type of configured stream.
2045 	 */
2046 	const struct i915_perf_stream_ops *ops;
2047 };
2048 
2049 /**
2050  * struct i915_oa_ops - Gen specific implementation of an OA unit stream
2051  */
2052 struct i915_oa_ops {
2053 	/**
2054 	 * @init_oa_buffer: Resets the head and tail pointers of the
2055 	 * circular buffer for periodic OA reports.
2056 	 *
2057 	 * Called when first opening a stream for OA metrics, but also may be
2058 	 * called in response to an OA buffer overflow or other error
2059 	 * condition.
2060 	 *
2061 	 * Note it may be necessary to clear the full OA buffer here as part of
	 * maintaining the invariant that new reports must be written to
	 * zeroed memory for us to be able to reliably detect if an expected
2064 	 * report has not yet landed in memory.  (At least on Haswell the OA
2065 	 * buffer tail pointer is not synchronized with reports being visible
2066 	 * to the CPU)
2067 	 */
2068 	void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
2069 
2070 	/**
2071 	 * @enable_metric_set: Applies any MUX configuration to set up the
2072 	 * Boolean and Custom (B/C) counters that are part of the counter
2073 	 * reports being sampled. May apply system constraints such as
2074 	 * disabling EU clock gating as required.
2075 	 */
2076 	int (*enable_metric_set)(struct drm_i915_private *dev_priv);
2077 
2078 	/**
2079 	 * @disable_metric_set: Remove system constraints associated with using
2080 	 * the OA unit.
2081 	 */
2082 	void (*disable_metric_set)(struct drm_i915_private *dev_priv);
2083 
2084 	/**
2085 	 * @oa_enable: Enable periodic sampling
2086 	 */
2087 	void (*oa_enable)(struct drm_i915_private *dev_priv);
2088 
2089 	/**
2090 	 * @oa_disable: Disable periodic sampling
2091 	 */
2092 	void (*oa_disable)(struct drm_i915_private *dev_priv);
2093 
2094 	/**
2095 	 * @read: Copy data from the circular OA buffer into a given userspace
2096 	 * buffer.
2097 	 */
2098 	int (*read)(struct i915_perf_stream *stream,
2099 		    char __user *buf,
2100 		    size_t count,
2101 		    size_t *offset);
2102 
2103 	/**
	 * @oa_buffer_is_empty: Check if the OA buffer is empty (false positives OK)
2105 	 *
2106 	 * This is either called via fops or the poll check hrtimer (atomic
2107 	 * ctx) without any locks taken.
2108 	 *
2109 	 * It's safe to read OA config state here unlocked, assuming that this
2110 	 * is only called while the stream is enabled, while the global OA
2111 	 * configuration can't be modified.
2112 	 *
2113 	 * Efficiency is more important than avoiding some false positives
2114 	 * here, which will be handled gracefully - likely resulting in an
2115 	 * %EAGAIN error for userspace.
2116 	 */
2117 	bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
2118 };
2119 
2120 struct intel_cdclk_state {
2121 	unsigned int cdclk, vco, ref;
2122 };
2123 
2124 struct drm_i915_private {
2125 	struct drm_device drm;
2126 
2127 	struct kmem_cache *objects;
2128 	struct kmem_cache *vmas;
2129 	struct kmem_cache *requests;
2130 	struct kmem_cache *dependencies;
2131 
2132 	const struct intel_device_info info;
2133 
2134 	void __iomem *regs;
2135 
2136 	struct intel_uncore uncore;
2137 
2138 	struct i915_virtual_gpu vgpu;
2139 
2140 	struct intel_gvt *gvt;
2141 
2142 	struct intel_huc huc;
2143 	struct intel_guc guc;
2144 
2145 	struct intel_csr csr;
2146 
2147 	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
2148 
2149 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
2150 	 * controller on different i2c buses. */
2151 	struct lock gmbus_mutex;
2152 
2153 	/**
2154 	 * Base address of the gmbus and gpio block.
2155 	 */
2156 	uint32_t gpio_mmio_base;
2157 
2158 	/* MMIO base address for MIPI regs */
2159 	uint32_t mipi_mmio_base;
2160 
2161 	uint32_t psr_mmio_base;
2162 
2163 	uint32_t pps_mmio_base;
2164 
2165 	wait_queue_head_t gmbus_wait_queue;
2166 
2167 	struct pci_dev *bridge_dev;
2168 	struct i915_gem_context *kernel_context;
2169 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
2170 	struct i915_vma *semaphore;
2171 
2172 	struct drm_dma_handle *status_page_dmah;
2173 	struct resource *mch_res;
2174 	int mch_res_rid;
2175 
2176 	/* protects the irq masks */
2177 	spinlock_t irq_lock;
2178 
2179 	/* protects the mmio flip data */
2180 	spinlock_t mmio_flip_lock;
2181 
2182 	bool display_irqs_enabled;
2183 
2184 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
2185 	struct pm_qos_request pm_qos;
2186 
2187 	/* Sideband mailbox protection */
2188 	struct lock sb_lock;
2189 
2190 	/** Cached value of IMR to avoid reads in updating the bitfield */
2191 	union {
2192 		u32 irq_mask;
2193 		u32 de_irq_mask[I915_MAX_PIPES];
2194 	};
2195 	u32 gt_irq_mask;
2196 	u32 pm_imr;
2197 	u32 pm_ier;
2198 	u32 pm_rps_events;
2199 	u32 pm_guc_events;
2200 	u32 pipestat_irq_mask[I915_MAX_PIPES];
2201 
2202 	struct i915_hotplug hotplug;
2203 	struct intel_fbc fbc;
2204 	struct i915_drrs drrs;
2205 	struct intel_opregion opregion;
2206 	struct intel_vbt_data vbt;
2207 
2208 	bool preserve_bios_swizzle;
2209 
2210 	/* overlay */
2211 	struct intel_overlay *overlay;
2212 
2213 	/* backlight registers and fields in struct intel_panel */
2214 	struct lock backlight_lock;
2215 
2216 	/* LVDS info */
2217 	bool no_aux_handshake;
2218 
2219 	/* protects panel power sequencer state */
2220 	struct lock pps_mutex;
2221 
2222 	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
2223 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
2224 
2225 	unsigned int fsb_freq, mem_freq, is_ddr3;
2226 	unsigned int skl_preferred_vco_freq;
2227 	unsigned int max_cdclk_freq;
2228 
2229 	unsigned int max_dotclk_freq;
2230 	unsigned int rawclk_freq;
2231 	unsigned int hpll_freq;
2232 	unsigned int czclk_freq;
2233 
2234 	struct {
2235 		/*
2236 		 * The current logical cdclk state.
2237 		 * See intel_atomic_state.cdclk.logical
2238 		 *
2239 		 * For reading holding any crtc lock is sufficient,
2240 		 * for writing must hold all of them.
2241 		 */
2242 		struct intel_cdclk_state logical;
2243 		/*
2244 		 * The current actual cdclk state.
2245 		 * See intel_atomic_state.cdclk.actual
2246 		 */
2247 		struct intel_cdclk_state actual;
2248 		/* The current hardware cdclk state */
2249 		struct intel_cdclk_state hw;
2250 	} cdclk;
2251 
2252 	/**
2253 	 * wq - Driver workqueue for GEM.
2254 	 *
2255 	 * NOTE: Work items scheduled here are not allowed to grab any modeset
2256 	 * locks, for otherwise the flushing done in the pageflip code will
2257 	 * result in deadlocks.
2258 	 */
2259 	struct workqueue_struct *wq;
2260 
2261 	/* Display functions */
2262 	struct drm_i915_display_funcs display;
2263 
2264 	/* PCH chipset type */
2265 	enum intel_pch pch_type;
2266 	unsigned short pch_id;
2267 
2268 	unsigned long quirks;
2269 
2270 	enum modeset_restore modeset_restore;
2271 	struct lock modeset_restore_lock;
2272 	struct drm_atomic_state *modeset_restore_state;
2273 	struct drm_modeset_acquire_ctx reset_ctx;
2274 
2275 	struct list_head vm_list; /* Global list of all address spaces */
2276 	struct i915_ggtt ggtt; /* VM representing the global address space */
2277 
2278 	struct i915_gem_mm mm;
2279 	DECLARE_HASHTABLE(mm_structs, 7);
2280 	struct lock mm_lock;
2281 
2282 	/* The hw wants to have a stable context identifier for the lifetime
2283 	 * of the context (for OA, PASID, faults, etc). This is limited
2284 	 * in execlists to 21 bits.
2285 	 */
2286 	struct ida context_hw_ida;
2287 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
2288 
2289 	/* Kernel Modesetting */
2290 
2291 	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
2292 	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
2293 	wait_queue_head_t pending_flip_queue;
2294 
2295 #ifdef CONFIG_DEBUG_FS
2296 	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
2297 #endif
2298 
2299 	/* dpll and cdclk state is protected by connection_mutex */
2300 	int num_shared_dpll;
2301 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
2302 	const struct intel_dpll_mgr *dpll_mgr;
2303 
2304 	/*
2305 	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
2306 	 * Must be global rather than per dpll, because on some platforms
2307 	 * plls share registers.
2308 	 */
2309 	struct lock dpll_lock;
2310 
2311 	unsigned int active_crtcs;
2312 	unsigned int min_pixclk[I915_MAX_PIPES];
2313 
2314 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
2315 
2316 	struct i915_workarounds workarounds;
2317 
2318 	struct i915_frontbuffer_tracking fb_tracking;
2319 
2320 	struct intel_atomic_helper {
2321 		struct llist_head free_list;
2322 		struct work_struct free_work;
2323 	} atomic_helper;
2324 
2325 	u16 orig_clock;
2326 
2327 	bool mchbar_need_disable;
2328 
2329 	struct intel_l3_parity l3_parity;
2330 
2331 	/* Cannot be determined by PCIID. You must always read a register. */
2332 	u32 edram_cap;
2333 
2334 	/* gen6+ rps state */
2335 	struct intel_gen6_power_mgmt rps;
2336 
2337 	/* ilk-only ips/rps state. Everything in here is protected by the global
2338 	 * mchdev_lock in intel_pm.c */
2339 	struct intel_ilk_power_mgmt ips;
2340 
2341 	struct i915_power_domains power_domains;
2342 
2343 	struct i915_psr psr;
2344 
2345 	struct i915_gpu_error gpu_error;
2346 
2347 	struct drm_i915_gem_object *vlv_pctx;
2348 
2349 #ifdef CONFIG_DRM_FBDEV_EMULATION
	/* the fbdev registered on this device */
2351 	struct intel_fbdev *fbdev;
2352 	struct work_struct fbdev_suspend_work;
2353 #endif
2354 
2355 	struct drm_property *broadcast_rgb_property;
2356 	struct drm_property *force_audio_property;
2357 
2358 	/* hda/i915 audio component */
2359 	struct i915_audio_component *audio_component;
2360 	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
2365 	struct lock av_mutex;
2366 
2367 	uint32_t hw_context_size;
2368 	struct list_head context_list;
2369 
2370 	u32 fdi_rx_config;
2371 
2372 	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
2373 	u32 chv_phy_control;
2374 	/*
2375 	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
2377 	 * crappiness (can't read out DPLL_MD for pipes B & C).
2378 	 */
2379 	u32 chv_dpll_md[I915_MAX_PIPES];
2380 	u32 bxt_phy_grc;
2381 
2382 	u32 suspend_count;
2383 	bool suspended_to_idle;
2384 	struct i915_suspend_saved_registers regfile;
2385 	struct vlv_s0ix_state vlv_s0ix_state;
2386 
2387 	enum {
2388 		I915_SAGV_UNKNOWN = 0,
2389 		I915_SAGV_DISABLED,
2390 		I915_SAGV_ENABLED,
2391 		I915_SAGV_NOT_CONTROLLED
2392 	} sagv_status;
2393 
2394 	struct {
2395 		/*
2396 		 * Raw watermark latency values:
2397 		 * in 0.1us units for WM0,
2398 		 * in 0.5us units for WM1+.
2399 		 */
2400 		/* primary */
2401 		uint16_t pri_latency[5];
2402 		/* sprite */
2403 		uint16_t spr_latency[5];
2404 		/* cursor */
2405 		uint16_t cur_latency[5];
2406 		/*
2407 		 * Raw watermark memory latency values
2408 		 * for SKL for all 8 levels
2409 		 * in 1us units.
2410 		 */
2411 		uint16_t skl_latency[8];
2412 
2413 		/* current hardware state */
2414 		union {
2415 			struct ilk_wm_values hw;
2416 			struct skl_wm_values skl_hw;
2417 			struct vlv_wm_values vlv;
2418 		};
2419 
2420 		uint8_t max_level;
2421 
2422 		/*
2423 		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
2425 		 * cstate->wm.need_postvbl_update.
2426 		 */
2427 		struct lock wm_mutex;
2428 
2429 		/*
2430 		 * Set during HW readout of watermarks/DDB.  Some platforms
2431 		 * need to know when we're still using BIOS-provided values
2432 		 * (which we don't fully trust).
2433 		 */
2434 		bool distrust_bios_wm;
2435 	} wm;
2436 
2437 	struct i915_runtime_pm pm;
2438 
2439 	struct {
2440 		bool initialized;
2441 
2442 		struct kobject *metrics_kobj;
2443 		struct ctl_table_header *sysctl_header;
2444 
2445 		struct lock lock;
2446 		struct list_head streams;
2447 
2448 		spinlock_t hook_lock;
2449 
2450 		struct {
2451 			struct i915_perf_stream *exclusive_stream;
2452 
2453 			u32 specific_ctx_id;
2454 
2455 			struct hrtimer poll_check_timer;
2456 			wait_queue_head_t poll_wq;
2457 			bool pollin;
2458 
2459 			bool periodic;
2460 			int period_exponent;
2461 			int timestamp_frequency;
2462 
2463 			int tail_margin;
2464 
2465 			int metrics_set;
2466 
2467 			const struct i915_oa_reg *mux_regs;
2468 			int mux_regs_len;
2469 			const struct i915_oa_reg *b_counter_regs;
2470 			int b_counter_regs_len;
2471 
2472 			struct {
2473 				struct i915_vma *vma;
2474 				u8 *vaddr;
2475 				int format;
2476 				int format_size;
2477 			} oa_buffer;
2478 
2479 			u32 gen7_latched_oastatus1;
2480 
2481 			struct i915_oa_ops ops;
2482 			const struct i915_oa_format *oa_formats;
2483 			int n_builtin_sets;
2484 		} oa;
2485 	} perf;
2486 
2487 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2488 	struct {
2489 		void (*resume)(struct drm_i915_private *);
2490 		void (*cleanup_engine)(struct intel_engine_cs *engine);
2491 
2492 		struct list_head timelines;
2493 		struct i915_gem_timeline global_timeline;
2494 		u32 active_requests;
2495 
2496 		/**
2497 		 * Is the GPU currently considered idle, or busy executing
2498 		 * userspace requests? Whilst idle, we allow runtime power
2499 		 * management to power down the hardware and display clocks.
2500 		 * In order to reduce the effect on performance, there
2501 		 * is a slight delay before we do so.
2502 		 */
2503 		bool awake;
2504 
2505 		/**
2506 		 * We leave the user IRQ off as much as possible,
2507 		 * but this means that requests will finish and never
2508 		 * be retired once the system goes idle. Set a timer to
2509 		 * fire periodically while the ring is running. When it
2510 		 * fires, go retire requests.
2511 		 */
2512 		struct delayed_work retire_work;
2513 
2514 		/**
2515 		 * When we detect an idle GPU, we want to turn on
2516 		 * powersaving features. So once we see that there
2517 		 * are no more requests outstanding and no more
2518 		 * arrive within a small period of time, we fire
2519 		 * off the idle_work.
2520 		 */
2521 		struct delayed_work idle_work;
2522 
2523 		ktime_t last_init_time;
2524 	} gt;
2525 
2526 	/* perform PHY state sanity checks? */
2527 	bool chv_phy_assert[2];
2528 
2529 	bool ipc_enabled;
2530 
2531 	/* Used to save the pipe-to-encoder mapping for audio */
2532 	struct intel_encoder *av_enc_map[I915_MAX_PIPES];
2533 
2534 	/* necessary resource sharing with HDMI LPE audio driver. */
2535 	struct {
2536 		struct platform_device *platdev;
2537 		int	irq;
2538 	} lpe_audio;
2539 
2540 	/*
2541 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2542 	 * will be rejected. Instead look for a better place.
2543 	 */
2544 };
2545 
2546 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2547 {
2548 	return container_of(dev, struct drm_i915_private, drm);
2549 }
2550 
2551 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
2552 {
2553 	return to_i915(dev_get_drvdata(kdev));
2554 }
2555 
2556 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2557 {
2558 	return container_of(guc, struct drm_i915_private, guc);
2559 }
2560 
2561 static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
2562 {
2563 	return container_of(huc, struct drm_i915_private, huc);
2564 }
2565 
2566 /* Simple iterator over all initialised engines */
2567 #define for_each_engine(engine__, dev_priv__, id__) \
2568 	for ((id__) = 0; \
2569 	     (id__) < I915_NUM_ENGINES; \
2570 	     (id__)++) \
2571 		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
2572 
2573 /* Iterator over subset of engines selected by mask */
2574 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2575 	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
2576 	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
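
/*
 * Illustrative usage sketch (assumes a "dev_priv" in scope;
 * intel_engine_init_hangcheck() is declared later in this header):
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *
 *	for_each_engine(engine, dev_priv, id)
 *		intel_engine_init_hangcheck(engine);
 */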
2577 
2578 enum hdmi_force_audio {
2579 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
2580 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
2581 	HDMI_AUDIO_AUTO,		/* trust EDID */
2582 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
2583 };
2584 
2585 #define I915_GTT_OFFSET_NONE ((u32)-1)
2586 
2587 /*
2588  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
2589  * considered to be the frontbuffer for the given plane interface-wise. This
2590  * doesn't mean that the hw necessarily already scans it out, but that any
2591  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2592  *
2593  * We have one bit per pipe and per scanout plane type.
2594  */
2595 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2596 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
2597 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2598 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2599 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
2600 	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2601 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2602 	(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2603 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
2604 	(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2605 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
2606 	(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
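
/*
 * Illustrative sketch of the resulting layout: with 8 bits per pipe,
 * pipe B's bits start at bit 8, so INTEL_FRONTBUFFER_PRIMARY(PIPE_B)
 * is BIT(8), INTEL_FRONTBUFFER_CURSOR(PIPE_B) is BIT(9) and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) is 0xff00.
 */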
2607 
2608 /*
2609  * Optimised SGL iterator for GEM objects
2610  */
2611 static __always_inline struct sgt_iter {
2612 	struct scatterlist *sgp;
2613 	union {
2614 		unsigned long pfn;
2615 		dma_addr_t dma;
2616 	};
2617 	unsigned int curr;
2618 	unsigned int max;
2619 } __sgt_iter(struct scatterlist *sgl, bool dma) {
2620 	struct sgt_iter s = { .sgp = sgl };
2621 
2622 	if (s.sgp) {
2623 		s.max = s.curr = s.sgp->offset;
2624 		s.max += s.sgp->length;
2625 		if (dma)
2626 			s.dma = sg_dma_address(s.sgp);
2627 		else
2628 			s.pfn = page_to_pfn(sg_page(s.sgp));
2629 	}
2630 
2631 	return s;
2632 }
2633 
2634 static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2635 {
2636 	++sg;
2637 	if (unlikely(sg_is_chain(sg)))
2638 		sg = sg_chain_ptr(sg);
2639 	return sg;
2640 }
2641 
2642 /**
2643  * __sg_next - return the next scatterlist entry in a list
2644  * @sg:		The current sg entry
2645  *
2646  * Description:
2647  *   If the entry is the last, return NULL; otherwise, step to the next
2648  *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to that element.
2650  **/
2651 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2652 {
2653 #ifdef CONFIG_DEBUG_SG
2654 	BUG_ON(sg->sg_magic != SG_MAGIC);
2655 #endif
2656 	return sg_is_last(sg) ? NULL : ____sg_next(sg);
2657 }
2658 
2659 /**
2660  * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2661  * @__dmap:	DMA address (output)
2662  * @__iter:	'struct sgt_iter' (iterator state, internal)
2663  * @__sgt:	sg_table to iterate over (input)
2664  */
2665 #define for_each_sgt_dma(__dmap, __iter, __sgt)				\
2666 	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
2667 	     ((__dmap) = (__iter).dma + (__iter).curr);			\
2668 	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
2669 	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
2670 
2671 /**
2672  * for_each_sgt_page - iterate over the pages of the given sg_table
2673  * @__pp:	page pointer (output)
2674  * @__iter:	'struct sgt_iter' (iterator state, internal)
2675  * @__sgt:	sg_table to iterate over (input)
2676  */
2677 #define for_each_sgt_page(__pp, __iter, __sgt)				\
2678 	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
2679 	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
2680 	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2681 	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
2682 	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
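
/*
 * Illustrative usage sketch (assumes a populated "struct sg_table *sgt"):
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *	unsigned int npages = 0;
 *
 *	for_each_sgt_page(page, iter, sgt)
 *		npages++;
 */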
2683 
2684 static inline const struct intel_device_info *
2685 intel_info(const struct drm_i915_private *dev_priv)
2686 {
2687 	return &dev_priv->info;
2688 }
2689 
2690 #define INTEL_INFO(dev_priv)	intel_info((dev_priv))
2691 
2692 #define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
2693 #define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
2694 
2695 #define REVID_FOREVER		0xff
2696 #define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
2697 
2698 #define GEN_FOREVER (0)
2699 /*
2700  * Returns true if Gen is in inclusive range [Start, End].
2701  *
 * Use GEN_FOREVER for an unbound start and/or end.
2703  */
2704 #define IS_GEN(dev_priv, s, e) ({ \
2705 	unsigned int __s = (s), __e = (e); \
2706 	BUILD_BUG_ON(!__builtin_constant_p(s)); \
2707 	BUILD_BUG_ON(!__builtin_constant_p(e)); \
2708 	if ((__s) != GEN_FOREVER) \
2709 		__s = (s) - 1; \
2710 	if ((__e) == GEN_FOREVER) \
2711 		__e = BITS_PER_LONG - 1; \
2712 	else \
2713 		__e = (e) - 1; \
2714 	!!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
2715 })
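
/*
 * Illustrative sketch: IS_GEN(dev_priv, 8, GEN_FOREVER) is true on gen8
 * and newer, IS_GEN(dev_priv, GEN_FOREVER, 5) on gen5 and older, and
 * IS_GEN(dev_priv, 6, 7) only on gen6 and gen7.
 */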
2716 
2717 /*
2718  * Return true if revision is in range [since,until] inclusive.
2719  *
2720  * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2721  */
2722 #define IS_REVID(p, since, until) \
2723 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2724 
2725 #define IS_I830(dev_priv)	((dev_priv)->info.platform == INTEL_I830)
2726 #define IS_I845G(dev_priv)	((dev_priv)->info.platform == INTEL_I845G)
2727 #define IS_I85X(dev_priv)	((dev_priv)->info.platform == INTEL_I85X)
2728 #define IS_I865G(dev_priv)	((dev_priv)->info.platform == INTEL_I865G)
2729 #define IS_I915G(dev_priv)	((dev_priv)->info.platform == INTEL_I915G)
2730 #define IS_I915GM(dev_priv)	((dev_priv)->info.platform == INTEL_I915GM)
2731 #define IS_I945G(dev_priv)	((dev_priv)->info.platform == INTEL_I945G)
2732 #define IS_I945GM(dev_priv)	((dev_priv)->info.platform == INTEL_I945GM)
2733 #define IS_I965G(dev_priv)	((dev_priv)->info.platform == INTEL_I965G)
2734 #define IS_I965GM(dev_priv)	((dev_priv)->info.platform == INTEL_I965GM)
2735 #define IS_G45(dev_priv)	((dev_priv)->info.platform == INTEL_G45)
2736 #define IS_GM45(dev_priv)	((dev_priv)->info.platform == INTEL_GM45)
2737 #define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
2738 #define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
2739 #define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
2740 #define IS_PINEVIEW(dev_priv)	((dev_priv)->info.platform == INTEL_PINEVIEW)
2741 #define IS_G33(dev_priv)	((dev_priv)->info.platform == INTEL_G33)
2742 #define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
2743 #define IS_IVYBRIDGE(dev_priv)	((dev_priv)->info.platform == INTEL_IVYBRIDGE)
2744 #define IS_IVB_GT1(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0156 || \
2745 				 INTEL_DEVID(dev_priv) == 0x0152 || \
2746 				 INTEL_DEVID(dev_priv) == 0x015a)
2747 #define IS_VALLEYVIEW(dev_priv)	((dev_priv)->info.platform == INTEL_VALLEYVIEW)
2748 #define IS_CHERRYVIEW(dev_priv)	((dev_priv)->info.platform == INTEL_CHERRYVIEW)
2749 #define IS_HASWELL(dev_priv)	((dev_priv)->info.platform == INTEL_HASWELL)
2750 #define IS_BROADWELL(dev_priv)	((dev_priv)->info.platform == INTEL_BROADWELL)
2751 #define IS_SKYLAKE(dev_priv)	((dev_priv)->info.platform == INTEL_SKYLAKE)
2752 #define IS_BROXTON(dev_priv)	((dev_priv)->info.platform == INTEL_BROXTON)
2753 #define IS_KABYLAKE(dev_priv)	((dev_priv)->info.platform == INTEL_KABYLAKE)
2754 #define IS_GEMINILAKE(dev_priv)	((dev_priv)->info.platform == INTEL_GEMINILAKE)
2755 #define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
2756 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2757 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2758 #define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
2759 				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
2760 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
2761 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
2762 /* ULX machines are also considered ULT. */
2763 #define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
2764 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
2765 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
2766 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2767 #define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
2768 				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
2769 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
2770 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2771 /* ULX machines are also considered ULT. */
2772 #define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
2773 				 INTEL_DEVID(dev_priv) == 0x0A1E)
2774 #define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
2775 				 INTEL_DEVID(dev_priv) == 0x1913 || \
2776 				 INTEL_DEVID(dev_priv) == 0x1916 || \
2777 				 INTEL_DEVID(dev_priv) == 0x1921 || \
2778 				 INTEL_DEVID(dev_priv) == 0x1926)
2779 #define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
2780 				 INTEL_DEVID(dev_priv) == 0x1915 || \
2781 				 INTEL_DEVID(dev_priv) == 0x191E)
2782 #define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
2783 				 INTEL_DEVID(dev_priv) == 0x5913 || \
2784 				 INTEL_DEVID(dev_priv) == 0x5916 || \
2785 				 INTEL_DEVID(dev_priv) == 0x5921 || \
2786 				 INTEL_DEVID(dev_priv) == 0x5926)
2787 #define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
2788 				 INTEL_DEVID(dev_priv) == 0x5915 || \
2789 				 INTEL_DEVID(dev_priv) == 0x591E)
2790 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2791 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2792 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2793 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
2794 
2795 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
2796 
2797 #define SKL_REVID_A0		0x0
2798 #define SKL_REVID_B0		0x1
2799 #define SKL_REVID_C0		0x2
2800 #define SKL_REVID_D0		0x3
2801 #define SKL_REVID_E0		0x4
2802 #define SKL_REVID_F0		0x5
2803 #define SKL_REVID_G0		0x6
2804 #define SKL_REVID_H0		0x7
2805 
2806 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
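
/*
 * Illustrative sketch, as used for stepping-specific workarounds:
 * IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0) matches Skylake parts up to
 * and including the F0 stepping.
 */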
2807 
2808 #define BXT_REVID_A0		0x0
2809 #define BXT_REVID_A1		0x1
2810 #define BXT_REVID_B0		0x3
2811 #define BXT_REVID_B_LAST	0x8
2812 #define BXT_REVID_C0		0x9
2813 
2814 #define IS_BXT_REVID(dev_priv, since, until) \
2815 	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
2816 
2817 #define KBL_REVID_A0		0x0
2818 #define KBL_REVID_B0		0x1
2819 #define KBL_REVID_C0		0x2
2820 #define KBL_REVID_D0		0x3
2821 #define KBL_REVID_E0		0x4
2822 
2823 #define IS_KBL_REVID(dev_priv, since, until) \
2824 	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2825 
2826 #define GLK_REVID_A0		0x0
2827 #define GLK_REVID_A1		0x1
2828 
2829 #define IS_GLK_REVID(dev_priv, since, until) \
2830 	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2831 
2832 /*
2833  * The genX designation typically refers to the render engine, so render
2834  * capability related checks should use IS_GEN, while display and other checks
2835  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2836  * chips, etc.).
2837  */
2838 #define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
2839 #define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
2840 #define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
2841 #define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
2842 #define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
2843 #define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
2844 #define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
2845 #define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
2846 
2847 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
2848 #define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
2849 #define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))
2850 
2851 #define ENGINE_MASK(id)	BIT(id)
2852 #define RENDER_RING	ENGINE_MASK(RCS)
2853 #define BSD_RING	ENGINE_MASK(VCS)
2854 #define BLT_RING	ENGINE_MASK(BCS)
2855 #define VEBOX_RING	ENGINE_MASK(VECS)
2856 #define BSD2_RING	ENGINE_MASK(VCS2)
2857 #define ALL_ENGINES	(~0)
2858 
2859 #define HAS_ENGINE(dev_priv, id) \
2860 	(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
2861 
2862 #define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
2863 #define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
2864 #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
2865 #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
2866 
2867 #define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)
2868 #define HAS_SNOOP(dev_priv)	((dev_priv)->info.has_snoop)
2869 #define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
2870 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
2871 				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2872 
2873 #define HWS_NEEDS_PHYSICAL(dev_priv)	((dev_priv)->info.hws_needs_physical)
2874 
2875 #define HAS_HW_CONTEXTS(dev_priv)	    ((dev_priv)->info.has_hw_contexts)
2876 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2877 		((dev_priv)->info.has_logical_ring_contexts)
2878 #define USES_PPGTT(dev_priv)		(i915.enable_ppgtt)
2879 #define USES_FULL_PPGTT(dev_priv)	(i915.enable_ppgtt >= 2)
2880 #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915.enable_ppgtt == 3)
2881 
2882 #define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
2883 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
2884 		((dev_priv)->info.overlay_needs_physical)
2885 
2886 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
2887 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
2888 
2889 /* WaRsDisableCoarsePowerGating:skl,bxt */
2890 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2891 	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
2892 
2893 /*
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
2895  * even when in MSI mode. This results in spurious interrupt warnings if the
2896  * legacy irq no. is shared with another device. The kernel then disables that
2897  * interrupt source and so prevents the other device from working properly.
2898  */
2899 #define HAS_AUX_IRQ(dev_priv)   ((dev_priv)->info.gen >= 5)
2900 #define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
2901 
2902 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2903  * rows, which changed the alignment requirements and fence programming.
2904  */
2905 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
2906 					 !(IS_I915G(dev_priv) || \
2907 					 IS_I915GM(dev_priv)))
2908 #define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
2909 #define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
2910 
2911 #define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
2912 #define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
2913 #define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
2914 
2915 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
2916 
2917 #define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)
2918 
2919 #define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
2920 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
2921 #define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
2922 #define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
2923 #define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
2924 
2925 #define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
2926 
2927 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
2928 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
2929 
2930 /*
2931  * For now, anything with a GuC requires uCode loading, and then supports
2932  * command submission once loaded. But these are logically independent
2933  * properties, so we have separate macros to test them.
2934  */
2935 #define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
2936 #define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
2937 #define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))
2938 #define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
2939 
2940 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
2941 
2942 #define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)
2943 
2944 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
2945 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2946 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
2947 #define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
2948 #define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
2949 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
2950 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
2951 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
2952 #define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA200
2953 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
2954 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
2955 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
2956 
2957 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2958 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2959 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2960 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
2961 #define HAS_PCH_LPT_LP(dev_priv) \
2962 	((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
2963 #define HAS_PCH_LPT_H(dev_priv) \
2964 	((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
2965 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2966 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2967 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2968 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
2969 
2970 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
2971 
2972 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
2973 
2974 /* DPF == dynamic parity feature */
2975 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
2976 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2977 				 2 : HAS_L3_DPF(dev_priv))
2978 
2979 #define GT_FREQUENCY_MULTIPLIER 50
2980 #define GEN9_FREQ_SCALER 3
2981 
2982 #define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
2983 
2984 #include "i915_trace.h"
2985 
2986 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2987 {
2988 #ifdef CONFIG_INTEL_IOMMU
2989 	if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
2990 		return true;
2991 #endif
2992 	return false;
2993 }
2994 
2995 static inline bool
2996 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2997 {
2998 #ifdef CONFIG_INTEL_IOMMU
2999 	if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
3000 		return true;
3001 #endif
3002 	return false;
3003 }
3004 
3005 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
3006 				int enable_ppgtt);
3007 
3008 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
3009 
3010 /* i915_drv.c */
3011 void __printf(3, 4)
3012 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
3013 	      const char *fmt, ...);
3014 
3015 #define i915_report_error(dev_priv, fmt, ...)				   \
3016 	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
3017 
3018 #ifdef CONFIG_COMPAT
3019 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
3020 			      unsigned long arg);
3021 #else
3022 #define i915_compat_ioctl NULL
3023 #endif
3024 extern const struct dev_pm_ops i915_pm_ops;
3025 
3026 extern int i915_driver_load(struct pci_dev *pdev,
3027 			    const struct pci_device_id *ent);
3028 extern void i915_driver_unload(struct drm_device *dev);
3029 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
3030 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
3031 extern void i915_reset(struct drm_i915_private *dev_priv);
3032 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
3033 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
3034 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
3035 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
3036 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
3037 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
3038 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
3039 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
3040 
3041 int intel_engines_init_early(struct drm_i915_private *dev_priv);
3042 int intel_engines_init(struct drm_i915_private *dev_priv);
3043 
3044 /* intel_hotplug.c */
3045 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
3046 			   u32 pin_mask, u32 long_mask);
3047 void intel_hpd_init(struct drm_i915_private *dev_priv);
3048 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
3049 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
3050 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
3051 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
3052 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
3053 
3054 /* i915_irq.c */
3055 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3056 {
3057 	unsigned long delay;
3058 
3059 	if (unlikely(!i915.enable_hangcheck))
3060 		return;
3061 
3062 	/* Don't continually defer the hangcheck so that it is always run at
3063 	 * least once after work has been scheduled on any ring. Otherwise,
3064 	 * we will ignore a hung ring if a second ring is kept busy.
3065 	 */
3066 
3067 	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
3068 	queue_delayed_work(system_long_wq,
3069 			   &dev_priv->gpu_error.hangcheck_work, delay);
3070 }
3071 
3072 __printf(3, 4)
3073 void i915_handle_error(struct drm_i915_private *dev_priv,
3074 		       u32 engine_mask,
3075 		       const char *fmt, ...);
3076 
3077 extern void intel_irq_init(struct drm_i915_private *dev_priv);
3078 int intel_irq_install(struct drm_i915_private *dev_priv);
3079 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
3080 
3081 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
3082 extern void intel_uncore_init(struct drm_i915_private *dev_priv);
3083 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
3084 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
3085 extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
3086 extern void intel_uncore_suspend(struct drm_i915_private *dev_priv);
3087 extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
3088 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
3089 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
3090 				enum forcewake_domains domains);
3091 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
3092 				enum forcewake_domains domains);
3093 /* Like above but the caller must manage the uncore.lock itself.
3094  * Must be used with I915_READ_FW and friends.
3095  */
3096 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
3097 					enum forcewake_domains domains);
3098 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
3099 					enum forcewake_domains domains);
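
/*
 * Illustrative sketch of the __locked variants: the caller takes
 * uncore.lock itself and pairs them with I915_READ_FW/I915_WRITE_FW;
 * "reg" and "val" stand in for an arbitrary register access:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */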
3100 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
3101 
3102 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
3103 
3104 int intel_wait_for_register(struct drm_i915_private *dev_priv,
3105 			    i915_reg_t reg,
3106 			    const u32 mask,
3107 			    const u32 value,
3108 			    const unsigned long timeout_ms);
3109 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
3110 			       i915_reg_t reg,
3111 			       const u32 mask,
3112 			       const u32 value,
3113 			       const unsigned long timeout_ms);
3114 
3115 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
3116 {
3117 	return dev_priv->gvt;
3118 }
3119 
3120 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
3121 {
3122 	return dev_priv->vgpu.active;
3123 }
3124 
3125 void
3126 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
3127 		     u32 status_mask);
3128 
3129 void
3130 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
3131 		      u32 status_mask);
3132 
3133 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
3134 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
3135 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
3136 				   uint32_t mask,
3137 				   uint32_t bits);
3138 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
3139 			    uint32_t interrupt_mask,
3140 			    uint32_t enabled_irq_mask);
3141 static inline void
3142 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3143 {
3144 	ilk_update_display_irq(dev_priv, bits, bits);
3145 }
3146 static inline void
3147 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3148 {
3149 	ilk_update_display_irq(dev_priv, bits, 0);
3150 }
3151 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
3152 			 enum i915_pipe pipe,
3153 			 uint32_t interrupt_mask,
3154 			 uint32_t enabled_irq_mask);
3155 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
3156 				       enum i915_pipe pipe, uint32_t bits)
3157 {
3158 	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
3159 }
3160 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
3161 					enum i915_pipe pipe, uint32_t bits)
3162 {
3163 	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
3164 }
3165 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
3166 				  uint32_t interrupt_mask,
3167 				  uint32_t enabled_irq_mask);
3168 static inline void
3169 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3170 {
3171 	ibx_display_interrupt_update(dev_priv, bits, bits);
3172 }
3173 static inline void
3174 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3175 {
3176 	ibx_display_interrupt_update(dev_priv, bits, 0);
3177 }
3178 
3179 /* i915_gem.c */
3180 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3181 			  struct drm_file *file_priv);
3182 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3183 			 struct drm_file *file_priv);
3184 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3185 			  struct drm_file *file_priv);
3186 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3187 			struct drm_file *file_priv);
3188 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3189 			struct drm_file *file_priv);
3190 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3191 			      struct drm_file *file_priv);
3192 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3193 			     struct drm_file *file_priv);
3194 int i915_gem_execbuffer(struct drm_device *dev, void *data,
3195 			struct drm_file *file_priv);
3196 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
3197 			 struct drm_file *file_priv);
3198 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3199 			struct drm_file *file_priv);
3200 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3201 			       struct drm_file *file);
3202 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3203 			       struct drm_file *file);
3204 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3205 			    struct drm_file *file_priv);
3206 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3207 			   struct drm_file *file_priv);
3208 int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
3209 			      struct drm_file *file_priv);
3210 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
3211 			      struct drm_file *file_priv);
3212 void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
3213 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3214 			   struct drm_file *file);
3215 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3216 				struct drm_file *file_priv);
3217 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3218 			struct drm_file *file_priv);
3219 void i915_gem_sanitize(struct drm_i915_private *i915);
3220 int i915_gem_load_init(struct drm_i915_private *dev_priv);
3221 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
3222 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3223 int i915_gem_freeze(struct drm_i915_private *dev_priv);
3224 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3225 
3226 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
3227 void i915_gem_object_free(struct drm_i915_gem_object *obj);
3228 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3229 			 const struct drm_i915_gem_object_ops *ops);
3230 struct drm_i915_gem_object *
3231 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
3232 struct drm_i915_gem_object *
3233 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
3234 				 const void *data, size_t size);
3235 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
3236 void i915_gem_free_object(struct drm_gem_object *obj);
3237 
3238 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
3239 {
	/* A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid in that freeing
	 * the objects does take a small amount of time, during which the rcu
3243 	 * callbacks could have added new objects into the freed list, and
3244 	 * armed the work again.
3245 	 */
3246 	do {
3247 		rcu_barrier();
3248 	} while (flush_work(&i915->mm.free_work));
3249 }
3250 
3251 struct i915_vma * __must_check
3252 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3253 			 const struct i915_ggtt_view *view,
3254 			 u64 size,
3255 			 u64 alignment,
3256 			 u64 flags);
3257 
3258 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
3259 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
3260 
3261 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
3262 
3263 static inline int __sg_page_count(const struct scatterlist *sg)
3264 {
3265 	return sg->length >> PAGE_SHIFT;
3266 }
3267 
3268 struct scatterlist *
3269 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
3270 		       unsigned int n, unsigned int *offset);
3271 
3272 struct page *
3273 i915_gem_object_get_page(struct drm_i915_gem_object *obj,
3274 			 unsigned int n);
3275 
3276 struct page *
3277 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
3278 			       unsigned int n);
3279 
3280 dma_addr_t
3281 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
3282 				unsigned long n);
3283 
3284 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
3285 				 struct sg_table *pages);
3286 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
3287 
3288 static inline int __must_check
3289 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3290 {
3291 	might_lock(&obj->mm.lock);
3292 
3293 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
3294 		return 0;
3295 
3296 	return __i915_gem_object_get_pages(obj);
3297 }
3298 
3299 static inline void
3300 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3301 {
3302 	GEM_BUG_ON(!obj->mm.pages);
3303 
3304 	atomic_inc(&obj->mm.pages_pin_count);
3305 }
3306 
3307 static inline bool
3308 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
3309 {
3310 	return atomic_read(&obj->mm.pages_pin_count);
3311 }
3312 
3313 static inline void
3314 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3315 {
3316 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
3317 	GEM_BUG_ON(!obj->mm.pages);
3318 
3319 	atomic_dec(&obj->mm.pages_pin_count);
3320 }
3321 
3322 static inline void
3323 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3324 {
3325 	__i915_gem_object_unpin_pages(obj);
3326 }
3327 
3328 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
3329 	I915_MM_NORMAL = 0,
3330 	I915_MM_SHRINKER
3331 };
3332 
3333 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
3334 				 enum i915_mm_subclass subclass);
3335 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
3336 
3337 enum i915_map_type {
3338 	I915_MAP_WB = 0,
3339 	I915_MAP_WC,
3340 };
3341 
3342 /**
3343  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
3344  * @obj: the object to map into kernel address space
3345  * @type: the type of mapping, used to select pgprot_t
3346  *
3347  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
3348  * pages and then returns a contiguous mapping of the backing storage into
3349  * the kernel address space. Based on the @type of mapping, the PTE will be
3350  * set to either WriteBack or WriteCombine (via pgprot_t).
3351  *
3352  * The caller is responsible for calling i915_gem_object_unpin_map() when the
3353  * mapping is no longer required.
3354  *
3355  * Returns the pointer through which to access the mapped object, or an
3356  * ERR_PTR() on error.
3357  */
3358 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
3359 					   enum i915_map_type type);
3360 
3361 /**
3362  * i915_gem_object_unpin_map - releases an earlier mapping
3363  * @obj: the object to unmap
3364  *
3365  * After pinning the object and mapping its pages, once you are finished
3366  * with your access, call i915_gem_object_unpin_map() to release the pin
3367  * upon the mapping. Once the pin count reaches zero, that mapping may be
3368  * removed.
3369  */
3370 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
3371 {
3372 	i915_gem_object_unpin_pages(obj);
3373 }
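
/*
 * Illustrative sketch of the pin/unpin pairing described above; "data"
 * and "len" are hypothetical inputs from the caller:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */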
3374 
3375 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3376 				    unsigned int *needs_clflush);
3377 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3378 				     unsigned int *needs_clflush);
3379 #define CLFLUSH_BEFORE	BIT(0)
3380 #define CLFLUSH_AFTER	BIT(1)
3381 #define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
3382 
3383 static inline void
3384 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
3385 {
3386 	i915_gem_object_unpin_pages(obj);
3387 }
3388 
3389 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
3390 void i915_vma_move_to_active(struct i915_vma *vma,
3391 			     struct drm_i915_gem_request *req,
3392 			     unsigned int flags);
3393 int i915_gem_dumb_create(struct drm_file *file_priv,
3394 			 struct drm_device *dev,
3395 			 struct drm_mode_create_dumb *args);
3396 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3397 		      uint32_t handle, uint64_t *offset);
3398 int i915_gem_mmap_gtt_version(void);
3399 
3400 void i915_gem_track_fb(struct drm_i915_gem_object *old,
3401 		       struct drm_i915_gem_object *new,
3402 		       unsigned frontbuffer_bits);
3403 
3404 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
3405 
3406 struct drm_i915_gem_request *
3407 i915_gem_find_active_request(struct intel_engine_cs *engine);
3408 
3409 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
3410 
3411 static inline bool i915_reset_backoff(struct i915_gpu_error *error)
3412 {
3413 	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
3414 }
3415 
3416 static inline bool i915_reset_handoff(struct i915_gpu_error *error)
3417 {
3418 	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
3419 }
3420 
3421 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
3422 {
3423 	return unlikely(test_bit(I915_WEDGED, &error->flags));
3424 }
3425 
3426 static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
3427 {
3428 	return i915_reset_backoff(error) | i915_terminally_wedged(error);
3429 }
3430 
3431 static inline u32 i915_reset_count(struct i915_gpu_error *error)
3432 {
3433 	return READ_ONCE(error->reset_count);
3434 }
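
/*
 * e.g. (sketch) sampling the reset count around a wait to detect whether a
 * global reset completed in between:
 *
 *	u32 before = i915_reset_count(&dev_priv->gpu_error);
 *	... submit and wait ...
 *	if (i915_reset_count(&dev_priv->gpu_error) != before)
 *		... a reset occurred while we waited ...
 */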
3435 
3436 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3437 void i915_gem_reset(struct drm_i915_private *dev_priv);
3438 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3439 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3440 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
3441 
3442 void i915_gem_init_mmio(struct drm_i915_private *i915);
3443 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
3444 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
3445 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
3446 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
3447 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
3448 			   unsigned int flags);
3449 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
3450 void i915_gem_resume(struct drm_i915_private *dev_priv);
3451 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres);
3452 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3453 			 unsigned int flags,
3454 			 long timeout,
3455 			 struct intel_rps_client *rps);
3456 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
3457 				  unsigned int flags,
3458 				  int priority);
3459 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
3460 
3461 int __must_check
3462 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
3463 				  bool write);
3464 int __must_check
3465 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
3466 struct i915_vma * __must_check
3467 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3468 				     u32 alignment,
3469 				     const struct i915_ggtt_view *view);
3470 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
3471 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
3472 				int align);
3473 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
3474 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
3475 
3476 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3477 				    enum i915_cache_level cache_level);
3478 
3479 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3480 				struct dma_buf *dma_buf);
3481 
3482 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3483 				struct drm_gem_object *gem_obj, int flags);
3484 
3485 static inline struct i915_hw_ppgtt *
3486 i915_vm_to_ppgtt(struct i915_address_space *vm)
3487 {
3488 	return container_of(vm, struct i915_hw_ppgtt, base);
3489 }
3490 
3491 /* i915_gem_fence_reg.c */
3492 int __must_check i915_vma_get_fence(struct i915_vma *vma);
3493 int __must_check i915_vma_put_fence(struct i915_vma *vma);
3494 
3495 void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
3496 void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
3497 
3498 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
3499 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
3500 				       struct sg_table *pages);
3501 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
3502 					 struct sg_table *pages);
3503 
3504 static inline struct i915_gem_context *
3505 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3506 {
3507 	struct i915_gem_context *ctx;
3508 
3509 	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
3510 
3511 	ctx = idr_find(&file_priv->context_idr, id);
3512 	if (!ctx)
3513 		return ERR_PTR(-ENOENT);
3514 
3515 	return ctx;
3516 }
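
/*
 * A typical lookup-and-reference pattern under struct_mutex (sketch; "id"
 * would come from the ioctl payload):
 *
 *	ctx = i915_gem_context_lookup(file_priv, id);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	ctx = i915_gem_context_get(ctx);
 *	...
 *	i915_gem_context_put(ctx);
 */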
3517 
3518 static inline struct i915_gem_context *
3519 i915_gem_context_get(struct i915_gem_context *ctx)
3520 {
3521 	kref_get(&ctx->ref);
3522 	return ctx;
3523 }
3524 
3525 static inline void i915_gem_context_put(struct i915_gem_context *ctx)
3526 {
3527 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
3528 	kref_put(&ctx->ref, i915_gem_context_free);
3529 }
3530 
3531 static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
3532 {
3533 	struct lock *lock = &ctx->i915->drm.struct_mutex;
3534 
3535 	if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
3536 		mutex_unlock(lock);
3537 }
3538 
3539 static inline struct intel_timeline *
3540 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
3541 				 struct intel_engine_cs *engine)
3542 {
3543 	struct i915_address_space *vm;
3544 
3545 	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
3546 	return &vm->timeline.engine[engine->id];
3547 }
3548 
3549 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3550 			 struct drm_file *file);
3551 
3552 /* i915_gem_evict.c */
3553 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
3554 					  u64 min_size, u64 alignment,
3555 					  unsigned cache_level,
3556 					  u64 start, u64 end,
3557 					  unsigned flags);
3558 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
3559 					 struct drm_mm_node *node,
3560 					 unsigned int flags);
3561 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
3562 
3563 /* belongs in i915_gem_gtt.h */
3564 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
3565 {
3566 	wmb();
3567 	if (INTEL_GEN(dev_priv) < 6)
3568 		intel_gtt_chipset_flush();
3569 }
3570 
3571 /* i915_gem_stolen.c */
3572 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3573 				struct drm_mm_node *node, u64 size,
3574 				unsigned alignment);
3575 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3576 					 struct drm_mm_node *node, u64 size,
3577 					 unsigned alignment, u64 start,
3578 					 u64 end);
3579 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3580 				 struct drm_mm_node *node);
3581 int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
3582 void i915_gem_cleanup_stolen(struct drm_device *dev);
3583 struct drm_i915_gem_object *
3584 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
3585 struct drm_i915_gem_object *
3586 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
3587 					       u32 stolen_offset,
3588 					       u32 gtt_offset,
3589 					       u32 size);
3590 
3591 /* i915_gem_internal.c */
3592 struct drm_i915_gem_object *
3593 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
3594 				phys_addr_t size);
3595 
3596 /* i915_gem_shrinker.c */
3597 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
3598 			      unsigned long target,
3599 			      unsigned flags);
3600 #define I915_SHRINK_PURGEABLE 0x1
3601 #define I915_SHRINK_UNBOUND 0x2
3602 #define I915_SHRINK_BOUND 0x4
3603 #define I915_SHRINK_ACTIVE 0x8
3604 #define I915_SHRINK_VMAPS 0x10
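
/*
 * e.g. (sketch) asking the shrinker for up to "target" pages, considering
 * only purgeable objects that are not bound into a GTT:
 *
 *	freed = i915_gem_shrink(dev_priv, target,
 *				I915_SHRINK_PURGEABLE | I915_SHRINK_UNBOUND);
 */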
3605 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3606 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
3607 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
3608 
3609 
3610 /* i915_gem_tiling.c */
3611 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
3612 {
3613 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3614 
3615 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3616 		i915_gem_object_is_tiled(obj);
3617 }
3618 
3619 u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
3620 			unsigned int tiling, unsigned int stride);
3621 u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
3622 			     unsigned int tiling, unsigned int stride);
3623 
3624 /* i915_debugfs.c */
3625 #ifdef CONFIG_DEBUG_FS
3626 int i915_debugfs_register(struct drm_i915_private *dev_priv);
3627 int i915_debugfs_connector_add(struct drm_connector *connector);
3628 void intel_display_crc_init(struct drm_i915_private *dev_priv);
3629 #else
3630 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
3631 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3632 { return 0; }
3633 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
3634 #endif
3635 
3636 /* i915_gpu_error.c */
3637 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
3638 
3639 __printf(2, 3)
3640 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
3641 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3642 			    const struct i915_gpu_state *gpu);
3643 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
3644 			      struct drm_i915_private *i915,
3645 			      size_t count, loff_t pos);
3646 static inline void i915_error_state_buf_release(
3647 	struct drm_i915_error_state_buf *eb)
3648 {
3649 	kfree(eb->buf);
3650 }
3651 
3652 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
3653 void i915_capture_error_state(struct drm_i915_private *dev_priv,
3654 			      u32 engine_mask,
3655 			      const char *error_msg);
3656 
3657 static inline struct i915_gpu_state *
3658 i915_gpu_state_get(struct i915_gpu_state *gpu)
3659 {
3660 	kref_get(&gpu->ref);
3661 	return gpu;
3662 }
3663 
3664 void __i915_gpu_state_free(struct kref *kref);
3665 static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
3666 {
3667 	if (gpu)
3668 		kref_put(&gpu->ref, __i915_gpu_state_free);
3669 }
3670 
3671 struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
3672 void i915_reset_error_state(struct drm_i915_private *i915);
3673 
3674 #else
3675 
3676 static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
3677 					    u32 engine_mask,
3678 					    const char *error_msg)
3679 {
3680 }
3681 
3682 static inline struct i915_gpu_state *
3683 i915_first_error_state(struct drm_i915_private *i915)
3684 {
3685 	return NULL;
3686 }
3687 
3688 static inline void i915_reset_error_state(struct drm_i915_private *i915)
3689 {
3690 }
3691 
3692 #endif
3693 
3694 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3695 
3696 /* i915_cmd_parser.c */
3697 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3698 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
3699 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
3700 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
3701 			    struct drm_i915_gem_object *batch_obj,
3702 			    struct drm_i915_gem_object *shadow_batch_obj,
3703 			    u32 batch_start_offset,
3704 			    u32 batch_len,
3705 			    bool is_master);
3706 
3707 /* i915_perf.c */
3708 extern void i915_perf_init(struct drm_i915_private *dev_priv);
3709 extern void i915_perf_fini(struct drm_i915_private *dev_priv);
3710 extern void i915_perf_register(struct drm_i915_private *dev_priv);
3711 extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
3712 
3713 /* i915_suspend.c */
3714 extern int i915_save_state(struct drm_i915_private *dev_priv);
3715 extern int i915_restore_state(struct drm_i915_private *dev_priv);
3716 
3717 /* i915_sysfs.c */
3718 void i915_setup_sysfs(struct drm_i915_private *dev_priv);
3719 void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
3720 
3721 /* intel_lpe_audio.c */
3722 int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
3723 void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
3724 void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
3725 void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
3726 			    void *eld, int port, int pipe, int tmds_clk_speed,
3727 			    bool dp_output, int link_rate);
3728 
3729 /* intel_i2c.c */
3730 extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
3731 extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
3732 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
3733 				     unsigned int pin);
3734 
3735 extern struct i2c_adapter *
3736 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
3737 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3738 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
3739 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
3740 {
3741 	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
3742 }
3743 extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
3744 
3745 /* intel_bios.c */
3746 void intel_bios_init(struct drm_i915_private *dev_priv);
3747 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3748 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3749 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3750 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
3751 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3752 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3753 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
3754 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3755 				     enum port port);
3756 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3757 				enum port port);
3758 
3759 
3760 /* intel_opregion.c */
3761 #ifdef CONFIG_ACPI
3762 extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
3763 extern void intel_opregion_register(struct drm_i915_private *dev_priv);
3764 extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
3765 extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
3766 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
3767 					 bool enable);
3768 extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
3769 					 pci_power_t state);
3770 extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
3771 #else
3772 static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) { return 0; }
3773 static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
3774 static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
3775 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
3776 {
3777 }
3778 static inline int
3779 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
3780 {
3781 	return 0;
3782 }
3783 static inline int
3784 intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state)
3785 {
3786 	return 0;
3787 }
3788 static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
3789 {
3790 	return -ENODEV;
3791 }
3792 #endif
3793 
3794 /* intel_acpi.c */
3795 #ifdef CONFIG_ACPI
3796 extern void intel_register_dsm_handler(void);
3797 extern void intel_unregister_dsm_handler(void);
3798 #else
3799 static inline void intel_register_dsm_handler(void) { return; }
3800 static inline void intel_unregister_dsm_handler(void) { return; }
3801 #endif /* CONFIG_ACPI */
3802 
3803 /* intel_device_info.c */
3804 static inline struct intel_device_info *
3805 mkwrite_device_info(struct drm_i915_private *dev_priv)
3806 {
3807 	return (struct intel_device_info *)&dev_priv->info;
3808 }
3809 
3810 const char *intel_platform_name(enum intel_platform platform);
3811 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
3812 void intel_device_info_dump(struct drm_i915_private *dev_priv);
3813 
3814 /* modesetting */
3815 extern void intel_modeset_init_hw(struct drm_device *dev);
3816 extern int intel_modeset_init(struct drm_device *dev);
3817 extern void intel_modeset_gem_init(struct drm_device *dev);
3818 extern void intel_modeset_cleanup(struct drm_device *dev);
3819 extern int intel_connector_register(struct drm_connector *);
3820 extern void intel_connector_unregister(struct drm_connector *);
3821 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
3822 				       bool state);
3823 extern void intel_display_resume(struct drm_device *dev);
3824 extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
3825 extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
3826 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3827 extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
3828 extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3829 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3830 				  bool enable);
3831 
3832 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3833 			struct drm_file *file);
3834 
3835 /* overlay */
3836 extern struct intel_overlay_error_state *
3837 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3838 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3839 					    struct intel_overlay_error_state *error);
3840 
3841 extern struct intel_display_error_state *
3842 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3843 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3844 					    struct intel_display_error_state *error);
3845 
3846 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3847 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
3848 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
3849 		      u32 reply_mask, u32 reply, int timeout_base_ms);
3850 
3851 /* intel_sideband.c */
3852 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3853 int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3854 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3855 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
3856 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
3857 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3858 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3859 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3860 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3861 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3862 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3863 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
3864 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
3865 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3866 		   enum intel_sbi_destination destination);
3867 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3868 		     enum intel_sbi_destination destination);
3869 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3870 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3871 
3872 /* intel_dpio_phy.c */
3873 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
3874 			     enum dpio_phy *phy, enum dpio_channel *ch);
3875 void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
3876 				  enum port port, u32 margin, u32 scale,
3877 				  u32 enable, u32 deemphasis);
3878 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3879 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3880 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
3881 			    enum dpio_phy phy);
3882 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
3883 			      enum dpio_phy phy);
3884 uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
3885 					     uint8_t lane_count);
3886 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
3887 				     uint8_t lane_lat_optim_mask);
3888 uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
3889 
3890 void chv_set_phy_signal_level(struct intel_encoder *encoder,
3891 			      u32 deemph_reg_value, u32 margin_reg_value,
3892 			      bool uniq_trans_scale);
3893 void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3894 			      bool reset);
3895 void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
3896 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3897 void chv_phy_release_cl2_override(struct intel_encoder *encoder);
3898 void chv_phy_post_pll_disable(struct intel_encoder *encoder);
3899 
3900 void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3901 			      u32 demph_reg_value, u32 preemph_reg_value,
3902 			      u32 uniqtranscale_reg_value, u32 tx3_demph);
3903 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
3904 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3905 void vlv_phy_reset_lanes(struct intel_encoder *encoder);
3906 
3907 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3908 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3909 u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
3910 			   const i915_reg_t reg);
3911 
3912 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
3913 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
3914 
3915 #define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
3916 #define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
3917 #define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
3918 #define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
3919 
3920 #define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
3921 #define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
3922 #define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
3923 #define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
3924 
3925 /* Be very careful when reading or writing 64-bit values. On 32-bit
3926  * machines, a 64-bit write is implemented as two 32-bit writes, in an
3927  * arbitrary order and with an arbitrary delay between them. This can
3928  * cause the hardware to act upon the intermediate value, possibly
3929  * leading to corruption and machine death. For this reason we do not
3930  * support I915_WRITE64 or dev_priv->uncore.funcs.mmio_writeq.
3931  *
3932  * When reading a 64-bit value as two 32-bit values, the delay may cause
3933  * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
3934  * occasionally a 64-bit register does not actually support a full readq
3935  * and must be read using two 32-bit reads.
3936  *
3937  * You have been warned.
3938  */
3939 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3940 
3941 #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
3942 	u32 upper, lower, old_upper, loop = 0;				\
3943 	upper = I915_READ(upper_reg);					\
3944 	do {								\
3945 		old_upper = upper;					\
3946 		lower = I915_READ(lower_reg);				\
3947 		upper = I915_READ(upper_reg);				\
3948 	} while (upper != old_upper && loop++ < 2);			\
3949 	(u64)upper << 32 | lower; })
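
/*
 * e.g. (sketch; the register pair is purely illustrative) reading a 64-bit
 * counter exposed as separate low/high dwords:
 *
 *	u64 ts = I915_READ64_2x32(RING_TIMESTAMP(engine->mmio_base),
 *				  RING_TIMESTAMP_UDW(engine->mmio_base));
 *
 * The loop above re-reads the upper dword until it is stable, so a carry
 * between the two 32-bit reads cannot produce a torn value.
 */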
3950 
3951 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
3952 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
3953 
3954 #define __raw_read(x, s) \
3955 static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
3956 					     i915_reg_t reg) \
3957 { \
3958 	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
3959 }
3960 
3961 #define __raw_write(x, s) \
3962 static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
3963 				       i915_reg_t reg, uint##x##_t val) \
3964 { \
3965 	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
3966 }
3967 __raw_read(8, b)
3968 __raw_read(16, w)
3969 __raw_read(32, l)
3970 __raw_read(64, q)
3971 
3972 __raw_write(8, b)
3973 __raw_write(16, w)
3974 __raw_write(32, l)
3975 __raw_write(64, q)
3976 
3977 #undef __raw_read
3978 #undef __raw_write
3979 
3980 /* These are untraced mmio-accessors that are only valid for use inside
3981  * critical sections, such as inside IRQ handlers, where forcewake is
3982  * explicitly controlled.
3983  *
3984  * Think twice, and think again, before using these.
3985  *
3986  * As an example, these accessors can possibly be used between:
3987  *
3988  * spin_lock_irq(&dev_priv->uncore.lock);
3989  * intel_uncore_forcewake_get__locked();
3990  *
3991  * and
3992  *
3993  * intel_uncore_forcewake_put__locked();
3994  * spin_unlock_irq(&dev_priv->uncore.lock);
3995  *
3996  *
3997  * Note: some registers may not need forcewake held, so
3998  * intel_uncore_forcewake_{get,put} can be omitted, see
3999  * intel_uncore_forcewake_for_reg().
4000  *
4001  * Certain architectures will die if the same cacheline is concurrently accessed
4002  * by different clients (e.g. on Ivybridge). Access to registers should
4003  * therefore generally be serialised, by either the dev_priv->uncore.lock or
4004  * a more localised lock guarding all access to that bank of registers.
4005  */
4006 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
4007 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
4008 #define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
4009 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
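
/*
 * Putting the above together (sketch; the forcewake domain and register
 * are illustrative):
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(GEN6_RPSTAT1);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */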
4010 
4011 /* "Broadcast RGB" property */
4012 #define INTEL_BROADCAST_RGB_AUTO 0
4013 #define INTEL_BROADCAST_RGB_FULL 1
4014 #define INTEL_BROADCAST_RGB_LIMITED 2
4015 
4016 static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
4017 {
4018 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4019 		return VLV_VGACNTRL;
4020 	else if (INTEL_GEN(dev_priv) >= 5)
4021 		return CPU_VGACNTRL;
4022 	else
4023 		return VGACNTRL;
4024 }
4025 
4026 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
4027 {
4028 	unsigned long j = msecs_to_jiffies(m);
4029 
4030 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
4031 }
4032 
4033 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
4034 {
4035 	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
4036 }
4037 
4038 static inline unsigned long
4039 timespec_to_jiffies_timeout(const struct timespec *value)
4040 {
4041 	unsigned long j = timespec_to_jiffies(value);
4042 
4043 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
4044 }
4045 
4046 /*
4047  * If you need to wait X milliseconds between events A and B, but event B
4048  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
4049  * when event A happened, then just before event B you call this function and
4050  * pass the timestamp as the first argument, and X as the second argument.
4051  */
4052 static inline void
4053 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
4054 {
4055 	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
4056 
4057 	/*
4058 	 * Don't re-read the value of "jiffies" every time since it may change
4059 	 * behind our back and break the math.
4060 	 */
4061 	tmp_jiffies = jiffies;
4062 	target_jiffies = timestamp_jiffies +
4063 			 msecs_to_jiffies_timeout(to_wait_ms);
4064 
4065 	if (time_after(target_jiffies, tmp_jiffies)) {
4066 		remaining_jiffies = target_jiffies - tmp_jiffies;
4067 		while (remaining_jiffies)
4068 			remaining_jiffies =
4069 			    schedule_timeout_uninterruptible(remaining_jiffies);
4070 	}
4071 }
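
/*
 * e.g. (sketch) enforcing a minimum panel power-cycle delay, where
 * "panel_off_jiffies" and "panel_power_on" are illustrative names:
 *
 *	panel_off_jiffies = jiffies;	(recorded at event A)
 *	...
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, 500);
 *	panel_power_on();		(event B, >= 500ms later)
 */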
4072 
4073 static inline bool
4074 __i915_request_irq_complete(const struct drm_i915_gem_request *req)
4075 {
4076 	struct intel_engine_cs *engine = req->engine;
4077 	u32 seqno;
4078 
4079 	/* Note that the engine may have wrapped around the seqno, and
4080 	 * so our request->global_seqno will be ahead of the hardware,
4081 	 * even though it completed the request before wrapping. We catch
4082 	 * this by kicking all the waiters before resetting the seqno
4083 	 * in hardware, and also signal the fence.
4084 	 */
4085 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
4086 		return true;
4087 
4088 	/* The request was dequeued before we were awoken. We check after
4089 	 * inspecting the hw to confirm that this was the same request
4090 	 * that generated the HWS update. The memory barriers within
4091 	 * the request execution are sufficient to ensure that a check
4092 	 * after reading the value from hw matches this request.
4093 	 */
4094 	seqno = i915_gem_request_global_seqno(req);
4095 	if (!seqno)
4096 		return false;
4097 
4098 	/* Before we do the heavier coherent read of the seqno,
4099 	 * check the value (hopefully) in the CPU cacheline.
4100 	 */
4101 	if (__i915_gem_request_completed(req, seqno))
4102 		return true;
4103 
4104 	/* Ensure our read of the seqno is coherent so that we
4105 	 * do not "miss an interrupt" (i.e. if this is the last
4106 	 * request and the seqno write from the GPU is not visible
4107 	 * by the time the interrupt fires, we will see that the
4108 	 * request is incomplete and go back to sleep awaiting
4109 	 * another interrupt that will never come.)
4110 	 *
4111 	 * Strictly, we only need to do this once after an interrupt,
4112 	 * but it is easier and safer to do it every time the waiter
4113 	 * is woken.
4114 	 */
4115 	if (engine->irq_seqno_barrier &&
4116 	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
4117 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
4118 
4119 		/* The ordering of irq_posted versus applying the barrier
4120 		 * is crucial. The clearing of the current irq_posted must
4121 		 * be visible before we perform the barrier operation,
4122 		 * such that if a subsequent interrupt arrives, irq_posted
4123 		 * is reasserted and our task rewoken (which causes us to
4124 		 * do another __i915_request_irq_complete() immediately
4125 		 * and reapply the barrier). Conversely, if the clear
4126 		 * occurs after the barrier, then an interrupt that arrived
4127 		 * whilst we waited on the barrier would not trigger a
4128 		 * barrier on the next pass, and the read may not see the
4129 		 * seqno update.
4130 		 */
4131 		engine->irq_seqno_barrier(engine);
4132 
4133 		/* If we consume the irq, but we are no longer the bottom-half,
4134 		 * the real bottom-half may not have serialised their own
4135 		 * seqno check with the irq-barrier (i.e. may have inspected
4136 		 * the seqno before we believe it coherent since they see
4137 		 * irq_posted == false but we are still running).
4138 		 */
4139 		spin_lock_irq(&b->irq_lock);
4140 		if (b->irq_wait && b->irq_wait->tsk != current)
4141 			/* Note that if the bottom-half is changed as we
4142 			 * are sending the wake-up, the new bottom-half will
4143 			 * be woken by whomever made the change. We only have
4144 			 * to worry about when we steal the irq-posted for
4145 			 * ourself.
4146 			 */
4147 			wake_up_process(b->irq_wait->tsk);
4148 		spin_unlock_irq(&b->irq_lock);
4149 
4150 		if (__i915_gem_request_completed(req, seqno))
4151 			return true;
4152 	}
4153 
4154 	return false;
4155 }
4156 
4157 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
4158 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
4159 
4160 /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
4161  * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
4162  * perform the operation. To check beforehand, pass the parameters to
4163  * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
4164  * you only need to pass in the minor offsets; page-aligned pointers are
4165  * always valid.
4166  *
4167  * To check only for SSE4.1, knowing that the future use will be correctly
4168  * aligned, use i915_has_memcpy_from_wc().
4169  */
4170 #define i915_can_memcpy_from_wc(dst, src, len) \
4171 	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
4172 
4173 #define i915_has_memcpy_from_wc() \
4174 	i915_memcpy_from_wc(NULL, NULL, 0)
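
/*
 * A sketch of the intended caller-side pattern, falling back to a plain
 * memcpy when the WC fast path cannot be used:
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */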
4175 
4176 /* i915_mm.c */
4177 int remap_io_mapping(struct vm_area_struct *vma,
4178 		     unsigned long addr, unsigned long pfn, unsigned long size,
4179 		     struct io_mapping *iomap);
4180 
4181 static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
4182 {
4183 	return (obj->cache_level != I915_CACHE_NONE ||
4184 		HAS_LLC(to_i915(obj->base.dev)));
4185 }
4186 
4187 #endif
4188