xref: /dragonfly/sys/dev/drm/i915/intel_uncore.c (revision 896f2e3a)
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

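/*
 * Raw MMIO accessors. These go straight through mmio_map and bypass the
 * forcewake handling and lock tracking done by the uncore.funcs paths
 * below; callers are responsible for holding uncore.lock and for keeping
 * the hardware awake where that matters.
 */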
#define __raw_i915_read8(dev_priv__, reg__) DRM_READ8(dev_priv__->mmio_map, reg__)
#define __raw_i915_write8(dev_priv__, reg__, val__) DRM_WRITE8(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read16(dev_priv__, reg__) DRM_READ16(dev_priv__->mmio_map, reg__)
#define __raw_i915_write16(dev_priv__, reg__, val__) DRM_WRITE16(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)
#define __raw_i915_write32(dev_priv__, reg__, val__) DRM_WRITE32(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read64(dev_priv__, reg__) DRM_READ64(dev_priv__->mmio_map, reg__)
#define __raw_i915_write64(dev_priv__, reg__, val__) DRM_WRITE64(dev_priv__->mmio_map, reg__, val__)

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

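/*
 * Gen6 forcewake handshake: wait for any previous request to drain,
 * set the wake bit, then poll for the hardware ack before callers
 * touch registers in the GT power well.
 */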
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

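/*
 * The GT has a small FIFO that buffers register writes while the power
 * well is down; wait until a free entry (beyond the reserved ones) is
 * available before posting a write. Returns nonzero if the wait timed
 * out, in which case the caller should check GTFIFODBG afterwards.
 */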
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * re-read FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

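/*
 * Valleyview splits forcewake into separate render and media wells;
 * each engine selected in fw_engine gets its own request/ack handshake
 * against its own FORCEWAKE_*_VLV register.
 */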
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

void vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (FORCEWAKE_RENDER & fw_engine) {
		if (dev_priv->uncore.fw_rendercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							FORCEWAKE_RENDER);
	}
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (dev_priv->uncore.fw_mediacount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							FORCEWAKE_MEDIA);
	}
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

void vlv_force_wake_put(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	if (FORCEWAKE_RENDER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
		if (--dev_priv->uncore.fw_rendercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
		if (--dev_priv->uncore.fw_mediacount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							FORCEWAKE_MEDIA);
	}

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

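/*
 * Deferred forcewake release: gen6_gt_force_wake_put() keeps the last
 * reference alive and schedules this work item to drop it, so that
 * back-to-back get/put sequences don't bounce the GT power well.
 */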
static void gen6_force_wake_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_val;

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);

	/* Turn off power gating; required especially on BIOS-less systems */
	if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_put(dev_priv, fw_engine);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->uncore.force_wake_work,
				 1);
	}
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	intel_runtime_pm_put(dev_priv);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

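/*
 * The per-generation register read functions are stamped out from the
 * templates below. Each takes uncore.lock, applies whatever forcewake
 * handling its generation needs around the raw read, and traces the
 * access before returning the value.
 */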
#define REG_READ_HEADER(x) \
	u##x val = 0; \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define REG_READ_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	unsigned *fwcount; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		fwengine = FORCEWAKE_RENDER; \
		fwcount = &dev_priv->uncore.fw_rendercount; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		fwengine = FORCEWAKE_MEDIA; \
		fwcount = &dev_priv->uncore.fw_mediacount; \
	} \
	if (fwengine != 0) { \
		if ((*fwcount)++ == 0) \
			(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
								fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (--(*fwcount) == 0) \
			(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
							fwengine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define REG_WRITE_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	assert_device_not_suspended(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	assert_device_not_suspended(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

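/*
 * On gen8, registers in the shadow list above can be written without
 * grabbing forcewake; for everything else below 0x40000 we still take
 * a full forcewake reference around the write.
 */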
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							FORCEWAKE_ALL); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							FORCEWAKE_ALL); \
	} \
	REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

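/*
 * Pick the forcewake and MMIO accessor vfuncs for this device. Called
 * once at load time, before any tracked register access happens.
 */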
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);
#endif

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
}

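/*
 * Registers userspace is allowed to read via DRM_IOCTL_I915_REG_READ;
 * each entry gives the register offset, its size in bytes, and a
 * bitmask of the generations on which it may be read.
 */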
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
	if (IS_ERR(hs)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(hs);
	}

	args->reset_count = i915_reset_count(&dev_priv->gpu_error);

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

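/* Gen4 GPU reset is driven through the GDRST register in PCI config space. */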
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
	return ret;
}

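/* Dispatch to the appropriate per-generation reset routine, if one exists. */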
int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}