/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from the same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}
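
/*
 * Editorial note on the sequence above: acquiring a domain is a three-step
 * handshake with the hardware -- wait for any previous kernel ack bit to
 * clear, write val_set to request the wake, then wait for the ack bit to
 * be asserted before touching GT registers.
 */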

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do this for all domains, the first one found suffices */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
#if 0
		cond_resched();
#endif
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
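
/*
 * Illustrative usage sketch (not code from this driver): a sequence that
 * must not lose forcewake brackets its register accesses with a matching
 * get/put pair, e.g.:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... MMIO accesses that rely on the GT staying awake ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */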

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00,  0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
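
/*
 * Worked example (offsets taken from the ranges above): on VLV, a read of
 * offset 0x12080 matches REG_RANGE(0x12000, 0x14000) in the media range,
 * so vlv_read*() wakes FORCEWAKE_MEDIA; offset 0x2500 matches the render
 * range and wakes FORCEWAKE_RENDER; anything at or above 0x40000 fails
 * NEEDS_FORCE_WAKE() and needs no forcewake at all.
 */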

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	/* XXX. We limit the automatically armed mmio-debug traces on these
	 * platforms; they reveal just too many unclaimed accesses and
	 * CI/BAT suffers from the noise. Please fix and then re-enable
	 * the automatic traces.
	 */
	if (i915.mmio_debug < 2 &&
	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	if (WARN(check_for_unclaimed_mmio(dev_priv),
		 "Unclaimed register detected %s %s register 0x%x\n",
		 before ? "before" : "after",
		 read ? "reading" : "writing to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
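
/*
 * How the automatic forcewake above plays out (descriptive note): each
 * mmio accessor that hits a forcewake range takes a wake_count reference
 * and arms the domain timer for jiffies + 1; intel_uncore_fw_release_timer()
 * then drops that reference shortly afterwards. Back-to-back accesses find
 * wake_count already nonzero and skip the hardware handshake entirely.
 */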

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE(offset)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
			return true;

	return false;
}
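
/*
 * Background note (editorial, not from the original source): shadowed
 * registers are ones the hardware tracks even while the GT is powered
 * down, so gen8_write*() and the chv/gen9 variants below can skip the
 * forcewake dance for them and just post the write.
 */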

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
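
/*
 * For example, ASSIGN_READ_MMIO_VFUNCS(gen6) in intel_uncore_init() below
 * expands to wiring mmio_readb/readw/readl/readq to gen6_read8/16/32/64,
 * i.e. the token-pasted accessors instantiated above.
 */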

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which the test below
		 * (correctly) interprets as MT forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
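
	/*
	 * Worked example of the encoding above (editorial note): for the
	 * 8-byte whitelist entry, passing the aligned offset yields
	 * size == 8 and a plain I915_READ64; passing offset | 1 yields
	 * size == (8 | 1), selecting the I915_READ64_2x32 upper/lower
	 * dword read below instead.
	 */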

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, so there is no need to
	 * check for fifo space for the write or to forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     i915_reg_t reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}