xref: /openbsd/sys/dev/pci/drm/i915/intel_uncore.c (revision 2da3df3d)
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}

void
intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{
	mtx_init(&i915->mmio_debug.lock, IPL_TTY);
	i915->mmio_debug.unclaimed_mmio_check = 1;

	i915->uncore.debug = &i915->mmio_debug;
}

static void mmio_debug_suspend(struct intel_uncore *uncore)
{
	if (!uncore->debug)
		return;

	spin_lock(&uncore->debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!uncore->debug->suspend_count++) {
		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
		uncore->debug->unclaimed_mmio_check = 0;
	}

	spin_unlock(&uncore->debug->lock);
}

static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);

static void mmio_debug_resume(struct intel_uncore *uncore)
{
	if (!uncore->debug)
		return;

	spin_lock(&uncore->debug->lock);

	if (!--uncore->debug->suspend_count)
		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;

	if (check_for_unclaimed_mmio(uncore))
		drm_info(&uncore->i915->drm,
			 "Invalid mmio detected during user access\n");

	spin_unlock(&uncore->debug->lock);
}

static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
	"gsc",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
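
/*
 * Note: the FORCEWAKE registers are "masked" registers, where the upper
 * 16 bits select which of the lower 16 bits a write actually updates.
 * As a minimal sketch, with FORCEWAKE_KERNEL being bit 0:
 *
 *	fw_set(d, FORCEWAKE_KERNEL);	// writes 0x00010001: assert bit 0
 *	fw_clear(d, FORCEWAKE_KERNEL);	// writes 0x00010000: clear bit 0
 *
 * so other agents (e.g. firmware) toggling other bits are unaffected.
 */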

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we
	 * are trying to reset here actually exists at this point (engines
	 * could be fused off in ICL+), so don't wait for acks.
	 */
	/* WaRsClearFWBitsAtReset */
	if (GRAPHICS_VER(d->uncore->i915) >= 12)
		fw_clear(d, 0xefff);
	else
		fw_clear(d, 0xffff);
}

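/*
 * Arm the domain's auto-release timer: the extra wake_count reference
 * taken here is dropped ~1ms later by intel_uncore_fw_release_timer(),
 * which powers the well back down if no new reference appeared meanwhile.
 */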
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
#ifdef __linux__
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
#else
	timeout_add_msec(&d->timer, 1);
#endif
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
		return;

	if (fw_ack(d) == ~0)
		drm_err(&d->uncore->i915->drm,
			"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			intel_uncore_forcewake_domain_to_str(d->id));
	else
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack to clear.\n",
			intel_uncore_forcewake_domain_to_str(d->id));

	add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * The driver's wake request can collide with the hardware's own
	 * wake requests, which can cause the hardware not to deliver the
	 * driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	drm_dbg(&d->uncore->i915->drm,
		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
		intel_uncore_forcewake_domain_to_str(d->id),
		type == ACK_SET ? "set" : "clear",
		fw_ack(d),
		pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack request.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

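/*
 * Request all the wanted wells in a first pass and only then wait for
 * the acks in a second pass, so the individual hardware wake-ups can
 * proceed in parallel rather than being serialized per domain.
 */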
static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

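/*
 * Wait until the GT write FIFO has room, then account for the single
 * entry the caller is about to consume (hence the final "n - 1").
 */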
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to
	 * re-read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

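/*
 * Two equivalent implementations of the delayed-release callback: the
 * Linux build uses an hrtimer, the OpenBSD build a timeout(9).  Both
 * drop the timer's wake_count reference and power the well down unless
 * the domain was re-marked active in the meantime.
 */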
#ifdef __linux__

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

#else

void
intel_uncore_fw_release_timer(void *arg)
{
	struct intel_uncore_forcewake_domain *domain = arg;
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

#endif

/* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR.  When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly.  Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT not to power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
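
/*
 * A minimal usage sketch (illustrative, not taken from a caller in this
 * file): keep the render well awake across a multi-register sequence and
 * use the _fw accessors, which skip the per-access forcewake handling:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_write_fw(uncore, reg, val | bit);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */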

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		mmio_debug_suspend(uncore);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		mmio_debug_resume(uncore);
		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/*
 * We give fast paths for the really cool registers.  The second range
 * includes media domains (and the GSC starting from Xe_LPM+).
 */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= 0x116000; \
})
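
/*
 * For example, 0x2030 (render RING_TAIL) and 0x1C0030 (a media forcewake
 * register) take the forcewake-table path, while offsets in the
 * 0x40000..0x115fff window (display and other always-on ranges) skip the
 * table lookup entirely.
 */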

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
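
/*
 * BSEARCH() evaluates to a pointer to the matching entry, or NULL when no
 * entry matches; find_fw_domain() below is a typical user, walking the
 * sorted uncore->fw_domains_table with fw_range_cmp() as the comparator.
 */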

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

/*
 * Shadowed register tables describe special register ranges that i915 is
 * allowed to write to without acquiring forcewake.  If these registers' power
 * wells are down, the hardware will save values written by i915 to a shadow
 * copy and automatically transfer them into the real register the next time
 * the power well is woken up.  Shadowing only applies to writes; forcewake
 * must still be acquired when reading from registers in these ranges.
 *
 * The documentation for shadowed registers is somewhat spotty on older
 * platforms.  However, missing registers from these lists are non-fatal; it
 * just means we'll wake up the hardware for some register accesses where we
 * didn't really need to.
 *
 * The ranges listed in these tables must be sorted by offset.
 *
 * When adding new tables here, please also add them to
 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */
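
/*
 * For example, 0x2030 (the render ring's RING_TAIL) is listed in all of
 * the primary-GT tables below, so a tail-pointer write can be posted
 * without waking the render well, while a read of the same offset still
 * takes the forcewake path.
 */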

static const struct i915_range gen8_shadowed_regs[] = {
	{ .start =  0x2030, .end =  0x2030 },
	{ .start =  0xA008, .end =  0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
};

static const struct i915_range gen11_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2550, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22230, .end =  0x22230 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};

static const struct i915_range gen12_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4D4, .end =   0xC4D4 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },

	/*
	 * The rest of these ranges are specific to Xe_HP and beyond, but
	 * are reserved/unused ranges on earlier gen12 platforms, so they can
	 * be safely added to the gen12 table.
	 */
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range dg2_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4E0, .end =   0xC4E0 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =   0xC658, .end =   0xC658 },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range pvc_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4E0, .end =   0xC4E0 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =   0xC658, .end =   0xC658 },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range mtl_shadowed_regs[] = {
	{ .start =   0x2030, .end =   0x2030 },
	{ .start =   0x2510, .end =   0x2550 },
	{ .start =   0xA008, .end =   0xA00C },
	{ .start =   0xA188, .end =   0xA188 },
	{ .start =   0xA278, .end =   0xA278 },
	{ .start =   0xA540, .end =   0xA56C },
	{ .start =   0xC050, .end =   0xC050 },
	{ .start =   0xC340, .end =   0xC340 },
	{ .start =   0xC4C8, .end =   0xC4C8 },
	{ .start =   0xC4E0, .end =   0xC4E0 },
	{ .start =   0xC600, .end =   0xC600 },
	{ .start =   0xC658, .end =   0xC658 },
	{ .start =   0xCFD4, .end =   0xCFDC },
	{ .start =  0x22030, .end =  0x22030 },
	{ .start =  0x22510, .end =  0x22550 },
};

static const struct i915_range xelpmp_shadowed_regs[] = {
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x38A008, .end = 0x38A00C },
	{ .start = 0x38A188, .end = 0x38A188 },
	{ .start = 0x38A278, .end = 0x38A278 },
	{ .start = 0x38A540, .end = 0x38A56C },
	{ .start = 0x38A618, .end = 0x38A618 },
	{ .start = 0x38C050, .end = 0x38C050 },
	{ .start = 0x38C340, .end = 0x38C340 },
	{ .start = 0x38C4C8, .end = 0x38C4C8 },
	{ .start = 0x38C4E0, .end = 0x38C4E4 },
	{ .start = 0x38C600, .end = 0x38C600 },
	{ .start = 0x38C658, .end = 0x38C658 },
	{ .start = 0x38CFD4, .end = 0x38CFDC },
};

static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
	if (key < range->start)
		return -1;
	else if (key > range->end)
		return 1;
	else
		return 0;
}

static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{
	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
		return false;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	return BSEARCH(offset,
		       uncore->shadowed_reg_table,
		       uncore->shadowed_reg_table_entries,
		       mmio_range_cmp);
}

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
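
/*
 * Note the read/write asymmetry: only writes consult is_shadowed() and may
 * skip the wake entirely, so for a shadowed offset
 * __fwtable_reg_write_fw_domains() evaluates to 0 while
 * __fwtable_reg_read_fw_domains() still reports the owning domain(s).
 */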

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with
 * the same forcewake domain, it is customary to combine them into a single
 * row in the tables below to keep the tables small and lookups fast.
 * Likewise, reserved/unused ranges may be combined with the preceding and/or
 * following ranges since the driver will never be making MMIO accesses in
 * those ranges.
 *
 * For example, if the bspec were to list:
 *
 *    ...
 *    0x1000 - 0x1fff:  GT
 *    0x2000 - 0x2cff:  GT
 *    0x2d00 - 0x2fff:  unused/reserved
 *    0x3000 - 0xffff:  GT
 *    ...
 *
 * these could all be represented by a single line in the code:
 *
 *   GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
 *
 * When adding new forcewake tables here, please also add them to
 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};

static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0   -  0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		 0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};

/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain.
 */
#define XEHP_FWRANGES(FW_RANGE_D800)					\
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*					\
		  0x0 -  0xaff: reserved					\
		0xb00 - 0x1fff: always on */					\
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),				\
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),				\
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*					\
		0x4b00 - 0x4fff: reserved					\
		0x5000 - 0x51ff: always on */					\
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),				\
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),				\
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),				\
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*					\
		0x8160 - 0x817f: reserved					\
		0x8180 - 0x81ff: always on */					\
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),				\
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),				\
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*				\
		0x8500 - 0x87ff: gt						\
		0x8800 - 0x8c7f: reserved					\
		0x8c80 - 0x8cff: gt (DG2 only) */				\
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*			\
		0x8d00 - 0x8dff: render (DG2 only)				\
		0x8e00 - 0x8fff: reserved */					\
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*				\
		0x9000 - 0x947f: gt						\
		0x9480 - 0x94cf: reserved */					\
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),				\
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*					\
		0x9560 - 0x95ff: always on					\
		0x9600 - 0x967f: reserved */					\
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*			\
		0x9680 - 0x96ff: render (DG2 only)				\
		0x9700 - 0x97ff: reserved */					\
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*				\
		0x9800 - 0xb4ff: gt						\
		0xb500 - 0xbfff: reserved					\
		0xc000 - 0xcfff: gt */						\
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),					\
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800),			\
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),				\
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),				\
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*				\
		0xdd00 - 0xddff: gt						\
		0xde00 - 0xde7f: reserved */					\
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*			\
		0xde80 - 0xdfff: render						\
		0xe000 - 0xe0ff: reserved					\
		0xe100 - 0xe8ff: render */					\
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*				\
		0xe900 - 0xe9ff: gt						\
		0xea00 - 0xefff: reserved					\
		0xf000 - 0xffff: gt */						\
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /*					\
		0x10000 - 0x11fff: reserved					\
		0x12000 - 0x127ff: always on					\
		0x12800 - 0x12fff: reserved */					\
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */	\
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /*		\
		0x13200 - 0x133ff: VD2 (DG2 only)				\
		0x13400 - 0x13fff: reserved */					\
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */	\
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */	\
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */	\
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */	\
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),			\
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*			\
		0x15000 - 0x15fff: gt (DG2 only)				\
		0x16000 - 0x16dff: reserved */					\
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER),			\
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /*		\
		0x20000 - 0x20fff: VD0 (XEHPSDV only)				\
		0x21000 - 0x21fff: reserved */					\
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),				\
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*					\
		0x24000 - 0x2407f: always on					\
		0x24080 - 0x2417f: reserved */					\
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*			\
		0x24180 - 0x241ff: gt						\
1590 		0x24200 - 0x249ff: reserved */					\
1591 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*			\
1592 		0x24a00 - 0x24a7f: render					\
1593 		0x24a80 - 0x251ff: reserved */					\
1594 	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*			\
1595 		0x25200 - 0x252ff: gt						\
1596 		0x25300 - 0x25fff: reserved */					\
1597 	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*			\
1598 		0x26000 - 0x27fff: render					\
1599 		0x28000 - 0x29fff: reserved					\
1600 		0x2a000 - 0x2ffff: undocumented */				\
1601 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),				\
1602 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),					\
1603 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*		\
1604 		0x1c0000 - 0x1c2bff: VD0					\
1605 		0x1c2c00 - 0x1c2cff: reserved					\
1606 		0x1c2d00 - 0x1c2dff: VD0					\
1607 		0x1c2e00 - 0x1c3eff: VD0 (DG2 only)				\
1608 		0x1c3f00 - 0x1c3fff: VD0 */					\
1609 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*		\
1610 		0x1c4000 - 0x1c6bff: VD1					\
1611 		0x1c6c00 - 0x1c6cff: reserved					\
1612 		0x1c6d00 - 0x1c6dff: VD1					\
1613 		0x1c6e00 - 0x1c7fff: reserved */				\
1614 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*		\
1615 		0x1c8000 - 0x1ca0ff: VE0					\
1616 		0x1ca100 - 0x1cbfff: reserved */				\
1617 	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),		\
1618 	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),		\
1619 	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),		\
1620 	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),		\
1621 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*		\
1622 		0x1d0000 - 0x1d2bff: VD2					\
1623 		0x1d2c00 - 0x1d2cff: reserved					\
1624 		0x1d2d00 - 0x1d2dff: VD2					\
1625 		0x1d2e00 - 0x1d3dff: VD2 (DG2 only)				\
1626 		0x1d3e00 - 0x1d3eff: reserved					\
1627 		0x1d3f00 - 0x1d3fff: VD2 */					\
1628 	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*		\
1629 		0x1d4000 - 0x1d6bff: VD3					\
1630 		0x1d6c00 - 0x1d6cff: reserved					\
1631 		0x1d6d00 - 0x1d6dff: VD3					\
1632 		0x1d6e00 - 0x1d7fff: reserved */				\
1633 	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*		\
1634 		0x1d8000 - 0x1da0ff: VE1					\
1635 		0x1da100 - 0x1dffff: reserved */				\
1636 	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*		\
1637 		0x1e0000 - 0x1e2bff: VD4					\
1638 		0x1e2c00 - 0x1e2cff: reserved					\
1639 		0x1e2d00 - 0x1e2dff: VD4					\
1640 		0x1e2e00 - 0x1e3eff: reserved					\
1641 		0x1e3f00 - 0x1e3fff: VD4 */					\
1642 	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*		\
1643 		0x1e4000 - 0x1e6bff: VD5					\
1644 		0x1e6c00 - 0x1e6cff: reserved					\
1645 		0x1e6d00 - 0x1e6dff: VD5					\
1646 		0x1e6e00 - 0x1e7fff: reserved */				\
1647 	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*		\
1648 		0x1e8000 - 0x1ea0ff: VE2					\
1649 		0x1ea100 - 0x1effff: reserved */				\
1650 	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*		\
1651 		0x1f0000 - 0x1f2bff: VD6					\
1652 		0x1f2c00 - 0x1f2cff: reserved					\
1653 		0x1f2d00 - 0x1f2dff: VD6					\
1654 		0x1f2e00 - 0x1f3eff: reserved					\
1655 		0x1f3f00 - 0x1f3fff: VD6 */					\
1656 	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*		\
1657 		0x1f4000 - 0x1f6bff: VD7					\
1658 		0x1f6c00 - 0x1f6cff: reserved					\
1659 		0x1f6d00 - 0x1f6dff: VD7					\
1660 		0x1f6e00 - 0x1f7fff: reserved */				\
1661 	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1662 
1663 static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1664 	XEHP_FWRANGES(FORCEWAKE_GT)
1665 };
1666 
1667 static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1668 	XEHP_FWRANGES(FORCEWAKE_RENDER)
1669 };
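
/*
 * The two instantiations above are the only users of XEHP_FWRANGES and
 * differ solely in how the 0xd800 - 0xd87f range is tagged:
 * __xehp_fw_ranges (graphics IP 12.50) resolves it to FORCEWAKE_GT, while
 * __dg2_fw_ranges (12.55 and later) resolves it to FORCEWAKE_RENDER,
 * matching the comment above the macro.
 */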
1670 
1671 static const struct intel_forcewake_range __pvc_fw_ranges[] = {
1672 	GEN_FW_RANGE(0x0, 0xaff, 0),
1673 	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1674 	GEN_FW_RANGE(0xc00, 0xfff, 0),
1675 	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1676 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1677 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1678 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1679 	GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
1680 		0x4000 - 0x4aff: gt
1681 		0x4b00 - 0x4fff: reserved
1682 		0x5000 - 0x51ff: gt
1683 		0x5200 - 0x52ff: reserved
1684 		0x5300 - 0x53ff: gt
1685 		0x5400 - 0x7fff: reserved
1686 		0x8000 - 0x813f: gt */
1687 	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
1688 	GEN_FW_RANGE(0x8180, 0x81ff, 0),
1689 	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1690 		0x8200 - 0x82ff: gt
1691 		0x8300 - 0x84ff: reserved
1692 		0x8500 - 0x887f: gt
1693 		0x8880 - 0x8a7f: reserved
1694 		0x8a80 - 0x8aff: gt
1695 		0x8b00 - 0x8fff: reserved
1696 		0x9000 - 0x947f: gt
1697 		0x9480 - 0x94cf: reserved */
1698 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1699 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1700 		0x9560 - 0x95ff: always on
1701 		0x9600 - 0x967f: reserved */
1702 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1703 		0x9680 - 0x96ff: render
1704 		0x9700 - 0x97ff: reserved */
1705 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1706 		0x9800 - 0xb4ff: gt
1707 		0xb500 - 0xbfff: reserved
1708 		0xc000 - 0xcfff: gt */
1709 	GEN_FW_RANGE(0xd000, 0xd3ff, 0),
1710 	GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
1711 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1712 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1713 		0xdd00 - 0xddff: gt
1714 		0xde00 - 0xde7f: reserved */
1715 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1716 		0xde80 - 0xdeff: render
1717 		0xdf00 - 0xe1ff: reserved
1718 		0xe200 - 0xe7ff: render
1719 		0xe800 - 0xe8ff: reserved */
1720 	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
1721 		 0xe900 -  0xe9ff: gt
1722 		 0xea00 -  0xebff: reserved
1723 		 0xec00 -  0xffff: gt
1724 		0x10000 - 0x11fff: reserved */
1725 	GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
1726 		0x12000 - 0x127ff: always on
1727 		0x12800 - 0x12fff: reserved */
1728 	GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
1729 		0x13000 - 0x135ff: gt
1730 		0x13600 - 0x147ff: reserved
1731 		0x14800 - 0x153ff: gt
1732 		0x15400 - 0x19fff: reserved */
1733 	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1734 		0x1a000 - 0x1ffff: render
1735 		0x20000 - 0x21fff: reserved */
1736 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1737 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1738 		0x24000 - 0x2407f: always on
1739 		0x24080 - 0x2417f: reserved */
1740 	GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
1741 		0x24180 - 0x241ff: gt
1742 		0x24200 - 0x251ff: reserved
1743 		0x25200 - 0x252ff: gt
1744 		0x25300 - 0x25fff: reserved */
1745 	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1746 		0x26000 - 0x27fff: render
1747 		0x28000 - 0x2ffff: reserved */
1748 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1749 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1750 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1751 		0x1c0000 - 0x1c2bff: VD0
1752 		0x1c2c00 - 0x1c2cff: reserved
1753 		0x1c2d00 - 0x1c2dff: VD0
1754 		0x1c2e00 - 0x1c3eff: reserved
1755 		0x1c3f00 - 0x1c3fff: VD0 */
1756 	GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
1757 		0x1c4000 - 0x1c6aff: VD1
1758 		0x1c6b00 - 0x1c7eff: reserved
1759 		0x1c7f00 - 0x1c7fff: VD1
1760 		0x1c8000 - 0x1cffff: reserved */
1761 	GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1762 		0x1d0000 - 0x1d2aff: VD2
1763 		0x1d2b00 - 0x1d3eff: reserved
1764 		0x1d3f00 - 0x1d3fff: VD2
1765 		0x1d4000 - 0x23ffff: reserved */
1766 	GEN_FW_RANGE(0x240000, 0x3dffff, 0),
1767 	GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
1768 };
1769 
1770 static const struct intel_forcewake_range __mtl_fw_ranges[] = {
1771 	GEN_FW_RANGE(0x0, 0xaff, 0),
1772 	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1773 	GEN_FW_RANGE(0xc00, 0xfff, 0),
1774 	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1775 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1776 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1777 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1778 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1779 		0x4000 - 0x48ff: render
1780 		0x4900 - 0x51ff: reserved */
1781 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1782 		0x5200 - 0x53ff: render
1783 		0x5400 - 0x54ff: reserved
1784 		0x5500 - 0x7fff: render */
1785 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1786 	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
1787 		0x8140 - 0x815f: render
1788 		0x8160 - 0x817f: reserved */
1789 	GEN_FW_RANGE(0x8180, 0x81ff, 0),
1790 	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1791 		0x8200 - 0x87ff: gt
1792 		0x8800 - 0x8dff: reserved
1793 		0x8e00 - 0x8f7f: gt
1794 		0x8f80 - 0x8fff: reserved
1795 		0x9000 - 0x947f: gt
1796 		0x9480 - 0x94cf: reserved */
1797 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1798 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1799 		0x9560 - 0x95ff: always on
1800 		0x9600 - 0x967f: reserved */
1801 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1802 		0x9680 - 0x96ff: render
1803 		0x9700 - 0x97ff: reserved */
1804 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1805 		0x9800 - 0xb4ff: gt
1806 		0xb500 - 0xbfff: reserved
1807 		0xc000 - 0xcfff: gt */
1808 	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
1809 		0xd000 - 0xd3ff: always on
1810 		0xd400 - 0xd7ff: reserved */
1811 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1812 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1813 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1814 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1815 		0xdd00 - 0xddff: gt
1816 		0xde00 - 0xde7f: reserved */
1817 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1818 		0xde80 - 0xdfff: render
1819 		0xe000 - 0xe0ff: reserved
1820 		0xe100 - 0xe8ff: render */
1821 	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
1822 	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
1823 		 0xea00 - 0x11fff: reserved
1824 		0x12000 - 0x127ff: always on
1825 		0x12800 - 0x147ff: reserved */
1826 	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
1827 		0x14800 - 0x153ff: gt
1828 		0x15400 - 0x19fff: reserved */
1829 	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1830 		0x1a000 - 0x1bfff: render
1831 		0x1c000 - 0x21fff: reserved */
1832 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1833 	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
1834 		0x24000 - 0x2407f: always on
1835 		0x24080 - 0x2ffff: reserved */
1836 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1837 	GEN_FW_RANGE(0x40000, 0x1901ef, 0),
1838 	GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
1839 		/* FIXME: WA to wake GT while triggering H2G */
1840 };
1841 
1842 /*
1843  * Note that the register ranges here are the final offsets after
1844  * translation of the GSI block to the 0x380000 offset.
1845  *
1846  * NOTE:  There are a couple MCR ranges near the bottom of this table
1847  * that need to power up either VD0 or VD2 depending on which replicated
1848  * instance of the register we're trying to access.  Our forcewake logic
1849  * at the moment doesn't have a good way to take steering into consideration,
1850  * and the driver doesn't even access any registers in those ranges today,
1851  * so for now we just mark those ranges as FORCEWAKE_ALL.  That will ensure
1852  * proper operation if we do start using the ranges in the future, and we
1853  * can determine at that time whether it's worth adding extra complexity to
1854  * the forcewake handling to take steering into consideration.
1855  */
1856 static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
1857 	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
1858 	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
1859 		0x116000 - 0x117fff: gsc
1860 		0x118000 - 0x119fff: reserved
1861 		0x11a000 - 0x11efff: gsc
1862 		0x11f000 - 0x11ffff: reserved */
1863 	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
1864 	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
1865 		0x1c0000 - 0x1c3dff: VD0
1866 		0x1c3e00 - 0x1c3eff: reserved
1867 		0x1c3f00 - 0x1c3fff: VD0
1868 		0x1c4000 - 0x1c7fff: reserved */
1869 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1870 		0x1c8000 - 0x1ca0ff: VE0
1871 		0x1ca100 - 0x1cbfff: reserved */
1872 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1873 		0x1cc000 - 0x1cdfff: VD0
1874 		0x1ce000 - 0x1cffff: reserved */
1875 	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
1876 		0x1d0000 - 0x1d3dff: VD2
1877 		0x1d3e00 - 0x1d3eff: reserved
1878 		0x1d4000 - 0x1d7fff: VD2 */
1879 	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
1880 	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
1881 		0x1da100 - 0x23ffff: reserved
1882 		0x240000 - 0x37ffff: non-GT range
1883 		0x380000 - 0x380aff: reserved */
1884 	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
1885 	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
1886 	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
1887 		0x381000 - 0x381fff: gt
1888 		0x382000 - 0x383fff: reserved
1889 		0x384000 - 0x384aff: gt
1890 		0x384b00 - 0x3851ff: reserved
1891 		0x385200 - 0x3871ff: gt
1892 		0x387200 - 0x387fff: reserved
1893 		0x388000 - 0x38813f: gt
1894 		0x388140 - 0x38817f: reserved */
1895 	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
1896 		0x388180 - 0x3881ff: always on
1897 		0x388200 - 0x3882ff: reserved */
1898 	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
1899 		0x388300 - 0x38887f: gt
1900 		0x388880 - 0x388fff: reserved
1901 		0x389000 - 0x38947f: gt
1902 		0x389480 - 0x38955f: reserved */
1903 	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
1904 		0x389560 - 0x3895ff: always on
1905 		0x389600 - 0x389fff: reserved */
1906 	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
1907 		0x38a000 - 0x38afff: gt
1908 		0x38b000 - 0x38bfff: reserved
1909 		0x38c000 - 0x38cfff: gt */
1910 	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
1911 	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
1912 		0x38d120 - 0x38dfff: gt
1913 		0x38e000 - 0x38efff: reserved
1914 		0x38f000 - 0x38ffff: gt
1915 		0x390000 - 0x391fff: reserved */
1916 	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
1917 		0x392000 - 0x3927ff: always on
1918 		0x392800 - 0x392fff: reserved */
1919 	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
1920 	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
1921 	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
1922 	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
1923 	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
1924 		0x393500 - 0x393bff: reserved
1925 		0x393c00 - 0x393c7f: always on */
1926 	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
1927 };
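
/*
 * A minimal sketch of how a GEN_FW_RANGE table such as the ones above can
 * be consumed, assuming the struct intel_forcewake_range layout implied by
 * GEN_FW_RANGE (start, end, domains).  The driver's actual lookup, defined
 * earlier in this file, binary-searches the sorted, non-overlapping
 * ranges; the linear scan below is purely to illustrate the data layout,
 * not the real implementation.
 */
#if 0	/* illustrative sketch only */
static enum forcewake_domains
example_lookup_fw_domains(const struct intel_forcewake_range *table,
			  unsigned int count, u32 offset)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (offset >= table[i].start && offset <= table[i].end)
			return table[i].domains; /* 0 means no forcewake needed */

	return 0; /* offsets outside every range need no forcewake */
}
#endif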
1928 
1929 static void
1930 ilk_dummy_write(struct intel_uncore *uncore)
1931 {
1932 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1933 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
1934 	 * hence harmless to write 0 into. */
1935 	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1936 }
1937 
1938 static void
1939 __unclaimed_reg_debug(struct intel_uncore *uncore,
1940 		      const i915_reg_t reg,
1941 		      const bool read)
1942 {
1943 	if (drm_WARN(&uncore->i915->drm,
1944 		     check_for_unclaimed_mmio(uncore),
1945 		     "Unclaimed %s register 0x%x\n",
1946 		     read ? "read from" : "write to",
1947 		     i915_mmio_reg_offset(reg)))
1948 		/* Only report the first N failures */
1949 		uncore->i915->params.mmio_debug--;
1950 }
1951 
1952 static void
1953 __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1954 			       const i915_reg_t reg,
1955 			       const bool read)
1956 {
1957 	if (check_for_unclaimed_mmio(uncore))
1958 		drm_dbg(&uncore->i915->drm,
1959 			"Unclaimed access detected before %s register 0x%x\n",
1960 			read ? "read from" : "write to",
1961 			i915_mmio_reg_offset(reg));
1962 }
1963 
1964 static inline bool __must_check
1965 unclaimed_reg_debug_header(struct intel_uncore *uncore,
1966 			   const i915_reg_t reg, const bool read)
1967 {
1968 	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1969 		return false;
1970 
1971 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1972 	lockdep_assert_held(&uncore->lock);
1973 
1974 	spin_lock(&uncore->debug->lock);
1975 	__unclaimed_previous_reg_debug(uncore, reg, read);
1976 
1977 	return true;
1978 }
1979 
1980 static inline void
1981 unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1982 			   const i915_reg_t reg, const bool read)
1983 {
1984 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1985 	lockdep_assert_held(&uncore->lock);
1986 
1987 	__unclaimed_reg_debug(uncore, reg, read);
1988 	spin_unlock(&uncore->debug->lock);
1989 }
1990 
1991 #define __vgpu_read(x) \
1992 static u##x \
1993 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1994 	u##x val = __raw_uncore_read##x(uncore, reg); \
1995 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1996 	return val; \
1997 }
1998 __vgpu_read(8)
1999 __vgpu_read(16)
2000 __vgpu_read(32)
2001 __vgpu_read(64)
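
/*
 * For reference, __vgpu_read(32) above expands to:
 *
 *	static u32
 *	vgpu_read32(struct intel_uncore *uncore, i915_reg_t reg, bool trace) {
 *		u32 val = __raw_uncore_read32(uncore, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 *
 * i.e. a raw, traced MMIO read with no forcewake handling, since under a
 * vGPU the hypervisor manages the hardware power state for the guest.
 */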
2002 
2003 #define GEN2_READ_HEADER(x) \
2004 	u##x val = 0; \
2005 	assert_rpm_wakelock_held(uncore->rpm);
2006 
2007 #define GEN2_READ_FOOTER \
2008 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
2009 	return val
2010 
2011 #define __gen2_read(x) \
2012 static u##x \
2013 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
2014 	GEN2_READ_HEADER(x); \
2015 	val = __raw_uncore_read##x(uncore, reg); \
2016 	GEN2_READ_FOOTER; \
2017 }
2018 
2019 #define __gen5_read(x) \
2020 static u##x \
2021 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
2022 	GEN2_READ_HEADER(x); \
2023 	ilk_dummy_write(uncore); \
2024 	val = __raw_uncore_read##x(uncore, reg); \
2025 	GEN2_READ_FOOTER; \
2026 }
2027 
2028 __gen5_read(8)
2029 __gen5_read(16)
2030 __gen5_read(32)
2031 __gen5_read(64)
2032 __gen2_read(8)
2033 __gen2_read(16)
2034 __gen2_read(32)
2035 __gen2_read(64)
2036 
2037 #undef __gen5_read
2038 #undef __gen2_read
2039 
2040 #undef GEN2_READ_FOOTER
2041 #undef GEN2_READ_HEADER
2042 
2043 #define GEN6_READ_HEADER(x) \
2044 	u32 offset = i915_mmio_reg_offset(reg); \
2045 	unsigned long irqflags; \
2046 	bool unclaimed_reg_debug; \
2047 	u##x val = 0; \
2048 	assert_rpm_wakelock_held(uncore->rpm); \
2049 	spin_lock_irqsave(&uncore->lock, irqflags); \
2050 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
2051 
2052 #define GEN6_READ_FOOTER \
2053 	if (unclaimed_reg_debug) \
2054 		unclaimed_reg_debug_footer(uncore, reg, true);	\
2055 	spin_unlock_irqrestore(&uncore->lock, irqflags); \
2056 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
2057 	return val
2058 
2059 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
2060 					enum forcewake_domains fw_domains)
2061 {
2062 	struct intel_uncore_forcewake_domain *domain;
2063 	unsigned int tmp;
2064 
2065 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
2066 
2067 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
2068 		fw_domain_arm_timer(domain);
2069 
2070 	fw_domains_get(uncore, fw_domains);
2071 }
2072 
2073 static inline void __force_wake_auto(struct intel_uncore *uncore,
2074 				     enum forcewake_domains fw_domains)
2075 {
2076 	GEM_BUG_ON(!fw_domains);
2077 
2078 	/* Turn on all requested but inactive supported forcewake domains. */
2079 	fw_domains &= uncore->fw_domains;
2080 	fw_domains &= ~uncore->fw_domains_active;
2081 
2082 	if (fw_domains)
2083 		___force_wake_auto(uncore, fw_domains);
2084 }
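
/*
 * The split between the two helpers above keeps the common case cheap:
 * __force_wake_auto() is inlined into every fwtable accessor and, when all
 * required domains are already active, reduces to a couple of mask
 * operations and a not-taken branch, while the noinline
 * ___force_wake_auto() slow path (arming the per-domain auto-release
 * timers and actually asserting forcewake) stays out of line.
 */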
2085 
2086 #define __gen_fwtable_read(x) \
2087 static u##x \
2088 fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
2089 { \
2090 	enum forcewake_domains fw_engine; \
2091 	GEN6_READ_HEADER(x); \
2092 	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
2093 	if (fw_engine) \
2094 		__force_wake_auto(uncore, fw_engine); \
2095 	val = __raw_uncore_read##x(uncore, reg); \
2096 	GEN6_READ_FOOTER; \
2097 }
2098 
2099 static enum forcewake_domains
2100 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
2101 	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
2102 }
2103 
2104 __gen_fwtable_read(8)
2105 __gen_fwtable_read(16)
2106 __gen_fwtable_read(32)
2107 __gen_fwtable_read(64)
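
/*
 * Putting GEN6_READ_HEADER, the __gen_fwtable_read body and
 * GEN6_READ_FOOTER together, each generated fwtable_read##x() performs,
 * in order:
 *
 *	1. assert an RPM wakelock is held and take uncore->lock (irqsave);
 *	2. optionally start the unclaimed-mmio debug check;
 *	3. look up the forcewake domains covering the offset in the
 *	   fw_domains_table;
 *	4. wake any required domains that are not already active;
 *	5. perform the raw MMIO read;
 *	6. finish the unclaimed-mmio check, drop the lock and trace the read.
 */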
2108 
2109 #undef __gen_fwtable_read
2110 #undef GEN6_READ_FOOTER
2111 #undef GEN6_READ_HEADER
2112 
2113 #define GEN2_WRITE_HEADER \
2114 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2115 	assert_rpm_wakelock_held(uncore->rpm); \
2116 
2117 #define GEN2_WRITE_FOOTER
2118 
2119 #define __gen2_write(x) \
2120 static void \
2121 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2122 	GEN2_WRITE_HEADER; \
2123 	__raw_uncore_write##x(uncore, reg, val); \
2124 	GEN2_WRITE_FOOTER; \
2125 }
2126 
2127 #define __gen5_write(x) \
2128 static void \
2129 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2130 	GEN2_WRITE_HEADER; \
2131 	ilk_dummy_write(uncore); \
2132 	__raw_uncore_write##x(uncore, reg, val); \
2133 	GEN2_WRITE_FOOTER; \
2134 }
2135 
2136 __gen5_write(8)
2137 __gen5_write(16)
2138 __gen5_write(32)
2139 __gen2_write(8)
2140 __gen2_write(16)
2141 __gen2_write(32)
2142 
2143 #undef __gen5_write
2144 #undef __gen2_write
2145 
2146 #undef GEN2_WRITE_FOOTER
2147 #undef GEN2_WRITE_HEADER
2148 
2149 #define GEN6_WRITE_HEADER \
2150 	u32 offset = i915_mmio_reg_offset(reg); \
2151 	unsigned long irqflags; \
2152 	bool unclaimed_reg_debug; \
2153 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2154 	assert_rpm_wakelock_held(uncore->rpm); \
2155 	spin_lock_irqsave(&uncore->lock, irqflags); \
2156 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
2157 
2158 #define GEN6_WRITE_FOOTER \
2159 	if (unclaimed_reg_debug) \
2160 		unclaimed_reg_debug_footer(uncore, reg, false); \
2161 	spin_unlock_irqrestore(&uncore->lock, irqflags)
2162 
2163 #define __gen6_write(x) \
2164 static void \
2165 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2166 	GEN6_WRITE_HEADER; \
2167 	if (NEEDS_FORCE_WAKE(offset)) \
2168 		__gen6_gt_wait_for_fifo(uncore); \
2169 	__raw_uncore_write##x(uncore, reg, val); \
2170 	GEN6_WRITE_FOOTER; \
2171 }
2172 __gen6_write(8)
2173 __gen6_write(16)
2174 __gen6_write(32)
2175 
2176 #define __gen_fwtable_write(x) \
2177 static void \
2178 fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2179 	enum forcewake_domains fw_engine; \
2180 	GEN6_WRITE_HEADER; \
2181 	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
2182 	if (fw_engine) \
2183 		__force_wake_auto(uncore, fw_engine); \
2184 	__raw_uncore_write##x(uncore, reg, val); \
2185 	GEN6_WRITE_FOOTER; \
2186 }
2187 
2188 static enum forcewake_domains
2189 fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2190 {
2191 	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2192 }
2193 
2194 __gen_fwtable_write(8)
2195 __gen_fwtable_write(16)
2196 __gen_fwtable_write(32)
2197 
2198 #undef __gen_fwtable_write
2199 #undef GEN6_WRITE_FOOTER
2200 #undef GEN6_WRITE_HEADER
2201 
2202 #define __vgpu_write(x) \
2203 static void \
2204 vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2205 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2206 	__raw_uncore_write##x(uncore, reg, val); \
2207 }
2208 __vgpu_write(8)
2209 __vgpu_write(16)
2210 __vgpu_write(32)
2211 
2212 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
2213 do { \
2214 	(uncore)->funcs.mmio_writeb = x##_write8; \
2215 	(uncore)->funcs.mmio_writew = x##_write16; \
2216 	(uncore)->funcs.mmio_writel = x##_write32; \
2217 } while (0)
2218 
2219 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
2220 do { \
2221 	(uncore)->funcs.mmio_readb = x##_read8; \
2222 	(uncore)->funcs.mmio_readw = x##_read16; \
2223 	(uncore)->funcs.mmio_readl = x##_read32; \
2224 	(uncore)->funcs.mmio_readq = x##_read64; \
2225 } while (0)
2226 
2227 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2228 do { \
2229 	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2230 	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2231 } while (0)
2232 
2233 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2234 do { \
2235 	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2236 	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2237 } while (0)
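
/*
 * For example, ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable) expands (ignoring
 * the do { } while (0) wrappers) to:
 *
 *	(uncore)->funcs.mmio_readb = fwtable_read8;
 *	(uncore)->funcs.mmio_readw = fwtable_read16;
 *	(uncore)->funcs.mmio_readl = fwtable_read32;
 *	(uncore)->funcs.mmio_readq = fwtable_read64;
 *	(uncore)->funcs.read_fw_domains = fwtable_reg_read_fw_domains;
 *
 * which is how the macro-generated accessors above get wired into the
 * uncore vtable during init.
 */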
2238 
2239 static int __fw_domain_init(struct intel_uncore *uncore,
2240 			    enum forcewake_domain_id domain_id,
2241 			    i915_reg_t reg_set,
2242 			    i915_reg_t reg_ack)
2243 {
2244 	struct intel_uncore_forcewake_domain *d;
2245 
2246 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2247 	GEM_BUG_ON(uncore->fw_domain[domain_id]);
2248 
2249 	if (i915_inject_probe_failure(uncore->i915))
2250 		return -ENOMEM;
2251 
2252 	d = kzalloc(sizeof(*d), GFP_KERNEL);
2253 	if (!d)
2254 		return -ENOMEM;
2255 
2256 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2257 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2258 
2259 	d->uncore = uncore;
2260 	d->wake_count = 0;
2261 	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2262 	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2263 
2264 	d->id = domain_id;
2265 
2266 	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
2267 	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
2268 	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
2269 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2270 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2271 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2272 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
2273 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2274 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2275 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2276 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
2277 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2278 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
2279 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2280 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
2281 	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));
2282 
2283 	d->mask = BIT(domain_id);
2284 
2285 #ifdef __linux__
2286 	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2287 	d->timer.function = intel_uncore_fw_release_timer;
2288 #else
2289 	timeout_set(&d->timer, intel_uncore_fw_release_timer, d);
2290 #endif
2291 
2292 	uncore->fw_domains |= BIT(domain_id);
2293 
2294 	fw_domain_reset(d);
2295 
2296 	uncore->fw_domain[domain_id] = d;
2297 
2298 	return 0;
2299 }
2300 
2301 static void fw_domain_fini(struct intel_uncore *uncore,
2302 			   enum forcewake_domain_id domain_id)
2303 {
2304 	struct intel_uncore_forcewake_domain *d;
2305 
2306 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2307 
2308 	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2309 	if (!d)
2310 		return;
2311 
2312 	uncore->fw_domains &= ~BIT(domain_id);
2313 	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2314 	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2315 	kfree(d);
2316 }
2317 
2318 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2319 {
2320 	struct intel_uncore_forcewake_domain *d;
2321 	int tmp;
2322 
2323 	for_each_fw_domain(d, uncore, tmp)
2324 		fw_domain_fini(uncore, d->id);
2325 }
2326 
2327 static const struct intel_uncore_fw_get uncore_get_fallback = {
2328 	.force_wake_get = fw_domains_get_with_fallback
2329 };
2330 
2331 static const struct intel_uncore_fw_get uncore_get_normal = {
2332 	.force_wake_get = fw_domains_get_normal,
2333 };
2334 
2335 static const struct intel_uncore_fw_get uncore_get_thread_status = {
2336 	.force_wake_get = fw_domains_get_with_thread_status
2337 };
2338 
2339 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2340 {
2341 	struct drm_i915_private *i915 = uncore->i915;
2342 	int ret = 0;
2343 
2344 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2345 
2346 #define fw_domain_init(uncore__, id__, set__, ack__) \
2347 	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
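
/*
 * Note: the GNU "?:" idiom above latches the first error.  Once ret is
 * non-zero, each subsequent fw_domain_init() evaluates to ret without
 * calling __fw_domain_init() again, so the chain of initializations below
 * needs no per-call error checks.
 */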
2348 
2349 	if (GRAPHICS_VER(i915) >= 11) {
2350 		intel_engine_mask_t emask;
2351 		int i;
2352 
2353 		/* we'll prune the domains of missing engines later */
2354 		emask = uncore->gt->info.engine_mask;
2355 
2356 		uncore->fw_get_funcs = &uncore_get_fallback;
2357 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2358 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2359 				       FORCEWAKE_GT_GEN9,
2360 				       FORCEWAKE_ACK_GT_MTL);
2361 		else
2362 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2363 				       FORCEWAKE_GT_GEN9,
2364 				       FORCEWAKE_ACK_GT_GEN9);
2365 
2366 		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2367 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2368 				       FORCEWAKE_RENDER_GEN9,
2369 				       FORCEWAKE_ACK_RENDER_GEN9);
2370 
2371 		for (i = 0; i < I915_MAX_VCS; i++) {
2372 			if (!__HAS_ENGINE(emask, _VCS(i)))
2373 				continue;
2374 
2375 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2376 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2377 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2378 		}
2379 		for (i = 0; i < I915_MAX_VECS; i++) {
2380 			if (!__HAS_ENGINE(emask, _VECS(i)))
2381 				continue;
2382 
2383 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2384 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2385 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2386 		}
2387 
2388 		if (uncore->gt->type == GT_MEDIA)
2389 			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2390 				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
2391 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2392 		uncore->fw_get_funcs = &uncore_get_fallback;
2393 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2394 			       FORCEWAKE_RENDER_GEN9,
2395 			       FORCEWAKE_ACK_RENDER_GEN9);
2396 		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2397 			       FORCEWAKE_GT_GEN9,
2398 			       FORCEWAKE_ACK_GT_GEN9);
2399 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2400 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
2401 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2402 		uncore->fw_get_funcs = &uncore_get_normal;
2403 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2404 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
2405 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2406 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
2407 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2408 		uncore->fw_get_funcs = &uncore_get_thread_status;
2409 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2410 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2411 	} else if (IS_IVYBRIDGE(i915)) {
2412 		u32 ecobus;
2413 
2414 		/* IVB configs may use multi-threaded forcewake */
2415 
2416 		/* A small trick here - if the bios hasn't configured
2417 		 * MT forcewake, and if the device is in RC6, then
2418 		 * force_wake_mt_get will not wake the device and the
2419 		 * ECOBUS read will return zero, which will be
2420 		 * (correctly) interpreted by the test below as MT
2421 		 * forcewake being disabled.
2422 		 */
2423 		uncore->fw_get_funcs = &uncore_get_thread_status;
2424 
2425 		/* We need to init first for ECOBUS access and then
2426 		 * determine later if we want to reinit, in case MT access is
2427 		 * not working. At this stage we don't know which flavour this
2428 		 * ivb is, so it is better to reset also the gen6 fw registers
2429 		 * before the ecobus check.
2430 		 */
2431 
2432 		__raw_uncore_write32(uncore, FORCEWAKE, 0);
2433 		__raw_posting_read(uncore, ECOBUS);
2434 
2435 		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2436 				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2437 		if (ret)
2438 			goto out;
2439 
2440 		spin_lock_irq(&uncore->lock);
2441 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2442 		ecobus = __raw_uncore_read32(uncore, ECOBUS);
2443 		fw_domains_put(uncore, FORCEWAKE_RENDER);
2444 		spin_unlock_irq(&uncore->lock);
2445 
2446 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
2447 			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2448 			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
2449 			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2450 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2451 				       FORCEWAKE, FORCEWAKE_ACK);
2452 		}
2453 	} else if (GRAPHICS_VER(i915) == 6) {
2454 		uncore->fw_get_funcs = &uncore_get_thread_status;
2455 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2456 			       FORCEWAKE, FORCEWAKE_ACK);
2457 	}
2458 
2459 #undef fw_domain_init
2460 
2461 	/* All future platforms are expected to require complex power gating */
2462 	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2463 
2464 out:
2465 	if (ret)
2466 		intel_uncore_fw_domains_fini(uncore);
2467 
2468 	return ret;
2469 }
2470 
2471 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2472 { \
2473 	(uncore)->fw_domains_table = \
2474 			(struct intel_forcewake_range *)(d); \
2475 	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2476 }
2477 
2478 #define ASSIGN_SHADOW_TABLE(uncore, d) \
2479 { \
2480 	(uncore)->shadowed_reg_table = d; \
2481 	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2482 }
2483 
2484 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2485 					 unsigned long action, void *data)
2486 {
2487 	struct intel_uncore *uncore = container_of(nb,
2488 			struct intel_uncore, pmic_bus_access_nb);
2489 
2490 	switch (action) {
2491 	case MBI_PMIC_BUS_ACCESS_BEGIN:
2492 		/*
2493 		 * Forcewake all now to make sure that we don't need to do a
2494 		 * forcewake later, which on systems where this notifier gets
2495 		 * called would require the punit to access the shared pmic i2c
2496 		 * bus, which will be busy after this notification, leading to:
2497 		 * "render: timed out waiting for forcewake ack request."
2498 		 * errors.
2499 		 *
2500 		 * The notifier is unregistered during intel_runtime_suspend(),
2501 		 * so it's ok to access the HW here without holding a RPM
2502 		 * wake reference -> disable wakeref asserts for the time of
2503 		 * the access.
2504 		 */
2505 		disable_rpm_wakeref_asserts(uncore->rpm);
2506 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2507 		enable_rpm_wakeref_asserts(uncore->rpm);
2508 		break;
2509 	case MBI_PMIC_BUS_ACCESS_END:
2510 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2511 		break;
2512 	}
2513 
2514 	return NOTIFY_OK;
2515 }
2516 
2517 static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2518 {
2519 #ifdef __linux__
2520 	iounmap((void __iomem *)regs);
2521 #endif
2522 }
2523 
2524 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2525 {
2526 	struct drm_i915_private *i915 = uncore->i915;
2527 	int mmio_size;
2528 
2529 	/*
2530 	 * Before gen4, the registers and the GTT are behind different BARs.
2531 	 * However, from gen4 onwards, the registers and the GTT are shared
2532 	 * in the same BAR, so we restrict this ioremap to avoid clobbering
2533 	 * the GTT, which we want mapped with ioremap_wc instead. Fortunately,
2534 	 * the register BAR remains the same size for all the earlier
2535 	 * generations up to Ironlake.
2536 	 * For dgfx chips the register range is expanded to 4MB, and this larger
2537 	 * range is also used for integrated gpus beginning with Meteor Lake.
2538 	 */
2539 	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2540 		mmio_size = 4 * 1024 * 1024;
2541 	else if (GRAPHICS_VER(i915) >= 5)
2542 		mmio_size = 2 * 1024 * 1024;
2543 	else
2544 		mmio_size = 512 * 1024;
2545 #ifdef __linux__
2546 	uncore->regs = ioremap(phys_addr, mmio_size);
2547 	if (uncore->regs == NULL) {
2548 		drm_err(&i915->drm, "failed to map registers\n");
2549 		return -EIO;
2550 	}
2551 #endif
2552 
2553 	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2554 					(void __force *)uncore->regs);
2555 }
2556 
2557 void intel_uncore_init_early(struct intel_uncore *uncore,
2558 			     struct intel_gt *gt)
2559 {
2560 	mtx_init(&uncore->lock, IPL_TTY);
2561 	uncore->i915 = gt->i915;
2562 	uncore->gt = gt;
2563 	uncore->rpm = &gt->i915->runtime_pm;
2564 }
2565 
2566 static void uncore_raw_init(struct intel_uncore *uncore)
2567 {
2568 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2569 
2570 	if (intel_vgpu_active(uncore->i915)) {
2571 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2572 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2573 	} else if (GRAPHICS_VER(uncore->i915) == 5) {
2574 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2575 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2576 	} else {
2577 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2578 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2579 	}
2580 }
2581 
2582 static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2583 {
2584 	struct drm_i915_private *i915 = uncore->i915;
2585 
2586 	if (MEDIA_VER(i915) >= 13) {
2587 		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2588 		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2589 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2590 	} else {
2591 		MISSING_CASE(MEDIA_VER(i915));
2592 		return -ENODEV;
2593 	}
2594 
2595 	return 0;
2596 }
2597 
2598 static int uncore_forcewake_init(struct intel_uncore *uncore)
2599 {
2600 	struct drm_i915_private *i915 = uncore->i915;
2601 	int ret;
2602 
2603 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2604 
2605 	ret = intel_uncore_fw_domains_init(uncore);
2606 	if (ret)
2607 		return ret;
2608 	forcewake_early_sanitize(uncore, 0);
2609 
2610 	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2611 
2612 	if (uncore->gt->type == GT_MEDIA)
2613 		return uncore_media_forcewake_init(uncore);
2614 
2615 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2616 		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2617 		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2618 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2619 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
2620 		ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
2621 		ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
2622 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2623 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2624 		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2625 		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2626 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2627 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
2628 		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
2629 		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2630 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2631 	} else if (GRAPHICS_VER(i915) >= 12) {
2632 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2633 		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2634 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2635 	} else if (GRAPHICS_VER(i915) == 11) {
2636 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2637 		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2638 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2639 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2640 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2641 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2642 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2643 	} else if (IS_CHERRYVIEW(i915)) {
2644 		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2645 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2646 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2647 	} else if (GRAPHICS_VER(i915) == 8) {
2648 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2649 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2650 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2651 	} else if (IS_VALLEYVIEW(i915)) {
2652 		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2653 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2654 	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2655 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2656 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2657 	}
2658 
2659 	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2660 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2661 
2662 	return 0;
2663 }
2664 
2665 static int sanity_check_mmio_access(struct intel_uncore *uncore)
2666 {
2667 	struct drm_i915_private *i915 = uncore->i915;
2668 
2669 	if (GRAPHICS_VER(i915) < 8)
2670 		return 0;
2671 
2672 	/*
2673 	 * Sanitycheck that MMIO access to the device is working properly.  If
2674 	 * the CPU is unable to communicate with a PCI device, BAR reads will
2675 	 * return 0xFFFFFFFF.  Let's make sure the device isn't in this state
2676 	 * before we start trying to access registers.
2677 	 *
2678 	 * We use the primary GT's forcewake register as our guinea pig since
2679 	 * it's been around since HSW and it's a masked register so the upper
2680 	 * 16 bits can never read back as 1's if device access is operating
2681 	 * properly.
2682 	 *
2683 	 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2684 	 * recovers, then give up.
2685 	 */
2686 #define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2687 	if (wait_for(COND, 2000) == -ETIMEDOUT) {
2688 		drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2689 		return -EIO;
2690 	}
2691 
2692 	return 0;
2693 }
2694 
2695 int intel_uncore_init_mmio(struct intel_uncore *uncore)
2696 {
2697 	struct drm_i915_private *i915 = uncore->i915;
2698 	int ret;
2699 
2700 	ret = sanity_check_mmio_access(uncore);
2701 	if (ret)
2702 		return ret;
2703 
2704 	/*
2705 	 * The boot firmware initializes local memory and assesses its health.
2706 	 * If memory training fails, the punit will have been instructed to
2707 	 * keep the GT powered down; we won't be able to communicate with it
2708 	 * and we should not continue with driver initialization.
2709 	 */
2710 	if (IS_DGFX(i915) &&
2711 	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2712 		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2713 		return -ENODEV;
2714 	}
2715 
2716 	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2717 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
2718 
2719 	if (!intel_uncore_has_forcewake(uncore)) {
2720 		uncore_raw_init(uncore);
2721 	} else {
2722 		ret = uncore_forcewake_init(uncore);
2723 		if (ret)
2724 			return ret;
2725 	}
2726 
2727 	/* make sure fw funcs are set if and only if we have fw */
2728 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2729 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2730 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2731 
2732 	if (HAS_FPGA_DBG_UNCLAIMED(i915))
2733 		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2734 
2735 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2736 		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2737 
2738 	if (IS_GRAPHICS_VER(i915, 6, 7))
2739 		uncore->flags |= UNCORE_HAS_FIFO;
2740 
2741 	/* clear out unclaimed reg detection bit */
2742 	if (intel_uncore_unclaimed_mmio(uncore))
2743 		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2744 
2745 	return 0;
2746 }
2747 
2748 /*
2749  * We might have detected that some engines are fused off after we initialized
2750  * the forcewake domains. Prune them, to make sure they only reference existing
2751  * engines.
2752  */
2753 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2754 					  struct intel_gt *gt)
2755 {
2756 	enum forcewake_domains fw_domains = uncore->fw_domains;
2757 	enum forcewake_domain_id domain_id;
2758 	int i;
2759 
2760 	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2761 		return;
2762 
2763 	for (i = 0; i < I915_MAX_VCS; i++) {
2764 		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2765 
2766 		if (HAS_ENGINE(gt, _VCS(i)))
2767 			continue;
2768 
2769 		/*
2770 		 * Starting with XeHP, the power well for an even-numbered
2771 		 * VDBOX is also used for shared units within the
2772 		 * media slice such as SFC.  So even if the engine
2773 		 * itself is fused off, we still need to initialize
2774 		 * the forcewake domain if any of the other engines
2775 		 * in the same media slice are present.
2776 		 */
2777 		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
2778 			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2779 				continue;
2780 
2781 			if (HAS_ENGINE(gt, _VECS(i / 2)))
2782 				continue;
2783 		}
2784 
2785 		if (fw_domains & BIT(domain_id))
2786 			fw_domain_fini(uncore, domain_id);
2787 	}
2788 
2789 	for (i = 0; i < I915_MAX_VECS; i++) {
2790 		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2791 
2792 		if (HAS_ENGINE(gt, _VECS(i)))
2793 			continue;
2794 
2795 		if (fw_domains & BIT(domain_id))
2796 			fw_domain_fini(uncore, domain_id);
2797 	}
2798 
2799 	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
2800 		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2801 }
2802 
2803 /*
2804  * The driver-initiated FLR is the highest level of reset that we can trigger
2805  * from within the driver. It is different from the PCI FLR in that it doesn't
2806  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
2807  * it doesn't require a re-enumeration of the PCI BARs. However, the
2808  * driver-initiated FLR does still cause a reset of both GT and display and a
2809  * memory wipe of local and stolen memory, so recovery would require a full HW
2810  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
2811  * perform the FLR as the very last action before releasing access to the HW
2812  * during the driver release flow, we don't attempt recovery at all, because
2813  * if/when a new instance of i915 is bound to the device it will do a full
2814  * re-init anyway.
2815  */
2816 static void driver_initiated_flr(struct intel_uncore *uncore)
2817 {
2818 	struct drm_i915_private *i915 = uncore->i915;
2819 	const unsigned int flr_timeout_ms = 3000; /* specs recommend a 3s wait */
2820 	int ret;
2821 
2822 	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
2823 
2824 	/*
2825 	 * Make sure any pending FLR requests have cleared by waiting for the
2826 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
2827 	 * to make sure it's not still set from a prior attempt (it's a write to
2828 	 * to make sure it's not still set from a prior attempt (it's a
2829 	 * write-to-clear bit).
2830 	 * Note that we should never be in a situation where a previous attempt
2831 	 * is still pending (unless the HW is totally dead), but better to be
2832 	 * safe in case something unexpected happens.
2833 	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
2834 	if (ret) {
2835 		drm_err(&i915->drm,
2836 			"Failed to wait for Driver-FLR bit to clear! %d\n",
2837 			ret);
2838 		return;
2839 	}
2840 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2841 
2842 	/* Trigger the actual Driver-FLR */
2843 	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2844 
2845 	/* Wait for hardware teardown to complete */
2846 	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2847 					 DRIVERFLR, 0,
2848 					 flr_timeout_ms);
2849 	if (ret) {
2850 		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
2851 		return;
2852 	}
2853 
2854 	/* Wait for hardware/firmware re-init to complete */
2855 	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2856 					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
2857 					 flr_timeout_ms);
2858 	if (ret) {
2859 		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
2860 		return;
2861 	}
2862 
2863 	/* Clear sticky completion status */
2864 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2865 }
2866 
2867 /* Called via drm-managed action */
2868 void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
2869 {
2870 	struct intel_uncore *uncore = data;
2871 
2872 	if (intel_uncore_has_forcewake(uncore)) {
2873 		iosf_mbi_punit_acquire();
2874 		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2875 			&uncore->pmic_bus_access_nb);
2876 		intel_uncore_forcewake_reset(uncore);
2877 		intel_uncore_fw_domains_fini(uncore);
2878 		iosf_mbi_punit_release();
2879 	}
2880 
2881 	if (intel_uncore_needs_flr_on_fini(uncore))
2882 		driver_initiated_flr(uncore);
2883 }
2884 
2885 /**
2886  * __intel_wait_for_register_fw - wait until register matches expected state
2887  * @uncore: the struct intel_uncore
2888  * @reg: the register to read
2889  * @mask: mask to apply to register value
2890  * @value: expected value
2891  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2892  * @slow_timeout_ms: slow timeout in millisecond
2893  * @out_value: optional placeholder to hold the register value
2894  *
2895  * This routine waits until the target register @reg contains the expected
2896  * @value after applying the @mask, i.e. it waits until ::
2897  *
2898  *     (intel_uncore_read_fw(uncore, reg) & mask) == value
2899  *
2900  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2901  * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2902  * must not be larger than 20,000 microseconds.
2903  *
2904  * Note that this routine assumes the caller holds forcewake asserted; it is
2905  * not suitable for very long waits. See intel_wait_for_register() if you
2906  * wish to wait without holding forcewake for the duration (i.e. you expect
2907  * the wait to be slow).
2908  *
2909  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2910  */
2911 int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2912 				 i915_reg_t reg,
2913 				 u32 mask,
2914 				 u32 value,
2915 				 unsigned int fast_timeout_us,
2916 				 unsigned int slow_timeout_ms,
2917 				 u32 *out_value)
2918 {
2919 	u32 reg_value = 0;
2920 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2921 	int ret;
2922 
2923 	/* Catch any overuse of this function */
2924 	might_sleep_if(slow_timeout_ms);
2925 	GEM_BUG_ON(fast_timeout_us > 20000);
2926 	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2927 
2928 	ret = -ETIMEDOUT;
2929 	if (fast_timeout_us && fast_timeout_us <= 20000)
2930 		ret = _wait_for_atomic(done, fast_timeout_us, 0);
2931 	if (ret && slow_timeout_ms)
2932 		ret = wait_for(done, slow_timeout_ms);
2933 
2934 	if (out_value)
2935 		*out_value = reg_value;
2936 
2937 	return ret;
2938 #undef done
2939 }
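
/*
 * A hedged usage sketch (not taken verbatim from this driver): poll a
 * register until a bit clears, with no atomic spin and a sleeping 3 s
 * timeout.  The caller is assumed to hold the needed forcewake, per the
 * note above.  driver_initiated_flr() below does essentially this via the
 * intel_wait_for_register_fw() wrapper.
 */
#if 0	/* illustrative sketch only */
static int example_wait_for_flr_clear(struct intel_uncore *uncore)
{
	u32 cntl;
	int err;

	/* fast_timeout_us = 0 skips the atomic spin; sleep up to 3000 ms */
	err = __intel_wait_for_register_fw(uncore, GU_CNTL,
					   DRIVERFLR, 0,
					   0, 3000, &cntl);
	if (err == -ETIMEDOUT)
		drm_err(&uncore->i915->drm,
			"DRIVERFLR did not clear, GU_CNTL=0x%08x\n", cntl);

	return err;
}
#endif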
2940 
2941 /**
2942  * __intel_wait_for_register - wait until register matches expected state
2943  * @uncore: the struct intel_uncore
2944  * @reg: the register to read
2945  * @mask: mask to apply to register value
2946  * @value: expected value
2947  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2948  * @slow_timeout_ms: slow timeout in millisecond
2949  * @out_value: optional placeholder to hold the register value
2950  *
2951  * This routine waits until the target register @reg contains the expected
2952  * @value after applying the @mask, i.e. it waits until ::
2953  *
2954  *     (intel_uncore_read(uncore, reg) & mask) == value
2955  *
2956  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2957  *
2958  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2959  */
2960 int __intel_wait_for_register(struct intel_uncore *uncore,
2961 			      i915_reg_t reg,
2962 			      u32 mask,
2963 			      u32 value,
2964 			      unsigned int fast_timeout_us,
2965 			      unsigned int slow_timeout_ms,
2966 			      u32 *out_value)
2967 {
2968 	unsigned fw =
2969 		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2970 	u32 reg_value;
2971 	int ret;
2972 
2973 	might_sleep_if(slow_timeout_ms);
2974 
2975 	spin_lock_irq(&uncore->lock);
2976 	intel_uncore_forcewake_get__locked(uncore, fw);
2977 
2978 	ret = __intel_wait_for_register_fw(uncore,
2979 					   reg, mask, value,
2980 					   fast_timeout_us, 0, &reg_value);
2981 
2982 	intel_uncore_forcewake_put__locked(uncore, fw);
2983 	spin_unlock_irq(&uncore->lock);
2984 
2985 	if (ret && slow_timeout_ms)
2986 		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2987 								       reg),
2988 				 (reg_value & mask) == value,
2989 				 slow_timeout_ms * 1000, 10, 1000);
2990 
2991 	/* just trace the final value */
2992 	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2993 
2994 	if (out_value)
2995 		*out_value = reg_value;
2996 
2997 	return ret;
2998 }
2999 
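/*
 * Usage sketch (illustrative only, not part of this file): most callers go
 * through the intel_wait_for_register() helper from intel_uncore.h, which
 * wraps this function with a short fast timeout and no out_value, e.g.
 * waiting up to a hypothetical 500ms for an assumed "done" bit to assert:
 */
static int __maybe_unused
example_wait_for_done_bit(struct intel_uncore *uncore, i915_reg_t reg,
			  u32 done_bit)
{
	/* Forcewake is taken and released internally; may sleep. */
	return intel_wait_for_register(uncore, reg, done_bit, done_bit, 500);
}
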
3000 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
3001 {
3002 	bool ret;
3003 
3004 	if (!uncore->debug)
3005 		return false;
3006 
3007 	spin_lock_irq(&uncore->debug->lock);
3008 	ret = check_for_unclaimed_mmio(uncore);
3009 	spin_unlock_irq(&uncore->debug->lock);
3010 
3011 	return ret;
3012 }
3013 
3014 bool
3015 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
3016 {
3017 	bool ret = false;
3018 
3019 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
3020 		return false;
3021 
3022 	spin_lock_irq(&uncore->debug->lock);
3023 
3024 	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
3025 		goto out;
3026 
3027 	if (unlikely(check_for_unclaimed_mmio(uncore))) {
3028 		if (!uncore->i915->params.mmio_debug) {
3029 			drm_dbg(&uncore->i915->drm,
3030 				"Unclaimed register detected, "
3031 				"enabling oneshot unclaimed register reporting. "
3032 				"Please use i915.mmio_debug=N for more information.\n");
3033 			uncore->i915->params.mmio_debug++;
3034 		}
3035 		uncore->debug->unclaimed_mmio_check--;
3036 		ret = true;
3037 	}
3038 
3039 out:
3040 	spin_unlock_irq(&uncore->debug->lock);
3041 
3042 	return ret;
3043 }
3044 
3045 /**
3046  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
3047  * 				    a register
3048  * @uncore: pointer to struct intel_uncore
3049  * @reg: register in question
3050  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
3051  *
3052  * Returns a set of forcewake domains required to be taken with, for example,
3053  * intel_uncore_forcewake_get() for the specified register to be accessible in the
3054  * specified mode (read, write or read/write) with raw mmio accessors.
3055  *
3056  * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
3057  * callers to do FIFO management on their own or risk losing writes.
3058  */
3059 enum forcewake_domains
3060 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
3061 			       i915_reg_t reg, unsigned int op)
3062 {
3063 	enum forcewake_domains fw_domains = 0;
3064 
3065 	drm_WARN_ON(&uncore->i915->drm, !op);
3066 
3067 	if (!intel_uncore_has_forcewake(uncore))
3068 		return 0;
3069 
3070 	if (op & FW_REG_READ)
3071 		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
3072 
3073 	if (op & FW_REG_WRITE)
3074 		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
3075 
3076 	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
3077 
3078 	return fw_domains;
3079 }
3080 
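/*
 * Usage sketch (illustrative only, not part of this file): the intended
 * pattern is to look up the needed domains once, hold them across a run of
 * raw _fw accesses under uncore->lock, and then release them, mirroring
 * what __intel_wait_for_register() does above.
 */
static u32 __maybe_unused
example_read_with_explicit_fw(struct intel_uncore *uncore, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 val;

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	val = intel_uncore_read_fw(uncore, reg);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	return val;
}
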
3081 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3082 #include "selftests/mock_uncore.c"
3083 #include "selftests/intel_uncore.c"
3084 #endif
3085