/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack(const struct drm_i915_private *i915,
		   const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}

static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}
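
/*
 * Sketch of the FIFO accounting above (illustrative values, not taken from
 * hardware documentation): the cached fifo_count is a lower bound on free GT
 * FIFO entries, and each posted write consumes one entry, hence the "n - 1".
 * Once the cached count drops to GT_FIFO_NUM_RESERVED_ENTRIES, we re-read
 * GTFIFOCTL and, if needed, spin until the hardware has drained back above
 * the reserved watermark.
 */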

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}
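
/*
 * Worked example for the formula above (hypothetical capability bits, for
 * illustration only): a part reporting 8 banks, ways index 1 (8 ways) and
 * sets index 2 (2 sets) would advertise 8 * 8 * 2 MiB = 128 MiB of eDRAM.
 */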

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The capability bits needed for the size calculation are not
	 * present before gen9, so always return 128MB.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
{
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915_modparams.enable_rc6 =
		sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference subsequently dropped by a symmetric call to
 * intel_uncore_forcewake_put(). Usually the caller wants all the domains
 * to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
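
/*
 * Minimal usage sketch for the get/put pairing described above (illustrative
 * only; reg and val are placeholders, not names from this file):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	I915_WRITE_FW(reg, val);	(raw access, no implicit forcewake)
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */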

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN(dev_priv->uncore.fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     dev_priv->uncore.fw_domains_active);
}

void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	fw_domains &= dev_priv->uncore.fw_domains;
	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
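
/*
 * Illustrative check against the cutoff above: an offset of 0x2000 (the
 * start of the render ranges in the tables below) satisfies
 * NEEDS_FORCE_WAKE(), while a hypothetical offset at 0x40000 or above does
 * not and is accessed without waking any GT domain.
 */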

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
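
/*
 * Note on the contract assumed by BSEARCH(): @base must be sorted ascending
 * and @cmp must return a negative value, zero, or a positive value when @key
 * sorts before, matches, or sorts after an entry, exactly as fw_range_cmp()
 * above and mmio_reg_cmp() below do. The result is a pointer into @base, or
 * NULL when no entry matches.
 */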

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= BIT(domain_id);

	fw_domain_reset(dev_priv, d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 10),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	intel_runtime_pm_get(dev_priv);
	if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
		reg->val = I915_READ64_2x32(entry->offset_ldw,
					    entry->offset_udw);
	else if (entry->size == 8 && flags == 0)
		reg->val = I915_READ64(entry->offset_ldw);
	else if (entry->size == 4 && flags == 0)
		reg->val = I915_READ(entry->offset_ldw);
	else if (entry->size == 2 && flags == 0)
		reg->val = I915_READ16(entry->offset_ldw);
	else if (entry->size == 1 && flags == 0)
		reg->val = I915_READ8(entry->offset_ldw);
	else
		ret = -EINVAL;
	intel_runtime_pm_put(dev_priv);

	return ret;
}
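
/*
 * Note on the ioctl above: because every whitelisted register is aligned to
 * its size, the low bits of the user-supplied offset are reused as flags.
 * For the 8-byte RING_TIMESTAMP entry, for example, offset & 7 selects
 * between a plain 64-bit read (0) and the I915_REG_READ_8B_WA workaround
 * that reads the value as two 32-bit halves.
 */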

static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register_fw(dev_priv,
				       mode,
				       MODE_IDLE,
				       MODE_IDLE,
				       500))
		DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
				 engine->name);

	I915_WRITE_FW(RING_CTL(base), 0);
	I915_WRITE_FW(RING_HEAD(base), 0);
	I915_WRITE_FW(RING_TAIL(base), 0);

	/* Check acts as a post */
	if (I915_READ_FW(RING_HEAD(base)) != 0)
		DRM_DEBUG_DRIVER("%s: ring head not parked\n",
				 engine->name);
}

static void i915_stop_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_GEN(dev_priv) < 3)
		return;

	for_each_engine_masked(engine, dev_priv, engine_mask, id)
		gen3_stop_engine(engine);
}

static bool i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	usleep_range(50, 200);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	I915_WRITE(ILK_GDSR, 0);
	POSTING_READ(ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	int err;

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = intel_wait_for_register_fw(dev_priv,
					 GEN6_GDRST, hw_domain_mask, 0,
					 500);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
}
1588
1589 /**
1590 * __intel_wait_for_register_fw - wait until register matches expected state
1591 * @dev_priv: the i915 device
1592 * @reg: the register to read
1593 * @mask: mask to apply to register value
1594 * @value: expected value
1595 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
1596 * @slow_timeout_ms: slow timeout in millisecond
1597 * @out_value: optional placeholder to hold registry value
1598 *
1599 * This routine waits until the target register @reg contains the expected
1600 * @value after applying the @mask, i.e. it waits until ::
1601 *
1602 * (I915_READ_FW(reg) & mask) == value
1603 *
1604 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
1605 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
1606 * must be not larger than 20,0000 microseconds.
1607 *
1608 * Note that this routine assumes the caller holds forcewake asserted, it is
1609 * not suitable for very long waits. See intel_wait_for_register() if you
1610 * wish to wait without holding forcewake for the duration (i.e. you expect
1611 * the wait to be slow).
1612 *
1613 * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
1614 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
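
/*
 * Usage sketch (illustrative, not from the original file): spinning briefly
 * for an ack bit while forcewake is already held, without ever sleeping:
 *
 *	u32 ack;
 *
 *	err = __intel_wait_for_register_fw(dev_priv, GEN6_GDRST,
 *					   hw_domain_mask, 0,
 *					   500, 0, &ack);
 *
 * With fast_timeout_us=500 and slow_timeout_ms=0 only the atomic busy-wait
 * is used, so this form is also safe under a spinlock; @ack receives the
 * last value read whether or not the wait succeeded.
 */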

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   2, 0, NULL);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
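
/*
 * Design note with an illustrative caller (commentary, not upstream text):
 * the routine first spins for 2us with forcewake held, so the common
 * fast-acking case never sleeps, then drops forcewake and polls lazily for
 * the slow case. A typical sleeping-context caller might look like:
 *
 *	if (intel_wait_for_register(dev_priv, GEN6_GDRST,
 *				    GEN6_GRDOM_FULL, 0, 500))
 *		DRM_ERROR("full GPU domain reset did not complete\n");
 *
 * The register and timeout here are chosen purely for illustration.
 */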

static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_reset_engine_start(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_reset_engine_cancel(engine);

	return -EIO;
}
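
/*
 * Protocol summary (commentary added for clarity, not upstream text): gen8+
 * puts a handshake in front of the gen6-style domain reset. Each engine is
 * asked for permission via RESET_CTL_REQUEST_RESET, the hardware acks with
 * RESET_CTL_READY_TO_RESET, and only then is the domain reset issued via
 * gen6_reset_engines(). If any engine fails to ack within 700ms, every
 * request is cancelled so no engine is left with a stale reset request
 * pending, and -EIO is returned.
 */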

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915_modparams.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset = intel_get_gpu_reset(dev_priv);
	int retry;
	int ret;

	might_sleep();

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	for (retry = 0; retry < 3; retry++) {

		/* We stop the engines first, otherwise we might get a failed
		 * reset and a dead gpu (on elk). Even a gpu as modern as kbl
		 * can suffer a system hang if a batchbuffer is progressing
		 * when the reset is issued, regardless of the READY_TO_RESET
		 * ack. Thus we assume it is best to stop the engines on all
		 * gens where we have a gpu reset.
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		i915_stop_engines(dev_priv, engine_mask);

		ret = -ENODEV;
		if (reset)
			ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;

		cond_resched();
	}
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
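
/*
 * Usage sketch (illustrative, not from the original file): an error-handling
 * path with no finer-grained option available would typically attempt a
 * full reset and then reinitialise ring state:
 *
 *	if (intel_has_gpu_reset(dev_priv))
 *		err = intel_gpu_reset(dev_priv, ALL_ENGINES);
 *
 * Note the retry loop above only re-attempts on -ETIMEDOUT; -ENODEV (no
 * reset method configured) and other errors are returned immediately after
 * the first attempt.
 */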

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

/*
 * When GuC submission is enabled, GuC manages ELSP and can initiate the
 * engine reset too. For now, fall back to full GPU reset if it is enabled.
 */
bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
	return (dev_priv->info.has_reset_engine &&
		!dev_priv->guc.execbuf_client &&
		i915_modparams.reset >= 2);
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
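
/*
 * Commentary (added for clarity, not upstream text): the GuC is reset
 * through the same GEN6_GDRST mechanism as the engines, just with its own
 * GEN9_GRDOM_GUC domain bit, which is why the forcewake bracketing here
 * mirrors intel_gpu_reset() above.
 */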

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915_modparams.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915_modparams.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * 				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains that need to be taken, e.g. via
 * intel_uncore_forcewake_get(), for the specified register to be accessible
 * in the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
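
/*
 * Usage sketch (illustrative, not from the original file): the canonical
 * raw-access pattern, as used by intel_wait_for_register() above, is to
 * look up the required domains once and hold them across a batch of
 * I915_READ_FW/I915_WRITE_FW accesses:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	I915_WRITE_FW(reg, I915_READ_FW(reg) | bit);
 *	intel_uncore_forcewake_put(dev_priv, fw);
 *
 * Here @reg and @bit are placeholders for whichever register and field the
 * caller is updating.
 */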

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif