/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       ktime_set(0, NSEC_PER_MSEC),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

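/*
 * Illustrative sketch (not compiled into the driver): the shape of a
 * gen6-style posted write, throttled against the GT wake FIFO by the
 * helpers around here. This is what the __gen6_write() macro further down
 * expands to; the helper name sample_gen6_posted_write() is hypothetical.
 *
 *	static void sample_gen6_posted_write(struct drm_i915_private *i915,
 *					     i915_reg_t reg, u32 val)
 *	{
 *		u32 fifo_ret = 0;
 *
 *		if (NEEDS_FORCE_WAKE(i915_mmio_reg_offset(reg)))
 *			fifo_ret = __gen6_gt_wait_for_fifo(i915);
 *		__raw_i915_write32(i915, reg, val);
 *		if (unlikely(fifo_ret))	   // FIFO ran dry, check for errors
 *			gen6_gt_check_fifodbg(i915);
 *	}
 */
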
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0) {
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
		dev_priv->uncore.fw_domains_active &= ~domain->mask;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}

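/*
 * Worked example (illustrative, assuming the EDRAM_* decode macros match
 * the tables above): a capability value decoding to 2 banks, ways index 1
 * (8 ways) and sets index 2 (2 sets) yields
 *
 *	2 * 8 * 2 * 1024 * 1024 bytes = 32MB
 *
 * of eDRAM reported by gen9_edram_size().
 */
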
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there on pre-gen9, so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
		dev_priv->uncore.fw_domains_active |= fw_domains;
	}
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

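/*
 * Illustrative usage sketch (not compiled into the driver): keeping the
 * render domain awake across a sequence of raw accesses. I915_READ_FW() is
 * the raw accessor that skips per-access forcewake handling.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	head = I915_READ_FW(RING_HEAD(RENDER_RING_BASE));
 *	tail = I915_READ_FW(RING_TAIL(RENDER_RING_BASE));
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * The get/put pair must be symmetric; the hardware reference is only
 * dropped lazily via the domain timer armed in fw_domain_arm_timer().
 */
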
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	return entry ? entry->domains : 0;
}

static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

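/*
 * Worked example (illustrative): on VLV, a read of offset 0x12180 falls in
 * the 0x12000-0x13fff range above, so __fwtable_reg_read_fw_domains()
 * returns FORCEWAKE_MEDIA, while 0x2030 (render ring tail) lands in
 * 0x2000-0x3fff and returns FORCEWAKE_RENDER. Offsets outside every range,
 * e.g. display registers above 0x40000, resolve to 0: no forcewake needed.
 */
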
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

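/*
 * Worked example (illustrative): a gen8 write to RING_TAIL(RENDER_RING_BASE)
 * (offset 0x2030) is found in gen8_shadowed_regs by is_gen8_shadowed(), so
 * __gen8_reg_write_fw_domains() returns 0 and the write is posted without
 * waking the GT. A write to an offset below 0x40000 that is not in the
 * table, say 0xa090, returns FORCEWAKE_RENDER instead.
 */
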
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

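/*
 * Illustrative flow (not compiled): what a single tracked read of a render
 * register does on gen6+ when the domain is asleep.
 *
 *	gen6_read32()
 *	  -> __force_wake_auto(dev_priv, FORCEWAKE_RENDER)
 *	       -> ___force_wake_auto()    wakes the domain and arms its
 *	                                  hrtimer via fw_domain_arm_timer()
 *	  -> __raw_i915_read32()          the actual MMIO access
 *	  ... ~1ms later ...
 *	intel_uncore_fw_release_timer()   drops the auto reference again
 *
 * Subsequent reads within the timer window find the domain already in
 * fw_domains_active and skip the wake handshake entirely.
 */
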
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
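		/*
		 * Illustrative note (stated as an assumption, with
		 * FORCEWAKE_KERNEL taken to be bit 0): these are "masked"
		 * registers, where the upper 16 bits select which of the
		 * lower 16 bits a write actually touches, e.g.
		 *
		 *	_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)  == 0x00010001
		 *	_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL) == 0x00010000
		 *
		 * so set and clear can be posted without a read-modify-write.
		 */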
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access
		 * is not working. At this stage we don't know which flavour
		 * this ivb is, so it is better to reset also the gen6 fw
		 * registers before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

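/*
 * Illustrative expansion (derived from the macro above, and assuming the
 * device's gen_mask sets BIT(gen - 1)): GEN_RANGE(4, 9) is GENMASK(8, 3),
 * i.e. bits 3..8 set (0x1f8), so it matches any device from gen4 to gen9
 * when tested against gen_mask in i915_reg_read_ioctl() below.
 */
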
static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens: bit (gen - 1) set per gen, see GEN_RANGE() above */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

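/*
 * Worked example (illustrative, assuming RING_TIMESTAMP(RENDER_RING_BASE)
 * decodes to offset 0x2358): userspace asking for the 64-bit render
 * timestamp passes reg->offset = 0x2358 | 1. The whitelist match masks off
 * the low bits (0x2359 & -8 == 0x2358), then size = 8 | (0x2359 ^ 0x2358)
 * = 8 | 1, selecting the I915_READ64_2x32() case above.
 */
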
static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference
 * between asking for a full domain reset versus a reset of all available
 * individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}

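/*
 * Worked example (illustrative): calling gen6_reset_engines() above with
 * only the RCS and VCS bits set in engine_mask folds them into
 * hw_mask = GEN6_GRDOM_RENDER | GEN6_GRDOM_MEDIA, whereas ALL_ENGINES
 * short-circuits to GEN6_GRDOM_FULL.
 */
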
/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}

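/*
 * Illustrative usage sketch (mirrors gen6_hw_domain_reset() above): poll,
 * with forcewake already held, until the reset bits self-clear.
 *
 *	err = intel_wait_for_register_fw(dev_priv,
 *					 GEN6_GDRST, GEN6_GRDOM_FULL, 0,
 *					 500);
 *	if (err)
 *		DRM_ERROR("global reset did not complete\n");
 */
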
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{

	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}

static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}