/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}
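/*
 * For illustration (a sketch of how the helpers above combine, not a new
 * code path): waking a single domain under uncore.lock follows the usual
 * set/ack handshake:
 *
 *	fw_domain_wait_ack_clear(d);	- previous release has completed
 *	fw_domain_get(d);		- write val_set into reg_set
 *	fw_domain_wait_ack(d);		- hw acknowledges the wake
 *	... MMIO accesses with the domain held awake ...
 *	fw_domain_put(d);		- write val_clear into reg_set
 *	fw_domain_posting_read(d);	- flush the release write
 */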
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);

	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}

	dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	/* Enable Decoupled MMIO only on BXT C stepping onwards */
	if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
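/*
 * Usage sketch (hypothetical caller, for illustration): a raw-access
 * sequence that must keep the render well awake would be bracketed as
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * Note the put side only arms the release timer, so the domain stays
 * powered for roughly another millisecond instead of dropping at once.
 */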
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	return entry ? entry->domains : 0;
}

static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(IS_GEN9(dev_priv) &&
			     (prev + 1) != (s32)ranges->start);
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
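/*
 * Worked example (values taken from the vlv table below): with
 * __vlv_fw_ranges, find_fw_domain(dev_priv, 0x12050) bsearches the sorted
 * ranges, lands in GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA) and
 * returns FORCEWAKE_MEDIA; an offset covered by no range (e.g. 0x4000 on
 * vlv) finds no entry and yields 0, i.e. no forcewake needed.
 */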
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
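/*
 * Worked example (offsets assumed from the table comments above): a gen8
 * write to the render ring's RING_TAIL (near the 0x2000 base noted above)
 * hits a shadowed register, so is_gen8_shadowed() is true and
 * __gen8_reg_write_fw_domains() yields 0 - no forcewake taken; any
 * non-shadowed offset below 0x40000 yields FORCEWAKE_RENDER instead.
 */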
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
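/*
 * Note on the gen9 table above (editorial, derived from the table itself):
 * unlike vlv/chv it is gapless - every start is the previous end + 1 -
 * which is exactly what the IS_GEN9() check in intel_fw_table_check()
 * asserts. E.g. offset 0x8130 resolves to FORCEWAKE_MEDIA, the adjacent
 * 0x8140 to FORCEWAKE_RENDER, and anything in 0xb00-0x1fff to no domain
 * at all (the uncore range).
 */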
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into.
	 */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			     GEN9_DECOUPLED_REG0_DW1) &
			     GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
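/*
 * For illustration (manual macro expansion, not extra code):
 * __gen5_read(32) above roughly expands to
 *
 *	static u32 gen5_read32(struct drm_i915_private *dev_priv,
 *			       i915_reg_t reg, bool trace)
 *	{
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		ilk_dummy_write(dev_priv);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */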
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
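/*
 * Editorial note on the vgpu accessors below: they take only uncore.lock
 * and issue the raw access - when running as a mediated guest the
 * forcewake and FIFO bookkeeping is assumed to be the host's problem, so
 * none of the domain machinery above is involved.
 */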
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
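/*
 * Editorial note on the decoupled write below: the decoupled path is only
 * taken when the needed domains are not already active
 * (fw_engine & ~fw_domains_active); the hardware then powers the domain
 * up for the single dword transfer via __gen9_decoupled_mmio_write(),
 * which avoids the software set/ack handshake entirely.
 */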
#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
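/*
 * Editorial note on the values chosen above: on the multi-threaded
 * forcewake platforms the set register is "masked" - the upper 16 bits
 * select which of the lower 16 bits the write updates. So
 * _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL) expands to
 * (FORCEWAKE_KERNEL << 16) | FORCEWAKE_KERNEL, and val_reset =
 * _MASKED_BIT_DISABLE(0xffff) clears all sixteen control bits in one shot
 * (WaRsClearFWBitsAtReset).
 */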
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
						gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
						gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
						gen9_decoupled_write32;
		}
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
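/*
 * For illustration (a sketch of the dispatch set up by intel_uncore_init()
 * above, assuming the usual I915_READ wrapper): on a fwtable platform
 *
 *	I915_READ(reg)
 *	  -> dev_priv->uncore.funcs.mmio_readl(dev_priv, reg, true)
 *	  -> fwtable_read32(): look up the forcewake domains for the
 *	     offset, wake any that are idle, then __raw_i915_read32().
 */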
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
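/*
 * Editorial note: the pre-ilk resets above go through the GDRST byte in
 * PCI config space rather than MMIO - assert GRDOM_RESET_ENABLE (plus a
 * domain bit where applicable), then poll i915_reset_complete()/
 * g4x_reset_complete() until the hardware clears the bit, with wait_for()
 * allowing up to 500ms.
 */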
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
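/*
 * Worked example (hypothetical mask, for illustration): resetting only the
 * render and blitter engines on gen6+ reduces to
 *
 *	gen6_hw_domain_reset(dev_priv, GEN6_GRDOM_RENDER | GEN6_GRDOM_BLT);
 *
 * whereas ALL_ENGINES maps to the single GEN6_GRDOM_FULL domain - which is
 * why the caller must distinguish "full reset" from "every engine
 * individually", per the note above gen6_reset_engines().
 */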
/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}

static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
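/*
 * Usage sketch (mirrors intel_gpu_reset() below, for illustration):
 *
 *	reset_func reset = intel_get_gpu_reset(dev_priv);
 *	if (reset)
 *		ret = reset(dev_priv, ALL_ENGINES);
 */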
static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}
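/*
 * Editorial note: the function below implements the "oneshot" policy -
 * bumping i915.mmio_debug makes unclaimed_reg_debug() start doing the
 * expensive per-access checks, while decrementing unclaimed_mmio_check
 * ensures detection is re-armed at most once unless the user asked for
 * persistent reporting via the i915.mmio_debug module parameter.
 */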
" 1881 "Please use i915.mmio_debug=N for more information.\n"); 1882 i915.mmio_debug++; 1883 dev_priv->uncore.unclaimed_mmio_check--; 1884 return true; 1885 } 1886 1887 return false; 1888 } 1889 1890 static enum forcewake_domains 1891 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, 1892 i915_reg_t reg) 1893 { 1894 u32 offset = i915_mmio_reg_offset(reg); 1895 enum forcewake_domains fw_domains; 1896 1897 if (HAS_FWTABLE(dev_priv)) { 1898 fw_domains = __fwtable_reg_read_fw_domains(offset); 1899 } else if (INTEL_GEN(dev_priv) >= 6) { 1900 fw_domains = __gen6_reg_read_fw_domains(offset); 1901 } else { 1902 WARN_ON(!IS_GEN(dev_priv, 2, 5)); 1903 fw_domains = 0; 1904 } 1905 1906 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); 1907 1908 return fw_domains; 1909 } 1910 1911 static enum forcewake_domains 1912 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, 1913 i915_reg_t reg) 1914 { 1915 u32 offset = i915_mmio_reg_offset(reg); 1916 enum forcewake_domains fw_domains; 1917 1918 if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { 1919 fw_domains = __fwtable_reg_write_fw_domains(offset); 1920 } else if (IS_GEN8(dev_priv)) { 1921 fw_domains = __gen8_reg_write_fw_domains(offset); 1922 } else if (IS_GEN(dev_priv, 6, 7)) { 1923 fw_domains = FORCEWAKE_RENDER; 1924 } else { 1925 WARN_ON(!IS_GEN(dev_priv, 2, 5)); 1926 fw_domains = 0; 1927 } 1928 1929 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); 1930 1931 return fw_domains; 1932 } 1933 1934 /** 1935 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access 1936 * a register 1937 * @dev_priv: pointer to struct drm_i915_private 1938 * @reg: register in question 1939 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE 1940 * 1941 * Returns a set of forcewake domains required to be taken with for example 1942 * intel_uncore_forcewake_get for the specified register to be accessible in the 1943 * specified mode (read, write or read/write) with raw mmio accessors. 1944 * 1945 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the 1946 * callers to do FIFO management on their own or risk losing writes. 1947 */ 1948 enum forcewake_domains 1949 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, 1950 i915_reg_t reg, unsigned int op) 1951 { 1952 enum forcewake_domains fw_domains = 0; 1953 1954 WARN_ON(!op); 1955 1956 if (intel_vgpu_active(dev_priv)) 1957 return 0; 1958 1959 if (op & FW_REG_READ) 1960 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); 1961 1962 if (op & FW_REG_WRITE) 1963 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); 1964 1965 return fw_domains; 1966 } 1967