1 /* 2 * Copyright © 2013 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 */ 23 24 #include "i915_drv.h" 25 #include "intel_drv.h" 26 #include "i915_vgpu.h" 27 28 #define FORCEWAKE_ACK_TIMEOUT_MS 2 29 30 #define __raw_i915_read8(dev_priv__, reg__) DRM_READ8(dev_priv__->mmio_map, reg__) 31 #define __raw_i915_write8(dev_priv__, reg__, val__) DRM_WRITE8(dev_priv__->mmio_map, reg__, val__) 32 33 #define __raw_i915_read16(dev_priv__, reg__) DRM_READ16(dev_priv__->mmio_map, reg__) 34 #define __raw_i915_write16(dev_priv__, reg__, val__) DRM_WRITE16(dev_priv__->mmio_map, reg__, val__) 35 36 #define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__) 37 #define __raw_i915_write32(dev_priv__, reg__, val__) DRM_WRITE32(dev_priv__->mmio_map, reg__, val__) 38 39 #define __raw_i915_read64(dev_priv__, reg__) DRM_READ64(dev_priv__->mmio_map, reg__) 40 #define __raw_i915_write64(dev_priv__, reg__, val__) DRM_WRITE64(dev_priv__->mmio_map, reg__, val__) 41 42 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) 43 44 static const char * const forcewake_domain_names[] = { 45 "render", 46 "blitter", 47 "media", 48 }; 49 50 const char * 51 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) 52 { 53 BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) != 54 FW_DOMAIN_ID_COUNT); 55 56 if (id >= 0 && id < FW_DOMAIN_ID_COUNT) 57 return forcewake_domain_names[id]; 58 59 WARN_ON(id); 60 61 return "unknown"; 62 } 63 64 static void 65 assert_device_not_suspended(struct drm_i915_private *dev_priv) 66 { 67 WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, 68 "Device suspended\n"); 69 } 70 71 static inline void 72 fw_domain_reset(const struct intel_uncore_forcewake_domain *d) 73 { 74 WARN_ON(d->reg_set == 0); 75 __raw_i915_write32(d->i915, d->reg_set, d->val_reset); 76 } 77 78 static inline void 79 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) 80 { 81 mod_timer_pinned(&d->timer, jiffies + 1); 82 } 83 84 static inline void 85 
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) 86 { 87 if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) & 88 FORCEWAKE_KERNEL) == 0, 89 FORCEWAKE_ACK_TIMEOUT_MS)) 90 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", 91 intel_uncore_forcewake_domain_to_str(d->id)); 92 } 93 94 static inline void 95 fw_domain_get(const struct intel_uncore_forcewake_domain *d) 96 { 97 __raw_i915_write32(d->i915, d->reg_set, d->val_set); 98 } 99 100 static inline void 101 fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d) 102 { 103 if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) & 104 FORCEWAKE_KERNEL), 105 FORCEWAKE_ACK_TIMEOUT_MS)) 106 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", 107 intel_uncore_forcewake_domain_to_str(d->id)); 108 } 109 110 static inline void 111 fw_domain_put(const struct intel_uncore_forcewake_domain *d) 112 { 113 __raw_i915_write32(d->i915, d->reg_set, d->val_clear); 114 } 115 116 static inline void 117 fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d) 118 { 119 /* something from same cacheline, but not from the set register */ 120 if (d->reg_post) 121 __raw_posting_read(d->i915, d->reg_post); 122 } 123 124 static void 125 fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) 126 { 127 struct intel_uncore_forcewake_domain *d; 128 enum forcewake_domain_id id; 129 130 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) { 131 fw_domain_wait_ack_clear(d); 132 fw_domain_get(d); 133 fw_domain_wait_ack(d); 134 } 135 } 136 137 static void 138 fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) 139 { 140 struct intel_uncore_forcewake_domain *d; 141 enum forcewake_domain_id id; 142 143 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) { 144 fw_domain_put(d); 145 fw_domain_posting_read(d); 146 } 147 } 148 149 static void 150 fw_domains_posting_read(struct drm_i915_private *dev_priv) 151 { 152 
struct intel_uncore_forcewake_domain *d; 153 enum forcewake_domain_id id; 154 155 /* No need to do for all, just do for first found */ 156 for_each_fw_domain(d, dev_priv, id) { 157 fw_domain_posting_read(d); 158 break; 159 } 160 } 161 162 static void 163 fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) 164 { 165 struct intel_uncore_forcewake_domain *d; 166 enum forcewake_domain_id id; 167 168 if (dev_priv->uncore.fw_domains == 0) 169 return; 170 171 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) 172 fw_domain_reset(d); 173 174 fw_domains_posting_read(dev_priv); 175 } 176 177 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) 178 { 179 /* w/a for a sporadic read returning 0 by waiting for the GT 180 * thread to wake up. 181 */ 182 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & 183 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500)) 184 DRM_ERROR("GT thread status wait timed out\n"); 185 } 186 187 static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv, 188 enum forcewake_domains fw_domains) 189 { 190 fw_domains_get(dev_priv, fw_domains); 191 192 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */ 193 __gen6_gt_wait_for_thread_c0(dev_priv); 194 } 195 196 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) 197 { 198 u32 gtfifodbg; 199 200 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); 201 if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg)) 202 __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg); 203 } 204 205 static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv, 206 enum forcewake_domains fw_domains) 207 { 208 fw_domains_put(dev_priv, fw_domains); 209 gen6_gt_check_fifodbg(dev_priv); 210 } 211 212 static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv) 213 { 214 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL); 215 216 return count & GT_FIFO_FREE_ENTRIES_MASK; 217 } 218 219 static int 
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 220 { 221 int ret = 0; 222 223 /* On VLV, FIFO will be shared by both SW and HW. 224 * So, we need to read the FREE_ENTRIES everytime */ 225 if (IS_VALLEYVIEW(dev_priv->dev)) 226 dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); 227 228 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { 229 int loop = 500; 230 u32 fifo = fifo_free_entries(dev_priv); 231 232 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { 233 udelay(10); 234 fifo = fifo_free_entries(dev_priv); 235 } 236 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) 237 ++ret; 238 dev_priv->uncore.fifo_count = fifo; 239 } 240 dev_priv->uncore.fifo_count--; 241 242 return ret; 243 } 244 245 static void intel_uncore_fw_release_timer(unsigned long arg) 246 { 247 struct intel_uncore_forcewake_domain *domain = (void *)arg; 248 249 assert_device_not_suspended(domain->i915); 250 251 lockmgr(&domain->i915->uncore.lock, LK_EXCLUSIVE); 252 if (WARN_ON(domain->wake_count == 0)) 253 domain->wake_count++; 254 255 if (--domain->wake_count == 0) 256 domain->i915->uncore.funcs.force_wake_put(domain->i915, 257 1 << domain->id); 258 259 lockmgr(&domain->i915->uncore.lock, LK_RELEASE); 260 } 261 262 void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 263 { 264 struct drm_i915_private *dev_priv = dev->dev_private; 265 struct intel_uncore_forcewake_domain *domain; 266 int retry_count = 100; 267 enum forcewake_domain_id id; 268 enum forcewake_domains fw = 0, active_domains; 269 270 /* Hold uncore.lock across reset to prevent any register access 271 * with forcewake not set correctly. Wait until all pending 272 * timers are run before holding. 
273 */ 274 while (1) { 275 active_domains = 0; 276 277 for_each_fw_domain(domain, dev_priv, id) { 278 if (del_timer_sync(&domain->timer) == 0) 279 continue; 280 281 intel_uncore_fw_release_timer((unsigned long)domain); 282 } 283 284 lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE); 285 286 for_each_fw_domain(domain, dev_priv, id) { 287 if (timer_pending(&domain->timer)) 288 active_domains |= (1 << id); 289 } 290 291 if (active_domains == 0) 292 break; 293 294 if (--retry_count == 0) { 295 DRM_ERROR("Timed out waiting for forcewake timers to finish\n"); 296 break; 297 } 298 299 lockmgr(&dev_priv->uncore.lock, LK_RELEASE); 300 #if 0 301 cond_resched(); 302 #endif 303 } 304 305 WARN_ON(active_domains); 306 307 for_each_fw_domain(domain, dev_priv, id) 308 if (domain->wake_count) 309 fw |= 1 << id; 310 311 if (fw) 312 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); 313 314 fw_domains_reset(dev_priv, FORCEWAKE_ALL); 315 316 if (restore) { /* If reset with a user forcewake, try to restore */ 317 if (fw) 318 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 319 320 if (IS_GEN6(dev) || IS_GEN7(dev)) 321 dev_priv->uncore.fifo_count = 322 fifo_free_entries(dev_priv); 323 } 324 325 if (!restore) 326 assert_forcewakes_inactive(dev_priv); 327 328 lockmgr(&dev_priv->uncore.lock, LK_RELEASE); 329 } 330 331 static void intel_uncore_ellc_detect(struct drm_device *dev) 332 { 333 struct drm_i915_private *dev_priv = dev->dev_private; 334 335 if ((IS_HASWELL(dev) || IS_BROADWELL(dev) || 336 INTEL_INFO(dev)->gen >= 9) && 337 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) { 338 /* The docs do not explain exactly how the calculation can be 339 * made. It is somewhat guessable, but for now, it's always 340 * 128MB. 
341 * NB: We can't write IDICR yet because we do not have gt funcs 342 * set up */ 343 dev_priv->ellc_size = 128; 344 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); 345 } 346 } 347 348 static void __intel_uncore_early_sanitize(struct drm_device *dev, 349 bool restore_forcewake) 350 { 351 struct drm_i915_private *dev_priv = dev->dev_private; 352 353 if (HAS_FPGA_DBG_UNCLAIMED(dev)) 354 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 355 356 /* clear out old GT FIFO errors */ 357 if (IS_GEN6(dev) || IS_GEN7(dev)) 358 __raw_i915_write32(dev_priv, GTFIFODBG, 359 __raw_i915_read32(dev_priv, GTFIFODBG)); 360 361 /* WaDisableShadowRegForCpd:chv */ 362 if (IS_CHERRYVIEW(dev)) { 363 __raw_i915_write32(dev_priv, GTFIFOCTL, 364 __raw_i915_read32(dev_priv, GTFIFOCTL) | 365 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | 366 GT_FIFO_CTL_RC6_POLICY_STALL); 367 } 368 369 intel_uncore_forcewake_reset(dev, restore_forcewake); 370 } 371 372 void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 373 { 374 __intel_uncore_early_sanitize(dev, restore_forcewake); 375 i915_check_and_clear_faults(dev); 376 } 377 378 void intel_uncore_sanitize(struct drm_device *dev) 379 { 380 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 381 intel_disable_gt_powersave(dev); 382 } 383 384 /** 385 * intel_uncore_forcewake_get - grab forcewake domain references 386 * @dev_priv: i915 device instance 387 * @fw_domains: forcewake domains to get reference on 388 * 389 * This function can be used get GT's forcewake domain references. 390 * Normal register access will handle the forcewake domains automatically. 391 * However if some sequence requires the GT to not power down a particular 392 * forcewake domains this function should be called at the beginning of the 393 * sequence. And subsequently the reference should be dropped by symmetric 394 * call to intel_unforce_forcewake_put(). 
Usually caller wants all the domains 395 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL. 396 */ 397 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 398 enum forcewake_domains fw_domains) 399 { 400 struct intel_uncore_forcewake_domain *domain; 401 enum forcewake_domain_id id; 402 403 if (!dev_priv->uncore.funcs.force_wake_get) 404 return; 405 406 WARN_ON(dev_priv->pm.suspended); 407 408 fw_domains &= dev_priv->uncore.fw_domains; 409 410 lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE); 411 412 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { 413 if (domain->wake_count++) 414 fw_domains &= ~(1 << id); 415 } 416 417 if (fw_domains) 418 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 419 420 lockmgr(&dev_priv->uncore.lock, LK_RELEASE); 421 } 422 423 /** 424 * intel_uncore_forcewake_put - release a forcewake domain reference 425 * @dev_priv: i915 device instance 426 * @fw_domains: forcewake domains to put references 427 * 428 * This function drops the device-level forcewakes for specified 429 * domains obtained by intel_uncore_forcewake_get(). 
430 */ 431 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 432 enum forcewake_domains fw_domains) 433 { 434 struct intel_uncore_forcewake_domain *domain; 435 enum forcewake_domain_id id; 436 437 if (!dev_priv->uncore.funcs.force_wake_put) 438 return; 439 440 fw_domains &= dev_priv->uncore.fw_domains; 441 442 lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE); 443 444 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { 445 if (WARN_ON(domain->wake_count == 0)) 446 continue; 447 448 if (--domain->wake_count) 449 continue; 450 451 domain->wake_count++; 452 fw_domain_arm_timer(domain); 453 } 454 455 lockmgr(&dev_priv->uncore.lock, LK_RELEASE); 456 } 457 458 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) 459 { 460 struct intel_uncore_forcewake_domain *domain; 461 enum forcewake_domain_id id; 462 463 if (!dev_priv->uncore.funcs.force_wake_get) 464 return; 465 466 for_each_fw_domain(domain, dev_priv, id) 467 WARN_ON(domain->wake_count); 468 } 469 470 /* We give fast paths for the really cool registers */ 471 #define NEEDS_FORCE_WAKE(dev_priv, reg) \ 472 ((reg) < 0x40000 && (reg) != FORCEWAKE) 473 474 #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) 475 476 #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ 477 (REG_RANGE((reg), 0x2000, 0x4000) || \ 478 REG_RANGE((reg), 0x5000, 0x8000) || \ 479 REG_RANGE((reg), 0xB000, 0x12000) || \ 480 REG_RANGE((reg), 0x2E000, 0x30000)) 481 482 #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \ 483 (REG_RANGE((reg), 0x12000, 0x14000) || \ 484 REG_RANGE((reg), 0x22000, 0x24000) || \ 485 REG_RANGE((reg), 0x30000, 0x40000)) 486 487 #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ 488 (REG_RANGE((reg), 0x2000, 0x4000) || \ 489 REG_RANGE((reg), 0x5200, 0x8000) || \ 490 REG_RANGE((reg), 0x8300, 0x8500) || \ 491 REG_RANGE((reg), 0xB000, 0xB480) || \ 492 REG_RANGE((reg), 0xE000, 0xE800)) 493 494 #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ 495 (REG_RANGE((reg), 0x8800, 0x8900) || \ 
496 REG_RANGE((reg), 0xD000, 0xD800) || \ 497 REG_RANGE((reg), 0x12000, 0x14000) || \ 498 REG_RANGE((reg), 0x1A000, 0x1C000) || \ 499 REG_RANGE((reg), 0x1E800, 0x1EA00) || \ 500 REG_RANGE((reg), 0x30000, 0x38000)) 501 502 #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ 503 (REG_RANGE((reg), 0x4000, 0x5000) || \ 504 REG_RANGE((reg), 0x8000, 0x8300) || \ 505 REG_RANGE((reg), 0x8500, 0x8600) || \ 506 REG_RANGE((reg), 0x9000, 0xB000) || \ 507 REG_RANGE((reg), 0xF000, 0x10000)) 508 509 #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ 510 REG_RANGE((reg), 0xB00, 0x2000) 511 512 #define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \ 513 (REG_RANGE((reg), 0x2000, 0x2700) || \ 514 REG_RANGE((reg), 0x3000, 0x4000) || \ 515 REG_RANGE((reg), 0x5200, 0x8000) || \ 516 REG_RANGE((reg), 0x8140, 0x8160) || \ 517 REG_RANGE((reg), 0x8300, 0x8500) || \ 518 REG_RANGE((reg), 0x8C00, 0x8D00) || \ 519 REG_RANGE((reg), 0xB000, 0xB480) || \ 520 REG_RANGE((reg), 0xE000, 0xE900) || \ 521 REG_RANGE((reg), 0x24400, 0x24800)) 522 523 #define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \ 524 (REG_RANGE((reg), 0x8130, 0x8140) || \ 525 REG_RANGE((reg), 0x8800, 0x8A00) || \ 526 REG_RANGE((reg), 0xD000, 0xD800) || \ 527 REG_RANGE((reg), 0x12000, 0x14000) || \ 528 REG_RANGE((reg), 0x1A000, 0x1EA00) || \ 529 REG_RANGE((reg), 0x30000, 0x40000)) 530 531 #define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \ 532 REG_RANGE((reg), 0x9400, 0x9800) 533 534 #define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \ 535 ((reg) < 0x40000 &&\ 536 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \ 537 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \ 538 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ 539 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) 540 541 static void 542 ilk_dummy_write(struct drm_i915_private *dev_priv) 543 { 544 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up 545 * the chip from rc6 before touching it for real. MI_MODE is masked, 546 * hence harmless to write 0 into. 
*/ 547 __raw_i915_write32(dev_priv, MI_MODE, 0); 548 } 549 550 static void 551 hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, 552 bool before) 553 { 554 const char *op = read ? "reading" : "writing to"; 555 const char *when = before ? "before" : "after"; 556 557 if (!i915.mmio_debug) 558 return; 559 560 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 561 WARN(1, "Unclaimed register detected %s %s register 0x%x\n", 562 when, op, reg); 563 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 564 i915.mmio_debug--; /* Only report the first N failures */ 565 } 566 } 567 568 static void 569 hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) 570 { 571 static bool mmio_debug_once = true; 572 573 if (i915.mmio_debug || !mmio_debug_once) 574 return; 575 576 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 577 DRM_DEBUG("Unclaimed register detected, " 578 "enabling oneshot unclaimed register reporting. " 579 "Please use i915.mmio_debug=N for more information.\n"); 580 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 581 i915.mmio_debug = mmio_debug_once--; 582 } 583 } 584 585 #define GEN2_READ_HEADER(x) \ 586 u##x val = 0; \ 587 assert_device_not_suspended(dev_priv); 588 589 #define GEN2_READ_FOOTER \ 590 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 591 return val 592 593 #define __gen2_read(x) \ 594 static u##x \ 595 gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 596 GEN2_READ_HEADER(x); \ 597 val = __raw_i915_read##x(dev_priv, reg); \ 598 GEN2_READ_FOOTER; \ 599 } 600 601 #define __gen5_read(x) \ 602 static u##x \ 603 gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 604 GEN2_READ_HEADER(x); \ 605 ilk_dummy_write(dev_priv); \ 606 val = __raw_i915_read##x(dev_priv, reg); \ 607 GEN2_READ_FOOTER; \ 608 } 609 610 __gen5_read(8) 611 __gen5_read(16) 612 __gen5_read(32) 613 __gen5_read(64) 614 __gen2_read(8) 615 
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

/*
 * Gen6+ reads are traced and must hold uncore.lock so the implicit
 * forcewake reference counting stays consistent with the timers.
 */
#define GEN6_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_READ_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

/*
 * Take an implicit forcewake reference on each requested domain that is
 * not already awake, and arm its deferred release timer.  Caller must
 * hold uncore.lock.
 */
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would be constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			/* Already awake: no hardware handshake needed. */
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/* vGPU: the host hypervisor mediates forcewake, so just read. */
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

/* Gen6/7: single render forcewake domain plus unclaimed-reg debug. */
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

/* VLV: choose render or media forcewake from the register offset. */
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

/* CHV: render, media, or both domains depending on the offset range. */
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

/* Gen9: everything below 0x40000 except the uncore range needs forcewake. */
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

/* Gen9: map the offset to render/media/common/blitter domains. */
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

/* Gen2-5 writes: traced, but no forcewake or uncore locking needed. */
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

/* Gen5 (ilk): dummy write first to wake the chip from RC6. */
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

/* Gen6+ writes are traced and serialized under uncore.lock. */
#define GEN6_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_WRITE_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE)

/*
 * Gen6/7: reserve a GT FIFO entry before the write and re-check the
 * FIFO debug register if the reservation timed out.
 */
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

/* HSW: gen6 write path plus unclaimed-register debug/detect. */
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

/* vGPU: host mediates forcewake; plain traced write. */
#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

/* Registers the hardware shadows on gen8: writes land without forcewake. */
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

/* Linear scan of the (small) gen8 shadowed-register table. */
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

/* BDW: render forcewake for non-shadowed registers below 0x40000. */
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

/* CHV: per-offset domain selection, skipped for shadowed registers. */
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

/* Registers the hardware shadows on gen9: writes land without forcewake. */
static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

/* Linear scan of the (small) gen9 shadowed-register table. */
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

/* Gen9: offset-based domain selection, skipped for shadowed registers. */
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

/* Install the generated <x>_write8/16/32/64 functions as mmio vfuncs. */
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

/* Install the generated <x>_read8/16/32/64 functions as mmio vfuncs. */
#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


/*
 * Initialize one forcewake domain: register offsets, the per-gen
 * set/clear/reset values, the posting-read register, and the deferred
 * release timer.  Finishes by writing the domain's reset value.
 */
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		/* Gen6 uses plain (unmasked) forcewake bits. */
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	/* Posting-read register: same cacheline, never the set register. */
	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

/* Set up the per-platform forcewake domains and get/put vfuncs. */
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Gen5 and earlier have no forcewake at all. */
	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		/* Probe ECOBUS under forcewake to see if MT mode works. */
		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			/* Fall back to the legacy single-threaded registers. */
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

/*
 * intel_uncore_init - set up forcewake domains and install the per-gen
 * MMIO read/write vfuncs, then sanitize hardware state.
 */
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	/* vGPU overrides whatever was picked above. */
	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc.
*/ 1206 uint32_t gen_bitmask; 1207 } whitelist[] = { 1208 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) }, 1209 }; 1210 1211 int i915_reg_read_ioctl(struct drm_device *dev, 1212 void *data, struct drm_file *file) 1213 { 1214 struct drm_i915_private *dev_priv = dev->dev_private; 1215 struct drm_i915_reg_read *reg = data; 1216 struct register_whitelist const *entry = whitelist; 1217 int i, ret = 0; 1218 1219 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1220 if (entry->offset == reg->offset && 1221 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1222 break; 1223 } 1224 1225 if (i == ARRAY_SIZE(whitelist)) 1226 return -EINVAL; 1227 1228 intel_runtime_pm_get(dev_priv); 1229 1230 switch (entry->size) { 1231 case 8: 1232 reg->val = I915_READ64(reg->offset); 1233 break; 1234 case 4: 1235 reg->val = I915_READ(reg->offset); 1236 break; 1237 case 2: 1238 reg->val = I915_READ16(reg->offset); 1239 break; 1240 case 1: 1241 reg->val = I915_READ8(reg->offset); 1242 break; 1243 default: 1244 MISSING_CASE(entry->size); 1245 ret = -EINVAL; 1246 goto out; 1247 } 1248 1249 out: 1250 intel_runtime_pm_put(dev_priv); 1251 return ret; 1252 } 1253 1254 int i915_get_reset_stats_ioctl(struct drm_device *dev, 1255 void *data, struct drm_file *file) 1256 { 1257 struct drm_i915_private *dev_priv = dev->dev_private; 1258 struct drm_i915_reset_stats *args = data; 1259 struct i915_ctx_hang_stats *hs; 1260 struct intel_context *ctx; 1261 int ret; 1262 1263 if (args->flags || args->pad) 1264 return -EINVAL; 1265 1266 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) 1267 return -EPERM; 1268 1269 ret = mutex_lock_interruptible(&dev->struct_mutex); 1270 if (ret) 1271 return ret; 1272 1273 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); 1274 if (IS_ERR(ctx)) { 1275 mutex_unlock(&dev->struct_mutex); 1276 return PTR_ERR(ctx); 1277 } 1278 hs = &ctx->hang_stats; 1279 1280 if (capable(CAP_SYS_ADMIN)) 1281 args->reset_count = 
i915_reset_count(&dev_priv->gpu_error); 1282 else 1283 args->reset_count = 0; 1284 1285 args->batch_active = hs->batch_active; 1286 args->batch_pending = hs->batch_pending; 1287 1288 mutex_unlock(&dev->struct_mutex); 1289 1290 return 0; 1291 } 1292 1293 static int i915_reset_complete(struct drm_device *dev) 1294 { 1295 u8 gdrst; 1296 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1297 return (gdrst & GRDOM_RESET_STATUS) == 0; 1298 } 1299 1300 static int i915_do_reset(struct drm_device *dev) 1301 { 1302 /* assert reset for at least 20 usec */ 1303 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1304 udelay(20); 1305 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1306 1307 return wait_for(i915_reset_complete(dev), 500); 1308 } 1309 1310 static int g4x_reset_complete(struct drm_device *dev) 1311 { 1312 u8 gdrst; 1313 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1314 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1315 } 1316 1317 static int g33_do_reset(struct drm_device *dev) 1318 { 1319 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1320 return wait_for(g4x_reset_complete(dev), 500); 1321 } 1322 1323 static int g4x_do_reset(struct drm_device *dev) 1324 { 1325 struct drm_i915_private *dev_priv = dev->dev_private; 1326 int ret; 1327 1328 pci_write_config_byte(dev->pdev, I915_GDRST, 1329 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1330 ret = wait_for(g4x_reset_complete(dev), 500); 1331 if (ret) 1332 return ret; 1333 1334 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1335 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1336 POSTING_READ(VDECCLK_GATE_D); 1337 1338 pci_write_config_byte(dev->pdev, I915_GDRST, 1339 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1340 ret = wait_for(g4x_reset_complete(dev), 500); 1341 if (ret) 1342 return ret; 1343 1344 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1345 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1346 
POSTING_READ(VDECCLK_GATE_D); 1347 1348 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1349 1350 return 0; 1351 } 1352 1353 static int ironlake_do_reset(struct drm_device *dev) 1354 { 1355 struct drm_i915_private *dev_priv = dev->dev_private; 1356 int ret; 1357 1358 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1359 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1360 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1361 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1362 if (ret) 1363 return ret; 1364 1365 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1366 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1367 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1368 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1369 if (ret) 1370 return ret; 1371 1372 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0); 1373 1374 return 0; 1375 } 1376 1377 static int gen6_do_reset(struct drm_device *dev) 1378 { 1379 struct drm_i915_private *dev_priv = dev->dev_private; 1380 int ret; 1381 1382 /* Reset the chip */ 1383 1384 /* GEN6_GDRST is not in the gt power well, no need to check 1385 * for fifo space for the write or forcewake the chip for 1386 * the read 1387 */ 1388 __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL); 1389 1390 /* Spin waiting for the device to ack the reset request */ 1391 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 1392 1393 intel_uncore_forcewake_reset(dev, true); 1394 1395 return ret; 1396 } 1397 1398 int intel_gpu_reset(struct drm_device *dev) 1399 { 1400 if (INTEL_INFO(dev)->gen >= 6) 1401 return gen6_do_reset(dev); 1402 else if (IS_GEN5(dev)) 1403 return ironlake_do_reset(dev); 1404 else if (IS_G4X(dev)) 1405 return g4x_do_reset(dev); 1406 else if (IS_G33(dev)) 1407 return g33_do_reset(dev); 1408 else if (INTEL_INFO(dev)->gen >= 3) 1409 return i915_do_reset(dev); 1410 else 1411 return -ENODEV; 1412 } 1413 1414 void intel_uncore_check_errors(struct drm_device *dev) 1415 { 1416 struct drm_i915_private *dev_priv = dev->dev_private; 
1417 1418 if (HAS_FPGA_DBG_UNCLAIMED(dev) && 1419 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1420 DRM_ERROR("Unclaimed register before interrupt\n"); 1421 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1422 } 1423 } 1424