1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include <drm/i915_drm.h> 9 10 #include "display/intel_display.h" 11 #include "display/intel_display_irq.h" 12 #include "i915_drv.h" 13 #include "i915_irq.h" 14 #include "i915_reg.h" 15 #include "intel_breadcrumbs.h" 16 #include "intel_gt.h" 17 #include "intel_gt_clock_utils.h" 18 #include "intel_gt_irq.h" 19 #include "intel_gt_pm.h" 20 #include "intel_gt_pm_irq.h" 21 #include "intel_gt_print.h" 22 #include "intel_gt_regs.h" 23 #include "intel_mchbar_regs.h" 24 #include "intel_pcode.h" 25 #include "intel_rps.h" 26 #include "vlv_sideband.h" 27 #ifdef __linux__ 28 #include "../../../platform/x86/intel_ips.h" 29 #endif 30 31 #define BUSY_MAX_EI 20u /* ms */ 32 33 /* 34 * Lock protecting IPS related data structures 35 */ 36 static DEFINE_SPINLOCK(mchdev_lock); 37 38 static struct intel_gt *rps_to_gt(struct intel_rps *rps) 39 { 40 return container_of(rps, struct intel_gt, rps); 41 } 42 43 static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) 44 { 45 return rps_to_gt(rps)->i915; 46 } 47 48 static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) 49 { 50 return rps_to_gt(rps)->uncore; 51 } 52 53 static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps) 54 { 55 struct intel_gt *gt = rps_to_gt(rps); 56 57 return >->uc.guc.slpc; 58 } 59 60 static bool rps_uses_slpc(struct intel_rps *rps) 61 { 62 struct intel_gt *gt = rps_to_gt(rps); 63 64 return intel_uc_uses_guc_slpc(>->uc); 65 } 66 67 static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) 68 { 69 return mask & ~rps->pm_intrmsk_mbz; 70 } 71 72 static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 73 { 74 intel_uncore_write_fw(uncore, reg, val); 75 } 76 77 static void rps_timer(void *arg) 78 { 79 struct intel_rps *rps = arg; 80 struct intel_gt *gt = rps_to_gt(rps); 81 struct intel_engine_cs *engine; 82 ktime_t dt, last, timestamp; 83 enum intel_engine_id id; 84 s64 max_busy[3] = {}; 85 86 timestamp = 0; 87 for_each_engine(engine, gt, id) { 88 s64 busy; 89 int i; 90 91 dt = intel_engine_get_busy_time(engine, ×tamp); 92 last = engine->stats.rps; 93 engine->stats.rps = dt; 94 95 busy = ktime_to_ns(ktime_sub(dt, last)); 96 for (i = 0; i < ARRAY_SIZE(max_busy); i++) { 97 if (busy > max_busy[i]) 98 swap(busy, max_busy[i]); 99 } 100 } 101 last = rps->pm_timestamp; 102 rps->pm_timestamp = timestamp; 103 104 if (intel_rps_is_active(rps)) { 105 s64 busy; 106 int i; 107 108 dt = ktime_sub(timestamp, last); 109 110 /* 111 * Our goal is to evaluate each engine independently, so we run 112 * at the lowest clocks required to sustain the heaviest 113 * workload. However, a task may be split into sequential 114 * dependent operations across a set of engines, such that 115 * the independent contributions do not account for high load, 116 * but overall the task is GPU bound. For example, consider 117 * video decode on vcs followed by colour post-processing 118 * on vecs, followed by general post-processing on rcs. 119 * Since multi-engines being active does imply a single 120 * continuous workload across all engines, we hedge our 121 * bets by only contributing a factor of the distributed 122 * load into our busyness calculation. 
123 */ 124 busy = max_busy[0]; 125 for (i = 1; i < ARRAY_SIZE(max_busy); i++) { 126 if (!max_busy[i]) 127 break; 128 129 busy += div_u64(max_busy[i], 1 << i); 130 } 131 GT_TRACE(gt, 132 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", 133 busy, (int)div64_u64(100 * busy, dt), 134 max_busy[0], max_busy[1], max_busy[2], 135 rps->pm_interval); 136 137 if (100 * busy > rps->power.up_threshold * dt && 138 rps->cur_freq < rps->max_freq_softlimit) { 139 rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; 140 rps->pm_interval = 1; 141 queue_work(gt->i915->unordered_wq, &rps->work); 142 } else if (100 * busy < rps->power.down_threshold * dt && 143 rps->cur_freq > rps->min_freq_softlimit) { 144 rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; 145 rps->pm_interval = 1; 146 queue_work(gt->i915->unordered_wq, &rps->work); 147 } else { 148 rps->last_adj = 0; 149 } 150 151 mod_timer(&rps->timer, 152 jiffies + msecs_to_jiffies(rps->pm_interval)); 153 rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI); 154 } 155 } 156 157 static void rps_start_timer(struct intel_rps *rps) 158 { 159 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 160 rps->pm_interval = 1; 161 mod_timer(&rps->timer, jiffies + 1); 162 } 163 164 static void rps_stop_timer(struct intel_rps *rps) 165 { 166 del_timer_sync(&rps->timer); 167 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 168 cancel_work_sync(&rps->work); 169 } 170 171 static u32 rps_pm_mask(struct intel_rps *rps, u8 val) 172 { 173 u32 mask = 0; 174 175 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ 176 if (val > rps->min_freq_softlimit) 177 mask |= (GEN6_PM_RP_UP_EI_EXPIRED | 178 GEN6_PM_RP_DOWN_THRESHOLD | 179 GEN6_PM_RP_DOWN_TIMEOUT); 180 181 if (val < rps->max_freq_softlimit) 182 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 183 184 mask &= rps->pm_events; 185 186 return rps_pm_sanitize_mask(rps, ~mask); 187 } 188 189 static void rps_reset_ei(struct intel_rps *rps) 190 { 191 memset(&rps->ei, 0, sizeof(rps->ei)); 192 } 193 194 static void rps_enable_interrupts(struct intel_rps *rps) 195 { 196 struct intel_gt *gt = rps_to_gt(rps); 197 198 GEM_BUG_ON(rps_uses_slpc(rps)); 199 200 GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n", 201 rps->pm_events, rps_pm_mask(rps, rps->last_freq)); 202 203 rps_reset_ei(rps); 204 205 spin_lock_irq(gt->irq_lock); 206 gen6_gt_pm_enable_irq(gt, rps->pm_events); 207 spin_unlock_irq(gt->irq_lock); 208 209 intel_uncore_write(gt->uncore, 210 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); 211 } 212 213 static void gen6_rps_reset_interrupts(struct intel_rps *rps) 214 { 215 gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); 216 } 217 218 static void gen11_rps_reset_interrupts(struct intel_rps *rps) 219 { 220 while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) 221 ; 222 } 223 224 static void rps_reset_interrupts(struct intel_rps *rps) 225 { 226 struct intel_gt *gt = rps_to_gt(rps); 227 228 spin_lock_irq(gt->irq_lock); 229 if (GRAPHICS_VER(gt->i915) >= 11) 230 gen11_rps_reset_interrupts(rps); 231 else 232 gen6_rps_reset_interrupts(rps); 233 234 rps->pm_iir = 0; 235 spin_unlock_irq(gt->irq_lock); 236 } 237 238 static void rps_disable_interrupts(struct intel_rps *rps) 239 { 240 struct intel_gt *gt = rps_to_gt(rps); 241 242 intel_uncore_write(gt->uncore, 243 GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); 244 245 spin_lock_irq(gt->irq_lock); 246 gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); 247 spin_unlock_irq(gt->irq_lock); 248 249 
intel_synchronize_irq(gt->i915); 250 251 /* 252 * Now that we will not be generating any more work, flush any 253 * outstanding tasks. As we are called on the RPS idle path, 254 * we will reset the GPU to minimum frequencies, so the current 255 * state of the worker can be discarded. 256 */ 257 cancel_work_sync(&rps->work); 258 259 rps_reset_interrupts(rps); 260 GT_TRACE(gt, "interrupts:off\n"); 261 } 262 263 static const struct cparams { 264 u16 i; 265 u16 t; 266 u16 m; 267 u16 c; 268 } cparams[] = { 269 { 1, 1333, 301, 28664 }, 270 { 1, 1066, 294, 24460 }, 271 { 1, 800, 294, 25192 }, 272 { 0, 1333, 276, 27605 }, 273 { 0, 1066, 276, 27605 }, 274 { 0, 800, 231, 23784 }, 275 }; 276 277 static void gen5_rps_init(struct intel_rps *rps) 278 { 279 struct drm_i915_private *i915 = rps_to_i915(rps); 280 struct intel_uncore *uncore = rps_to_uncore(rps); 281 u8 fmax, fmin, fstart; 282 u32 rgvmodectl; 283 int c_m, i; 284 285 if (i915->fsb_freq <= 3200) 286 c_m = 0; 287 else if (i915->fsb_freq <= 4800) 288 c_m = 1; 289 else 290 c_m = 2; 291 292 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 293 if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { 294 rps->ips.m = cparams[i].m; 295 rps->ips.c = cparams[i].c; 296 break; 297 } 298 } 299 300 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 301 302 /* Set up min, max, and cur for interrupt handling */ 303 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 304 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 305 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 306 MEMMODE_FSTART_SHIFT; 307 drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n", 308 fmax, fmin, fstart); 309 310 rps->min_freq = fmax; 311 rps->efficient_freq = fstart; 312 rps->max_freq = fmin; 313 } 314 315 static unsigned long 316 __ips_chipset_val(struct intel_ips *ips) 317 { 318 struct intel_uncore *uncore = 319 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 320 unsigned long now = jiffies_to_msecs(jiffies), dt; 321 unsigned long result; 322 u64 total, delta; 323 324 lockdep_assert_held(&mchdev_lock); 325 326 /* 327 * Prevent division-by-zero if we are asking too fast. 328 * Also, we don't get interesting results if we are polling 329 * faster than once in 10ms, so just return the saved value 330 * in such cases. 
331 */ 332 dt = now - ips->last_time1; 333 if (dt <= 10) 334 return ips->chipset_power; 335 336 /* FIXME: handle per-counter overflow */ 337 total = intel_uncore_read(uncore, DMIEC); 338 total += intel_uncore_read(uncore, DDREC); 339 total += intel_uncore_read(uncore, CSIEC); 340 341 delta = total - ips->last_count1; 342 343 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); 344 345 ips->last_count1 = total; 346 ips->last_time1 = now; 347 348 ips->chipset_power = result; 349 350 return result; 351 } 352 353 static unsigned long ips_mch_val(struct intel_uncore *uncore) 354 { 355 unsigned int m, x, b; 356 u32 tsfs; 357 358 tsfs = intel_uncore_read(uncore, TSFS); 359 x = intel_uncore_read8(uncore, TR1); 360 361 b = tsfs & TSFS_INTR_MASK; 362 m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; 363 364 return m * x / 127 - b; 365 } 366 367 static int _pxvid_to_vd(u8 pxvid) 368 { 369 if (pxvid == 0) 370 return 0; 371 372 if (pxvid >= 8 && pxvid < 31) 373 pxvid = 31; 374 375 return (pxvid + 2) * 125; 376 } 377 378 static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) 379 { 380 const int vd = _pxvid_to_vd(pxvid); 381 382 if (INTEL_INFO(i915)->is_mobile) 383 return max(vd - 1125, 0); 384 385 return vd; 386 } 387 388 static void __gen5_ips_update(struct intel_ips *ips) 389 { 390 struct intel_uncore *uncore = 391 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 392 u64 now, delta, dt; 393 u32 count; 394 395 lockdep_assert_held(&mchdev_lock); 396 397 now = ktime_get_raw_ns(); 398 dt = now - ips->last_time2; 399 do_div(dt, NSEC_PER_MSEC); 400 401 /* Don't divide by 0 */ 402 if (dt <= 10) 403 return; 404 405 count = intel_uncore_read(uncore, GFXEC); 406 delta = count - ips->last_count2; 407 408 ips->last_count2 = count; 409 ips->last_time2 = now; 410 411 /* More magic constants... 
*/ 412 ips->gfx_power = div_u64(delta * 1181, dt * 10); 413 } 414 415 static void gen5_rps_update(struct intel_rps *rps) 416 { 417 spin_lock_irq(&mchdev_lock); 418 __gen5_ips_update(&rps->ips); 419 spin_unlock_irq(&mchdev_lock); 420 } 421 422 static unsigned int gen5_invert_freq(struct intel_rps *rps, 423 unsigned int val) 424 { 425 /* Invert the frequency bin into an ips delay */ 426 val = rps->max_freq - val; 427 val = rps->min_freq + val; 428 429 return val; 430 } 431 432 static int __gen5_rps_set(struct intel_rps *rps, u8 val) 433 { 434 struct intel_uncore *uncore = rps_to_uncore(rps); 435 u16 rgvswctl; 436 437 lockdep_assert_held(&mchdev_lock); 438 439 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 440 if (rgvswctl & MEMCTL_CMD_STS) { 441 drm_dbg(&rps_to_i915(rps)->drm, 442 "gpu busy, RCS change rejected\n"); 443 return -EBUSY; /* still busy with another command */ 444 } 445 446 /* Invert the frequency bin into an ips delay */ 447 val = gen5_invert_freq(rps, val); 448 449 rgvswctl = 450 (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 451 (val << MEMCTL_FREQ_SHIFT) | 452 MEMCTL_SFCAVM; 453 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 454 intel_uncore_posting_read16(uncore, MEMSWCTL); 455 456 rgvswctl |= MEMCTL_CMD_STS; 457 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 458 459 return 0; 460 } 461 462 static int gen5_rps_set(struct intel_rps *rps, u8 val) 463 { 464 int err; 465 466 spin_lock_irq(&mchdev_lock); 467 err = __gen5_rps_set(rps, val); 468 spin_unlock_irq(&mchdev_lock); 469 470 return err; 471 } 472 473 static unsigned long intel_pxfreq(u32 vidfreq) 474 { 475 int div = (vidfreq & 0x3f0000) >> 16; 476 int post = (vidfreq & 0x3000) >> 12; 477 int pre = (vidfreq & 0x7); 478 479 if (!pre) 480 return 0; 481 482 return div * 133333 / (pre << post); 483 } 484 485 static unsigned int init_emon(struct intel_uncore *uncore) 486 { 487 u8 pxw[16]; 488 int i; 489 490 /* Disable to program */ 491 intel_uncore_write(uncore, ECR, 0); 492 intel_uncore_posting_read(uncore, ECR); 493 494 /* Program energy weights for various events */ 495 intel_uncore_write(uncore, SDEW, 0x15040d00); 496 intel_uncore_write(uncore, CSIEW0, 0x007f0000); 497 intel_uncore_write(uncore, CSIEW1, 0x1e220004); 498 intel_uncore_write(uncore, CSIEW2, 0x04000004); 499 500 for (i = 0; i < 5; i++) 501 intel_uncore_write(uncore, PEW(i), 0); 502 for (i = 0; i < 3; i++) 503 intel_uncore_write(uncore, DEW(i), 0); 504 505 /* Program P-state weights to account for frequency power adjustment */ 506 for (i = 0; i < 16; i++) { 507 u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); 508 unsigned int freq = intel_pxfreq(pxvidfreq); 509 unsigned int vid = 510 (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 511 unsigned int val; 512 513 val = vid * vid * freq / 1000 * 255; 514 val /= 127 * 127 * 900; 515 516 pxw[i] = val; 517 } 518 /* Render standby states get 0 weight */ 519 pxw[14] = 0; 520 pxw[15] = 0; 521 522 for (i = 0; i < 4; i++) { 523 intel_uncore_write(uncore, PXW(i), 524 pxw[i * 4 + 0] << 24 | 525 pxw[i * 4 + 1] << 16 | 526 pxw[i * 4 + 2] << 8 | 527 pxw[i * 4 + 3] << 0); 528 } 529 530 /* Adjust magic regs to magic values (more experimental results) */ 531 intel_uncore_write(uncore, OGW0, 0); 532 intel_uncore_write(uncore, OGW1, 0); 533 intel_uncore_write(uncore, EG0, 0x00007f00); 534 intel_uncore_write(uncore, EG1, 0x0000000e); 535 intel_uncore_write(uncore, EG2, 0x000e0000); 536 intel_uncore_write(uncore, EG3, 0x68000300); 537 intel_uncore_write(uncore, EG4, 0x42000000); 538 intel_uncore_write(uncore, EG5, 
0x00140031); 539 intel_uncore_write(uncore, EG6, 0); 540 intel_uncore_write(uncore, EG7, 0); 541 542 for (i = 0; i < 8; i++) 543 intel_uncore_write(uncore, PXWL(i), 0); 544 545 /* Enable PMON + select events */ 546 intel_uncore_write(uncore, ECR, 0x80000019); 547 548 return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; 549 } 550 551 static bool gen5_rps_enable(struct intel_rps *rps) 552 { 553 struct drm_i915_private *i915 = rps_to_i915(rps); 554 struct intel_uncore *uncore = rps_to_uncore(rps); 555 u8 fstart, vstart; 556 u32 rgvmodectl; 557 558 spin_lock_irq(&mchdev_lock); 559 560 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 561 562 /* Enable temp reporting */ 563 intel_uncore_write16(uncore, PMMISC, 564 intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); 565 intel_uncore_write16(uncore, TSC1, 566 intel_uncore_read16(uncore, TSC1) | TSE); 567 568 /* 100ms RC evaluation intervals */ 569 intel_uncore_write(uncore, RCUPEI, 100000); 570 intel_uncore_write(uncore, RCDNEI, 100000); 571 572 /* Set max/min thresholds to 90ms and 80ms respectively */ 573 intel_uncore_write(uncore, RCBMAXAVG, 90000); 574 intel_uncore_write(uncore, RCBMINAVG, 80000); 575 576 intel_uncore_write(uncore, MEMIHYST, 1); 577 578 /* Set up min, max, and cur for interrupt handling */ 579 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 580 MEMMODE_FSTART_SHIFT; 581 582 vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & 583 PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 584 585 intel_uncore_write(uncore, 586 MEMINTREN, 587 MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 588 589 intel_uncore_write(uncore, VIDSTART, vstart); 590 intel_uncore_posting_read(uncore, VIDSTART); 591 592 rgvmodectl |= MEMMODE_SWMODE_EN; 593 intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); 594 595 if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & 596 MEMCTL_CMD_STS) == 0, 10)) 597 drm_err(&uncore->i915->drm, 598 "stuck trying to change perf mode\n"); 599 mdelay(1); 600 601 __gen5_rps_set(rps, rps->cur_freq); 602 603 rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); 604 rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); 605 rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); 606 rps->ips.last_time1 = jiffies_to_msecs(jiffies); 607 608 rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); 609 rps->ips.last_time2 = ktime_get_raw_ns(); 610 611 spin_lock(&i915->irq_lock); 612 ilk_enable_display_irq(i915, DE_PCU_EVENT); 613 spin_unlock(&i915->irq_lock); 614 615 spin_unlock_irq(&mchdev_lock); 616 617 rps->ips.corr = init_emon(uncore); 618 619 return true; 620 } 621 622 static void gen5_rps_disable(struct intel_rps *rps) 623 { 624 struct drm_i915_private *i915 = rps_to_i915(rps); 625 struct intel_uncore *uncore = rps_to_uncore(rps); 626 u16 rgvswctl; 627 628 spin_lock_irq(&mchdev_lock); 629 630 spin_lock(&i915->irq_lock); 631 ilk_disable_display_irq(i915, DE_PCU_EVENT); 632 spin_unlock(&i915->irq_lock); 633 634 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 635 636 /* Ack interrupts, disable EFC interrupt */ 637 intel_uncore_rmw(uncore, MEMINTREN, MEMINT_EVAL_CHG_EN, 0); 638 intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 639 640 /* Go back to the starting frequency */ 641 __gen5_rps_set(rps, rps->idle_freq); 642 mdelay(1); 643 rgvswctl |= MEMCTL_CMD_STS; 644 intel_uncore_write(uncore, MEMSWCTL, rgvswctl); 645 mdelay(1); 646 647 spin_unlock_irq(&mchdev_lock); 648 } 649 650 static u32 rps_limits(struct intel_rps *rps, u8 val) 651 { 652 u32 limits; 653 654 /* 655 * Only set the down limit when we've reached the 
lowest level to avoid 656 * getting more interrupts, otherwise leave this clear. This prevents a 657 * race in the hw when coming out of rc6: There's a tiny window where 658 * the hw runs at the minimal clock before selecting the desired 659 * frequency, if the down threshold expires in that window we will not 660 * receive a down interrupt. 661 */ 662 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { 663 limits = rps->max_freq_softlimit << 23; 664 if (val <= rps->min_freq_softlimit) 665 limits |= rps->min_freq_softlimit << 14; 666 } else { 667 limits = rps->max_freq_softlimit << 24; 668 if (val <= rps->min_freq_softlimit) 669 limits |= rps->min_freq_softlimit << 16; 670 } 671 672 return limits; 673 } 674 675 static void rps_set_power(struct intel_rps *rps, int new_power) 676 { 677 struct intel_gt *gt = rps_to_gt(rps); 678 struct intel_uncore *uncore = gt->uncore; 679 u32 ei_up = 0, ei_down = 0; 680 681 lockdep_assert_held(&rps->power.mutex); 682 683 if (new_power == rps->power.mode) 684 return; 685 686 /* Note the units here are not exactly 1us, but 1280ns. */ 687 switch (new_power) { 688 case LOW_POWER: 689 ei_up = 16000; 690 ei_down = 32000; 691 break; 692 693 case BETWEEN: 694 ei_up = 13000; 695 ei_down = 32000; 696 break; 697 698 case HIGH_POWER: 699 ei_up = 10000; 700 ei_down = 32000; 701 break; 702 } 703 704 /* When byt can survive without system hang with dynamic 705 * sw freq adjustments, this restriction can be lifted. 706 */ 707 if (IS_VALLEYVIEW(gt->i915)) 708 goto skip_hw_write; 709 710 GT_TRACE(gt, 711 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", 712 new_power, 713 rps->power.up_threshold, ei_up, 714 rps->power.down_threshold, ei_down); 715 716 set(uncore, GEN6_RP_UP_EI, 717 intel_gt_ns_to_pm_interval(gt, ei_up * 1000)); 718 set(uncore, GEN6_RP_UP_THRESHOLD, 719 intel_gt_ns_to_pm_interval(gt, 720 ei_up * rps->power.up_threshold * 10)); 721 722 set(uncore, GEN6_RP_DOWN_EI, 723 intel_gt_ns_to_pm_interval(gt, ei_down * 1000)); 724 set(uncore, GEN6_RP_DOWN_THRESHOLD, 725 intel_gt_ns_to_pm_interval(gt, 726 ei_down * 727 rps->power.down_threshold * 10)); 728 729 set(uncore, GEN6_RP_CONTROL, 730 (GRAPHICS_VER(gt->i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | 731 GEN6_RP_MEDIA_HW_NORMAL_MODE | 732 GEN6_RP_MEDIA_IS_GFX | 733 GEN6_RP_ENABLE | 734 GEN6_RP_UP_BUSY_AVG | 735 GEN6_RP_DOWN_IDLE_AVG); 736 737 skip_hw_write: 738 rps->power.mode = new_power; 739 } 740 741 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) 742 { 743 int new_power; 744 745 new_power = rps->power.mode; 746 switch (rps->power.mode) { 747 case LOW_POWER: 748 if (val > rps->efficient_freq + 1 && 749 val > rps->cur_freq) 750 new_power = BETWEEN; 751 break; 752 753 case BETWEEN: 754 if (val <= rps->efficient_freq && 755 val < rps->cur_freq) 756 new_power = LOW_POWER; 757 else if (val >= rps->rp0_freq && 758 val > rps->cur_freq) 759 new_power = HIGH_POWER; 760 break; 761 762 case HIGH_POWER: 763 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && 764 val < rps->cur_freq) 765 new_power = BETWEEN; 766 break; 767 } 768 /* Max/min bins are special */ 769 if (val <= rps->min_freq_softlimit) 770 new_power = LOW_POWER; 771 if (val >= rps->max_freq_softlimit) 772 new_power = HIGH_POWER; 773 774 mutex_lock(&rps->power.mutex); 775 if (rps->power.interactive) 776 new_power = HIGH_POWER; 777 rps_set_power(rps, new_power); 778 mutex_unlock(&rps->power.mutex); 779 } 780 781 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) 782 { 783 GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", 784 str_yes_no(interactive)); 785 786 mutex_lock(&rps->power.mutex); 787 if (interactive) { 788 if (!rps->power.interactive++ && intel_rps_is_active(rps)) 789 rps_set_power(rps, HIGH_POWER); 790 } else { 791 GEM_BUG_ON(!rps->power.interactive); 792 rps->power.interactive--; 793 } 794 mutex_unlock(&rps->power.mutex); 795 } 796 797 static int gen6_rps_set(struct intel_rps *rps, u8 val) 798 { 799 struct intel_uncore *uncore = rps_to_uncore(rps); 800 struct drm_i915_private *i915 = rps_to_i915(rps); 801 u32 swreq; 802 803 GEM_BUG_ON(rps_uses_slpc(rps)); 804 805 if (GRAPHICS_VER(i915) >= 9) 806 swreq = GEN9_FREQUENCY(val); 807 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 808 swreq = HSW_FREQUENCY(val); 809 else 810 swreq = (GEN6_FREQUENCY(val) | 811 GEN6_OFFSET(0) | 812 GEN6_AGGRESSIVE_TURBO); 813 set(uncore, GEN6_RPNSWREQ, swreq); 814 815 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n", 816 val, intel_gpu_freq(rps, val), swreq); 817 818 return 0; 819 } 820 821 static int vlv_rps_set(struct intel_rps *rps, u8 val) 822 { 823 struct drm_i915_private *i915 = rps_to_i915(rps); 824 int err; 825 826 vlv_punit_get(i915); 827 err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); 828 vlv_punit_put(i915); 829 830 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n", 831 val, intel_gpu_freq(rps, val)); 832 833 return err; 834 } 835 836 static int rps_set(struct intel_rps *rps, u8 val, bool update) 837 { 838 struct drm_i915_private *i915 = rps_to_i915(rps); 839 int err; 840 841 if (val == rps->last_freq) 842 return 0; 843 844 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 845 err = vlv_rps_set(rps, val); 846 else if (GRAPHICS_VER(i915) >= 6) 847 err = gen6_rps_set(rps, val); 848 else 849 err = gen5_rps_set(rps, val); 850 if (err) 851 return err; 852 853 if (update && GRAPHICS_VER(i915) >= 6) 854 gen6_rps_set_thresholds(rps, val); 855 rps->last_freq = val; 856 857 return 0; 858 } 859 860 void intel_rps_unpark(struct intel_rps *rps) 861 { 862 if (!intel_rps_is_enabled(rps)) 863 return; 864 865 GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); 866 867 /* 868 * Use the user's desired frequency as a guide, but for better 869 * performance, jump 
directly to RPe as our starting frequency. 870 */ 871 mutex_lock(&rps->lock); 872 873 intel_rps_set_active(rps); 874 intel_rps_set(rps, 875 clamp(rps->cur_freq, 876 rps->min_freq_softlimit, 877 rps->max_freq_softlimit)); 878 879 mutex_unlock(&rps->lock); 880 881 rps->pm_iir = 0; 882 if (intel_rps_has_interrupts(rps)) 883 rps_enable_interrupts(rps); 884 if (intel_rps_uses_timer(rps)) 885 rps_start_timer(rps); 886 887 if (GRAPHICS_VER(rps_to_i915(rps)) == 5) 888 gen5_rps_update(rps); 889 } 890 891 void intel_rps_park(struct intel_rps *rps) 892 { 893 int adj; 894 895 if (!intel_rps_is_enabled(rps)) 896 return; 897 898 if (!intel_rps_clear_active(rps)) 899 return; 900 901 if (intel_rps_uses_timer(rps)) 902 rps_stop_timer(rps); 903 if (intel_rps_has_interrupts(rps)) 904 rps_disable_interrupts(rps); 905 906 if (rps->last_freq <= rps->idle_freq) 907 return; 908 909 /* 910 * The punit delays the write of the frequency and voltage until it 911 * determines the GPU is awake. During normal usage we don't want to 912 * waste power changing the frequency if the GPU is sleeping (rc6). 913 * However, the GPU and driver is now idle and we do not want to delay 914 * switching to minimum voltage (reducing power whilst idle) as we do 915 * not expect to be woken in the near future and so must flush the 916 * change by waking the device. 917 * 918 * We choose to take the media powerwell (either would do to trick the 919 * punit into committing the voltage change) as that takes a lot less 920 * power than the render powerwell. 921 */ 922 intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); 923 rps_set(rps, rps->idle_freq, false); 924 intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); 925 926 /* 927 * Since we will try and restart from the previously requested 928 * frequency on unparking, treat this idle point as a downclock 929 * interrupt and reduce the frequency for resume. If we park/unpark 930 * more frequently than the rps worker can run, we will not respond 931 * to any EI and never see a change in frequency. 932 * 933 * (Note we accommodate Cherryview's limitation of only using an 934 * even bin by applying it to all.) 
935 */ 936 adj = rps->last_adj; 937 if (adj < 0) 938 adj *= 2; 939 else /* CHV needs even encode values */ 940 adj = -2; 941 rps->last_adj = adj; 942 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); 943 if (rps->cur_freq < rps->efficient_freq) { 944 rps->cur_freq = rps->efficient_freq; 945 rps->last_adj = 0; 946 } 947 948 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); 949 } 950 951 u32 intel_rps_get_boost_frequency(struct intel_rps *rps) 952 { 953 struct intel_guc_slpc *slpc; 954 955 if (rps_uses_slpc(rps)) { 956 slpc = rps_to_slpc(rps); 957 958 return slpc->boost_freq; 959 } else { 960 return intel_gpu_freq(rps, rps->boost_freq); 961 } 962 } 963 964 static int rps_set_boost_freq(struct intel_rps *rps, u32 val) 965 { 966 bool boost = false; 967 968 /* Validate against (static) hardware limits */ 969 val = intel_freq_opcode(rps, val); 970 if (val < rps->min_freq || val > rps->max_freq) 971 return -EINVAL; 972 973 mutex_lock(&rps->lock); 974 if (val != rps->boost_freq) { 975 rps->boost_freq = val; 976 boost = atomic_read(&rps->num_waiters); 977 } 978 mutex_unlock(&rps->lock); 979 if (boost) 980 queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); 981 982 return 0; 983 } 984 985 int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) 986 { 987 struct intel_guc_slpc *slpc; 988 989 if (rps_uses_slpc(rps)) { 990 slpc = rps_to_slpc(rps); 991 992 return intel_guc_slpc_set_boost_freq(slpc, freq); 993 } else { 994 return rps_set_boost_freq(rps, freq); 995 } 996 } 997 998 void intel_rps_dec_waiters(struct intel_rps *rps) 999 { 1000 struct intel_guc_slpc *slpc; 1001 1002 if (rps_uses_slpc(rps)) { 1003 slpc = rps_to_slpc(rps); 1004 1005 intel_guc_slpc_dec_waiters(slpc); 1006 } else { 1007 atomic_dec(&rps->num_waiters); 1008 } 1009 } 1010 1011 void intel_rps_boost(struct i915_request *rq) 1012 { 1013 struct intel_guc_slpc *slpc; 1014 1015 if (i915_request_signaled(rq) || i915_request_has_waitboost(rq)) 1016 return; 1017 1018 /* Serializes with i915_request_retire() */ 1019 if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { 1020 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; 1021 1022 if (rps_uses_slpc(rps)) { 1023 slpc = rps_to_slpc(rps); 1024 1025 if (slpc->min_freq_softlimit >= slpc->boost_freq) 1026 return; 1027 1028 /* Return if old value is non zero */ 1029 if (!atomic_fetch_inc(&slpc->num_waiters)) { 1030 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", 1031 rq->fence.context, rq->fence.seqno); 1032 queue_work(rps_to_gt(rps)->i915->unordered_wq, 1033 &slpc->boost_work); 1034 } 1035 1036 return; 1037 } 1038 1039 if (atomic_fetch_inc(&rps->num_waiters)) 1040 return; 1041 1042 if (!intel_rps_is_active(rps)) 1043 return; 1044 1045 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", 1046 rq->fence.context, rq->fence.seqno); 1047 1048 if (READ_ONCE(rps->cur_freq) < rps->boost_freq) 1049 queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); 1050 1051 WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */ 1052 } 1053 } 1054 1055 int intel_rps_set(struct intel_rps *rps, u8 val) 1056 { 1057 int err; 1058 1059 lockdep_assert_held(&rps->lock); 1060 GEM_BUG_ON(val > rps->max_freq); 1061 GEM_BUG_ON(val < rps->min_freq); 1062 1063 if (intel_rps_is_active(rps)) { 1064 err = rps_set(rps, val, true); 1065 if (err) 1066 return err; 1067 1068 /* 1069 * Make sure we continue to get interrupts 1070 * until we hit the minimum or maximum frequencies. 
1071 */ 1072 if (intel_rps_has_interrupts(rps)) { 1073 struct intel_uncore *uncore = rps_to_uncore(rps); 1074 1075 set(uncore, 1076 GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val)); 1077 1078 set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val)); 1079 } 1080 } 1081 1082 rps->cur_freq = val; 1083 return 0; 1084 } 1085 1086 static u32 intel_rps_read_state_cap(struct intel_rps *rps) 1087 { 1088 struct drm_i915_private *i915 = rps_to_i915(rps); 1089 struct intel_uncore *uncore = rps_to_uncore(rps); 1090 1091 if (IS_PONTEVECCHIO(i915)) 1092 return intel_uncore_read(uncore, PVC_RP_STATE_CAP); 1093 else if (IS_XEHPSDV(i915)) 1094 return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); 1095 else if (IS_GEN9_LP(i915)) 1096 return intel_uncore_read(uncore, BXT_RP_STATE_CAP); 1097 else 1098 return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); 1099 } 1100 1101 static void 1102 mtl_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) 1103 { 1104 struct intel_uncore *uncore = rps_to_uncore(rps); 1105 u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ? 1106 intel_uncore_read(uncore, MTL_MEDIAP_STATE_CAP) : 1107 intel_uncore_read(uncore, MTL_RP_STATE_CAP); 1108 u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ? 1109 intel_uncore_read(uncore, MTL_MPE_FREQUENCY) : 1110 intel_uncore_read(uncore, MTL_GT_RPE_FREQUENCY); 1111 1112 /* MTL values are in units of 16.67 MHz */ 1113 caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap); 1114 caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap); 1115 caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe); 1116 } 1117 1118 static void 1119 __gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) 1120 { 1121 struct drm_i915_private *i915 = rps_to_i915(rps); 1122 u32 rp_state_cap; 1123 1124 rp_state_cap = intel_rps_read_state_cap(rps); 1125 1126 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 1127 if (IS_GEN9_LP(i915)) { 1128 caps->rp0_freq = (rp_state_cap >> 16) & 0xff; 1129 caps->rp1_freq = (rp_state_cap >> 8) & 0xff; 1130 caps->min_freq = (rp_state_cap >> 0) & 0xff; 1131 } else { 1132 caps->rp0_freq = (rp_state_cap >> 0) & 0xff; 1133 if (GRAPHICS_VER(i915) >= 10) 1134 caps->rp1_freq = REG_FIELD_GET(RPE_MASK, 1135 intel_uncore_read(to_gt(i915)->uncore, 1136 GEN10_FREQ_INFO_REC)); 1137 else 1138 caps->rp1_freq = (rp_state_cap >> 8) & 0xff; 1139 caps->min_freq = (rp_state_cap >> 16) & 0xff; 1140 } 1141 1142 if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { 1143 /* 1144 * In this case rp_state_cap register reports frequencies in 1145 * units of 50 MHz. Convert these to the actual "hw unit", i.e. 
1146 * units of 16.67 MHz 1147 */ 1148 caps->rp0_freq *= GEN9_FREQ_SCALER; 1149 caps->rp1_freq *= GEN9_FREQ_SCALER; 1150 caps->min_freq *= GEN9_FREQ_SCALER; 1151 } 1152 } 1153 1154 /** 1155 * gen6_rps_get_freq_caps - Get freq caps exposed by HW 1156 * @rps: the intel_rps structure 1157 * @caps: returned freq caps 1158 * 1159 * Returned "caps" frequencies should be converted to MHz using 1160 * intel_gpu_freq() 1161 */ 1162 void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) 1163 { 1164 struct drm_i915_private *i915 = rps_to_i915(rps); 1165 1166 if (IS_METEORLAKE(i915)) 1167 return mtl_get_freq_caps(rps, caps); 1168 else 1169 return __gen6_rps_get_freq_caps(rps, caps); 1170 } 1171 1172 static void gen6_rps_init(struct intel_rps *rps) 1173 { 1174 struct drm_i915_private *i915 = rps_to_i915(rps); 1175 struct intel_rps_freq_caps caps; 1176 1177 gen6_rps_get_freq_caps(rps, &caps); 1178 rps->rp0_freq = caps.rp0_freq; 1179 rps->rp1_freq = caps.rp1_freq; 1180 rps->min_freq = caps.min_freq; 1181 1182 /* hw_max = RP0 until we check for overclocking */ 1183 rps->max_freq = rps->rp0_freq; 1184 1185 rps->efficient_freq = rps->rp1_freq; 1186 if (IS_HASWELL(i915) || IS_BROADWELL(i915) || 1187 IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { 1188 u32 ddcc_status = 0; 1189 u32 mult = 1; 1190 1191 if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) 1192 mult = GEN9_FREQ_SCALER; 1193 if (snb_pcode_read(rps_to_gt(rps)->uncore, 1194 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 1195 &ddcc_status, NULL) == 0) 1196 rps->efficient_freq = 1197 clamp_t(u32, 1198 ((ddcc_status >> 8) & 0xff) * mult, 1199 rps->min_freq, 1200 rps->max_freq); 1201 } 1202 } 1203 1204 static bool rps_reset(struct intel_rps *rps) 1205 { 1206 struct drm_i915_private *i915 = rps_to_i915(rps); 1207 1208 /* force a reset */ 1209 rps->power.mode = -1; 1210 rps->last_freq = -1; 1211 1212 if (rps_set(rps, rps->min_freq, true)) { 1213 drm_err(&i915->drm, "Failed to reset RPS to initial values\n"); 1214 return false; 1215 } 1216 1217 rps->cur_freq = rps->min_freq; 1218 return true; 1219 } 1220 1221 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 1222 static bool gen9_rps_enable(struct intel_rps *rps) 1223 { 1224 struct intel_gt *gt = rps_to_gt(rps); 1225 struct intel_uncore *uncore = gt->uncore; 1226 1227 /* Program defaults and thresholds for RPS */ 1228 if (GRAPHICS_VER(gt->i915) == 9) 1229 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1230 GEN9_FREQUENCY(rps->rp1_freq)); 1231 1232 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); 1233 1234 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1235 1236 return rps_reset(rps); 1237 } 1238 1239 static bool gen8_rps_enable(struct intel_rps *rps) 1240 { 1241 struct intel_uncore *uncore = rps_to_uncore(rps); 1242 1243 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1244 HSW_FREQUENCY(rps->rp1_freq)); 1245 1246 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1247 1248 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1249 1250 return rps_reset(rps); 1251 } 1252 1253 static bool gen6_rps_enable(struct intel_rps *rps) 1254 { 1255 struct intel_uncore *uncore = rps_to_uncore(rps); 1256 1257 /* Power down if completely idle for over 50ms */ 1258 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); 1259 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1260 1261 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1262 GEN6_PM_RP_DOWN_THRESHOLD | 1263 GEN6_PM_RP_DOWN_TIMEOUT); 1264 1265 return 
rps_reset(rps); 1266 } 1267 1268 static int chv_rps_max_freq(struct intel_rps *rps) 1269 { 1270 struct drm_i915_private *i915 = rps_to_i915(rps); 1271 struct intel_gt *gt = rps_to_gt(rps); 1272 u32 val; 1273 1274 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1275 1276 switch (gt->info.sseu.eu_total) { 1277 case 8: 1278 /* (2 * 4) config */ 1279 val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; 1280 break; 1281 case 12: 1282 /* (2 * 6) config */ 1283 val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; 1284 break; 1285 case 16: 1286 /* (2 * 8) config */ 1287 default: 1288 /* Setting (2 * 8) Min RP0 for any other combination */ 1289 val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; 1290 break; 1291 } 1292 1293 return val & FB_GFX_FREQ_FUSE_MASK; 1294 } 1295 1296 static int chv_rps_rpe_freq(struct intel_rps *rps) 1297 { 1298 struct drm_i915_private *i915 = rps_to_i915(rps); 1299 u32 val; 1300 1301 val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); 1302 val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; 1303 1304 return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 1305 } 1306 1307 static int chv_rps_guar_freq(struct intel_rps *rps) 1308 { 1309 struct drm_i915_private *i915 = rps_to_i915(rps); 1310 u32 val; 1311 1312 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1313 1314 return val & FB_GFX_FREQ_FUSE_MASK; 1315 } 1316 1317 static u32 chv_rps_min_freq(struct intel_rps *rps) 1318 { 1319 struct drm_i915_private *i915 = rps_to_i915(rps); 1320 u32 val; 1321 1322 val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); 1323 val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; 1324 1325 return val & FB_GFX_FREQ_FUSE_MASK; 1326 } 1327 1328 static bool chv_rps_enable(struct intel_rps *rps) 1329 { 1330 struct intel_uncore *uncore = rps_to_uncore(rps); 1331 struct drm_i915_private *i915 = rps_to_i915(rps); 1332 u32 val; 1333 1334 /* 1: Program defaults and thresholds for RPS*/ 1335 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1336 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1337 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1338 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1339 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1340 1341 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1342 1343 /* 2: Enable RPS */ 1344 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1345 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1346 GEN6_RP_MEDIA_IS_GFX | 1347 GEN6_RP_ENABLE | 1348 GEN6_RP_UP_BUSY_AVG | 1349 GEN6_RP_DOWN_IDLE_AVG); 1350 1351 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1352 GEN6_PM_RP_DOWN_THRESHOLD | 1353 GEN6_PM_RP_DOWN_TIMEOUT); 1354 1355 /* Setting Fixed Bias */ 1356 vlv_punit_get(i915); 1357 1358 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; 1359 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1360 1361 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1362 1363 vlv_punit_put(i915); 1364 1365 /* RPS code assumes GPLL is used */ 1366 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1367 "GPLL not enabled\n"); 1368 1369 drm_dbg(&i915->drm, "GPLL enabled? 
%s\n", 1370 str_yes_no(val & GPLLENABLE)); 1371 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1372 1373 return rps_reset(rps); 1374 } 1375 1376 static int vlv_rps_guar_freq(struct intel_rps *rps) 1377 { 1378 struct drm_i915_private *i915 = rps_to_i915(rps); 1379 u32 val, rp1; 1380 1381 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1382 1383 rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; 1384 rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 1385 1386 return rp1; 1387 } 1388 1389 static int vlv_rps_max_freq(struct intel_rps *rps) 1390 { 1391 struct drm_i915_private *i915 = rps_to_i915(rps); 1392 u32 val, rp0; 1393 1394 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1395 1396 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 1397 /* Clamp to max */ 1398 rp0 = min_t(u32, rp0, 0xea); 1399 1400 return rp0; 1401 } 1402 1403 static int vlv_rps_rpe_freq(struct intel_rps *rps) 1404 { 1405 struct drm_i915_private *i915 = rps_to_i915(rps); 1406 u32 val, rpe; 1407 1408 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 1409 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 1410 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 1411 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 1412 1413 return rpe; 1414 } 1415 1416 static int vlv_rps_min_freq(struct intel_rps *rps) 1417 { 1418 struct drm_i915_private *i915 = rps_to_i915(rps); 1419 u32 val; 1420 1421 val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; 1422 /* 1423 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 1424 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 1425 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 1426 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 1427 * to make sure it matches what Punit accepts. 1428 */ 1429 return max_t(u32, val, 0xc0); 1430 } 1431 1432 static bool vlv_rps_enable(struct intel_rps *rps) 1433 { 1434 struct intel_uncore *uncore = rps_to_uncore(rps); 1435 struct drm_i915_private *i915 = rps_to_i915(rps); 1436 u32 val; 1437 1438 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1439 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1440 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1441 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1442 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1443 1444 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1445 1446 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1447 GEN6_RP_MEDIA_TURBO | 1448 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1449 GEN6_RP_MEDIA_IS_GFX | 1450 GEN6_RP_ENABLE | 1451 GEN6_RP_UP_BUSY_AVG | 1452 GEN6_RP_DOWN_IDLE_CONT); 1453 1454 /* WaGsvRC0ResidencyMethod:vlv */ 1455 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; 1456 1457 vlv_punit_get(i915); 1458 1459 /* Setting Fixed Bias */ 1460 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; 1461 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1462 1463 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1464 1465 vlv_punit_put(i915); 1466 1467 /* RPS code assumes GPLL is used */ 1468 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1469 "GPLL not enabled\n"); 1470 1471 drm_dbg(&i915->drm, "GPLL enabled? 
%s\n", 1472 str_yes_no(val & GPLLENABLE)); 1473 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1474 1475 return rps_reset(rps); 1476 } 1477 1478 static unsigned long __ips_gfx_val(struct intel_ips *ips) 1479 { 1480 struct intel_rps *rps = container_of(ips, typeof(*rps), ips); 1481 struct intel_uncore *uncore = rps_to_uncore(rps); 1482 unsigned int t, state1, state2; 1483 u32 pxvid, ext_v; 1484 u64 corr, corr2; 1485 1486 lockdep_assert_held(&mchdev_lock); 1487 1488 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); 1489 pxvid = (pxvid >> 24) & 0x7f; 1490 ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); 1491 1492 state1 = ext_v; 1493 1494 /* Revel in the empirically derived constants */ 1495 1496 /* Correction factor in 1/100000 units */ 1497 t = ips_mch_val(uncore); 1498 if (t > 80) 1499 corr = t * 2349 + 135940; 1500 else if (t >= 50) 1501 corr = t * 964 + 29317; 1502 else /* < 50 */ 1503 corr = t * 301 + 1004; 1504 1505 corr = div_u64(corr * 150142 * state1, 10000) - 78642; 1506 corr2 = div_u64(corr, 100000) * ips->corr; 1507 1508 state2 = div_u64(corr2 * state1, 10000); 1509 state2 /= 100; /* convert to mW */ 1510 1511 __gen5_ips_update(ips); 1512 1513 return ips->gfx_power + state2; 1514 } 1515 1516 static bool has_busy_stats(struct intel_rps *rps) 1517 { 1518 struct intel_engine_cs *engine; 1519 enum intel_engine_id id; 1520 1521 for_each_engine(engine, rps_to_gt(rps), id) { 1522 if (!intel_engine_supports_stats(engine)) 1523 return false; 1524 } 1525 1526 return true; 1527 } 1528 1529 void intel_rps_enable(struct intel_rps *rps) 1530 { 1531 struct drm_i915_private *i915 = rps_to_i915(rps); 1532 struct intel_uncore *uncore = rps_to_uncore(rps); 1533 bool enabled = false; 1534 1535 if (!HAS_RPS(i915)) 1536 return; 1537 1538 if (rps_uses_slpc(rps)) 1539 return; 1540 1541 intel_gt_check_clock_frequency(rps_to_gt(rps)); 1542 1543 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1544 if (rps->max_freq <= rps->min_freq) 1545 /* leave disabled, no room for dynamic reclocking */; 1546 else if (IS_CHERRYVIEW(i915)) 1547 enabled = chv_rps_enable(rps); 1548 else if (IS_VALLEYVIEW(i915)) 1549 enabled = vlv_rps_enable(rps); 1550 else if (GRAPHICS_VER(i915) >= 9) 1551 enabled = gen9_rps_enable(rps); 1552 else if (GRAPHICS_VER(i915) >= 8) 1553 enabled = gen8_rps_enable(rps); 1554 else if (GRAPHICS_VER(i915) >= 6) 1555 enabled = gen6_rps_enable(rps); 1556 else if (IS_IRONLAKE_M(i915)) 1557 enabled = gen5_rps_enable(rps); 1558 else 1559 MISSING_CASE(GRAPHICS_VER(i915)); 1560 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 1561 if (!enabled) 1562 return; 1563 1564 GT_TRACE(rps_to_gt(rps), 1565 "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n", 1566 rps->min_freq, rps->max_freq, 1567 intel_gpu_freq(rps, rps->min_freq), 1568 intel_gpu_freq(rps, rps->max_freq), 1569 rps->power.up_threshold, 1570 rps->power.down_threshold); 1571 1572 GEM_BUG_ON(rps->max_freq < rps->min_freq); 1573 GEM_BUG_ON(rps->idle_freq > rps->max_freq); 1574 1575 GEM_BUG_ON(rps->efficient_freq < rps->min_freq); 1576 GEM_BUG_ON(rps->efficient_freq > rps->max_freq); 1577 1578 if (has_busy_stats(rps)) 1579 intel_rps_set_timer(rps); 1580 else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11) 1581 intel_rps_set_interrupts(rps); 1582 else 1583 /* Ironlake currently uses intel_ips.ko */ {} 1584 1585 intel_rps_set_enabled(rps); 1586 } 1587 1588 static void gen6_rps_disable(struct intel_rps *rps) 1589 { 1590 set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); 1591 } 1592 1593 void intel_rps_disable(struct intel_rps *rps) 
1594 { 1595 struct drm_i915_private *i915 = rps_to_i915(rps); 1596 1597 if (!intel_rps_is_enabled(rps)) 1598 return; 1599 1600 intel_rps_clear_enabled(rps); 1601 intel_rps_clear_interrupts(rps); 1602 intel_rps_clear_timer(rps); 1603 1604 if (GRAPHICS_VER(i915) >= 6) 1605 gen6_rps_disable(rps); 1606 else if (IS_IRONLAKE_M(i915)) 1607 gen5_rps_disable(rps); 1608 } 1609 1610 static int byt_gpu_freq(struct intel_rps *rps, int val) 1611 { 1612 /* 1613 * N = val - 0xb7 1614 * Slow = Fast = GPLL ref * N 1615 */ 1616 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); 1617 } 1618 1619 static int byt_freq_opcode(struct intel_rps *rps, int val) 1620 { 1621 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; 1622 } 1623 1624 static int chv_gpu_freq(struct intel_rps *rps, int val) 1625 { 1626 /* 1627 * N = val / 2 1628 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 1629 */ 1630 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); 1631 } 1632 1633 static int chv_freq_opcode(struct intel_rps *rps, int val) 1634 { 1635 /* CHV needs even values */ 1636 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; 1637 } 1638 1639 int intel_gpu_freq(struct intel_rps *rps, int val) 1640 { 1641 struct drm_i915_private *i915 = rps_to_i915(rps); 1642 1643 if (GRAPHICS_VER(i915) >= 9) 1644 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 1645 GEN9_FREQ_SCALER); 1646 else if (IS_CHERRYVIEW(i915)) 1647 return chv_gpu_freq(rps, val); 1648 else if (IS_VALLEYVIEW(i915)) 1649 return byt_gpu_freq(rps, val); 1650 else if (GRAPHICS_VER(i915) >= 6) 1651 return val * GT_FREQUENCY_MULTIPLIER; 1652 else 1653 return val; 1654 } 1655 1656 int intel_freq_opcode(struct intel_rps *rps, int val) 1657 { 1658 struct drm_i915_private *i915 = rps_to_i915(rps); 1659 1660 if (GRAPHICS_VER(i915) >= 9) 1661 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 1662 GT_FREQUENCY_MULTIPLIER); 1663 else if (IS_CHERRYVIEW(i915)) 1664 return chv_freq_opcode(rps, val); 1665 else if (IS_VALLEYVIEW(i915)) 1666 return byt_freq_opcode(rps, val); 1667 else if (GRAPHICS_VER(i915) >= 6) 1668 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 1669 else 1670 return val; 1671 } 1672 1673 static void vlv_init_gpll_ref_freq(struct intel_rps *rps) 1674 { 1675 struct drm_i915_private *i915 = rps_to_i915(rps); 1676 1677 rps->gpll_ref_freq = 1678 vlv_get_cck_clock(i915, "GPLL ref", 1679 CCK_GPLL_CLOCK_CONTROL, 1680 i915->czclk_freq); 1681 1682 drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", 1683 rps->gpll_ref_freq); 1684 } 1685 1686 static void vlv_rps_init(struct intel_rps *rps) 1687 { 1688 struct drm_i915_private *i915 = rps_to_i915(rps); 1689 1690 vlv_iosf_sb_get(i915, 1691 BIT(VLV_IOSF_SB_PUNIT) | 1692 BIT(VLV_IOSF_SB_NC) | 1693 BIT(VLV_IOSF_SB_CCK)); 1694 1695 vlv_init_gpll_ref_freq(rps); 1696 1697 rps->max_freq = vlv_rps_max_freq(rps); 1698 rps->rp0_freq = rps->max_freq; 1699 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1700 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1701 1702 rps->efficient_freq = vlv_rps_rpe_freq(rps); 1703 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1704 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1705 1706 rps->rp1_freq = vlv_rps_guar_freq(rps); 1707 drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 1708 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1709 1710 rps->min_freq = vlv_rps_min_freq(rps); 1711 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1712 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1713 
1714 vlv_iosf_sb_put(i915, 1715 BIT(VLV_IOSF_SB_PUNIT) | 1716 BIT(VLV_IOSF_SB_NC) | 1717 BIT(VLV_IOSF_SB_CCK)); 1718 } 1719 1720 static void chv_rps_init(struct intel_rps *rps) 1721 { 1722 struct drm_i915_private *i915 = rps_to_i915(rps); 1723 1724 vlv_iosf_sb_get(i915, 1725 BIT(VLV_IOSF_SB_PUNIT) | 1726 BIT(VLV_IOSF_SB_NC) | 1727 BIT(VLV_IOSF_SB_CCK)); 1728 1729 vlv_init_gpll_ref_freq(rps); 1730 1731 rps->max_freq = chv_rps_max_freq(rps); 1732 rps->rp0_freq = rps->max_freq; 1733 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1734 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1735 1736 rps->efficient_freq = chv_rps_rpe_freq(rps); 1737 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1738 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1739 1740 rps->rp1_freq = chv_rps_guar_freq(rps); 1741 drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n", 1742 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1743 1744 rps->min_freq = chv_rps_min_freq(rps); 1745 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1746 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1747 1748 vlv_iosf_sb_put(i915, 1749 BIT(VLV_IOSF_SB_PUNIT) | 1750 BIT(VLV_IOSF_SB_NC) | 1751 BIT(VLV_IOSF_SB_CCK)); 1752 1753 drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq | 1754 rps->rp1_freq | rps->min_freq) & 1, 1755 "Odd GPU freq values\n"); 1756 } 1757 1758 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) 1759 { 1760 ei->ktime = ktime_get_raw(); 1761 ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); 1762 ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); 1763 } 1764 1765 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) 1766 { 1767 struct intel_uncore *uncore = rps_to_uncore(rps); 1768 const struct intel_rps_ei *prev = &rps->ei; 1769 struct intel_rps_ei now; 1770 u32 events = 0; 1771 1772 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1773 return 0; 1774 1775 vlv_c0_read(uncore, &now); 1776 1777 #ifdef __linux__ 1778 if (prev->ktime) { 1779 #else 1780 if (ktime_to_ns(prev->ktime)) { 1781 #endif 1782 u64 time, c0; 1783 u32 render, media; 1784 1785 time = ktime_us_delta(now.ktime, prev->ktime); 1786 1787 time *= rps_to_i915(rps)->czclk_freq; 1788 1789 /* Workload can be split between render + media, 1790 * e.g. SwapBuffers being blitted in X after being rendered in 1791 * mesa. To account for this we need to combine both engines 1792 * into our activity counter. 1793 */ 1794 render = now.render_c0 - prev->render_c0; 1795 media = now.media_c0 - prev->media_c0; 1796 c0 = max(render, media); 1797 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1798 1799 if (c0 > time * rps->power.up_threshold) 1800 events = GEN6_PM_RP_UP_THRESHOLD; 1801 else if (c0 < time * rps->power.down_threshold) 1802 events = GEN6_PM_RP_DOWN_THRESHOLD; 1803 } 1804 1805 rps->ei = now; 1806 return events; 1807 } 1808 1809 static void rps_work(struct work_struct *work) 1810 { 1811 struct intel_rps *rps = container_of(work, typeof(*rps), work); 1812 struct intel_gt *gt = rps_to_gt(rps); 1813 struct drm_i915_private *i915 = rps_to_i915(rps); 1814 bool client_boost = false; 1815 int new_freq, adj, min, max; 1816 u32 pm_iir = 0; 1817 1818 spin_lock_irq(gt->irq_lock); 1819 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; 1820 client_boost = atomic_read(&rps->num_waiters); 1821 spin_unlock_irq(gt->irq_lock); 1822 1823 /* Make sure we didn't queue anything we're not going to process. 
*/ 1824 if (!pm_iir && !client_boost) 1825 goto out; 1826 1827 mutex_lock(&rps->lock); 1828 if (!intel_rps_is_active(rps)) { 1829 mutex_unlock(&rps->lock); 1830 return; 1831 } 1832 1833 pm_iir |= vlv_wa_c0_ei(rps, pm_iir); 1834 1835 adj = rps->last_adj; 1836 new_freq = rps->cur_freq; 1837 min = rps->min_freq_softlimit; 1838 max = rps->max_freq_softlimit; 1839 if (client_boost) 1840 max = rps->max_freq; 1841 1842 GT_TRACE(gt, 1843 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", 1844 pm_iir, str_yes_no(client_boost), 1845 adj, new_freq, min, max); 1846 1847 if (client_boost && new_freq < rps->boost_freq) { 1848 new_freq = rps->boost_freq; 1849 adj = 0; 1850 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1851 if (adj > 0) 1852 adj *= 2; 1853 else /* CHV needs even encode values */ 1854 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; 1855 1856 if (new_freq >= rps->max_freq_softlimit) 1857 adj = 0; 1858 } else if (client_boost) { 1859 adj = 0; 1860 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1861 if (rps->cur_freq > rps->efficient_freq) 1862 new_freq = rps->efficient_freq; 1863 else if (rps->cur_freq > rps->min_freq_softlimit) 1864 new_freq = rps->min_freq_softlimit; 1865 adj = 0; 1866 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1867 if (adj < 0) 1868 adj *= 2; 1869 else /* CHV needs even encode values */ 1870 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; 1871 1872 if (new_freq <= rps->min_freq_softlimit) 1873 adj = 0; 1874 } else { /* unknown event */ 1875 adj = 0; 1876 } 1877 1878 /* 1879 * sysfs frequency limits may have snuck in while 1880 * servicing the interrupt 1881 */ 1882 new_freq += adj; 1883 new_freq = clamp_t(int, new_freq, min, max); 1884 1885 if (intel_rps_set(rps, new_freq)) { 1886 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); 1887 adj = 0; 1888 } 1889 rps->last_adj = adj; 1890 1891 mutex_unlock(&rps->lock); 1892 1893 out: 1894 spin_lock_irq(gt->irq_lock); 1895 gen6_gt_pm_unmask_irq(gt, rps->pm_events); 1896 spin_unlock_irq(gt->irq_lock); 1897 } 1898 1899 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1900 { 1901 struct intel_gt *gt = rps_to_gt(rps); 1902 const u32 events = rps->pm_events & pm_iir; 1903 1904 lockdep_assert_held(gt->irq_lock); 1905 1906 if (unlikely(!events)) 1907 return; 1908 1909 GT_TRACE(gt, "irq events:%x\n", events); 1910 1911 gen6_gt_pm_mask_irq(gt, events); 1912 1913 rps->pm_iir |= events; 1914 queue_work(gt->i915->unordered_wq, &rps->work); 1915 } 1916 1917 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1918 { 1919 struct intel_gt *gt = rps_to_gt(rps); 1920 u32 events; 1921 1922 events = pm_iir & rps->pm_events; 1923 if (events) { 1924 spin_lock(gt->irq_lock); 1925 1926 GT_TRACE(gt, "irq events:%x\n", events); 1927 1928 gen6_gt_pm_mask_irq(gt, events); 1929 rps->pm_iir |= events; 1930 1931 queue_work(gt->i915->unordered_wq, &rps->work); 1932 spin_unlock(gt->irq_lock); 1933 } 1934 1935 if (GRAPHICS_VER(gt->i915) >= 8) 1936 return; 1937 1938 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1939 intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10); 1940 1941 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1942 drm_dbg(&rps_to_i915(rps)->drm, 1943 "Command parser error, pm_iir 0x%08x\n", pm_iir); 1944 } 1945 1946 void gen5_rps_irq_handler(struct intel_rps *rps) 1947 { 1948 struct intel_uncore *uncore = rps_to_uncore(rps); 1949 u32 busy_up, busy_down, max_avg, min_avg; 1950 u8 new_freq; 1951 1952 spin_lock(&mchdev_lock); 1953 1954 intel_uncore_write16(uncore, 1955 MEMINTRSTS, 1956 intel_uncore_read(uncore, MEMINTRSTS)); 1957 
1958 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 1959 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); 1960 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); 1961 max_avg = intel_uncore_read(uncore, RCBMAXAVG); 1962 min_avg = intel_uncore_read(uncore, RCBMINAVG); 1963 1964 /* Handle RCS change request from hw */ 1965 new_freq = rps->cur_freq; 1966 if (busy_up > max_avg) 1967 new_freq++; 1968 else if (busy_down < min_avg) 1969 new_freq--; 1970 new_freq = clamp(new_freq, 1971 rps->min_freq_softlimit, 1972 rps->max_freq_softlimit); 1973 1974 if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq)) 1975 rps->cur_freq = new_freq; 1976 1977 spin_unlock(&mchdev_lock); 1978 } 1979 1980 void intel_rps_init_early(struct intel_rps *rps) 1981 { 1982 rw_init(&rps->lock, "rpslk"); 1983 rw_init(&rps->power.mutex, "rpspwr"); 1984 1985 INIT_WORK(&rps->work, rps_work); 1986 #ifdef __linux__ 1987 timer_setup(&rps->timer, rps_timer, 0); 1988 #else 1989 timeout_set(&rps->timer, rps_timer, rps); 1990 #endif 1991 1992 atomic_set(&rps->num_waiters, 0); 1993 } 1994 1995 void intel_rps_init(struct intel_rps *rps) 1996 { 1997 struct drm_i915_private *i915 = rps_to_i915(rps); 1998 1999 if (rps_uses_slpc(rps)) 2000 return; 2001 2002 if (IS_CHERRYVIEW(i915)) 2003 chv_rps_init(rps); 2004 else if (IS_VALLEYVIEW(i915)) 2005 vlv_rps_init(rps); 2006 else if (GRAPHICS_VER(i915) >= 6) 2007 gen6_rps_init(rps); 2008 else if (IS_IRONLAKE_M(i915)) 2009 gen5_rps_init(rps); 2010 2011 /* Derive initial user preferences/limits from the hardware limits */ 2012 rps->max_freq_softlimit = rps->max_freq; 2013 rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit; 2014 rps->min_freq_softlimit = rps->min_freq; 2015 rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit; 2016 2017 /* After setting max-softlimit, find the overclock max freq */ 2018 if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { 2019 u32 params = 0; 2020 2021 snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, ¶ms, NULL); 2022 if (params & BIT(31)) { /* OC supported */ 2023 drm_dbg(&i915->drm, 2024 "Overclocking supported, max: %dMHz, overclock: %dMHz\n", 2025 (rps->max_freq & 0xff) * 50, 2026 (params & 0xff) * 50); 2027 rps->max_freq = params & 0xff; 2028 } 2029 } 2030 2031 /* Set default thresholds in % */ 2032 rps->power.up_threshold = 95; 2033 rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold; 2034 rps->power.down_threshold = 85; 2035 rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold; 2036 2037 /* Finally allow us to boost to max by default */ 2038 rps->boost_freq = rps->max_freq; 2039 rps->idle_freq = rps->min_freq; 2040 2041 /* Start in the middle, from here we will autotune based on workload */ 2042 rps->cur_freq = rps->efficient_freq; 2043 2044 rps->pm_intrmsk_mbz = 0; 2045 2046 /* 2047 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 2048 * if GEN6_PM_UP_EI_EXPIRED is masked. 2049 * 2050 * TODO: verify if this can be reproduced on VLV,CHV. 
2051 */ 2052 if (GRAPHICS_VER(i915) <= 7) 2053 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 2054 2055 if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11) 2056 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 2057 2058 /* GuC needs ARAT expired interrupt unmasked */ 2059 if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc)) 2060 rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; 2061 } 2062 2063 void intel_rps_sanitize(struct intel_rps *rps) 2064 { 2065 if (rps_uses_slpc(rps)) 2066 return; 2067 2068 if (GRAPHICS_VER(rps_to_i915(rps)) >= 6) 2069 rps_disable_interrupts(rps); 2070 } 2071 2072 u32 intel_rps_read_rpstat(struct intel_rps *rps) 2073 { 2074 struct drm_i915_private *i915 = rps_to_i915(rps); 2075 i915_reg_t rpstat; 2076 2077 rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1; 2078 2079 return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat); 2080 } 2081 2082 static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) 2083 { 2084 struct drm_i915_private *i915 = rps_to_i915(rps); 2085 u32 cagf; 2086 2087 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) 2088 cagf = REG_FIELD_GET(MTL_CAGF_MASK, rpstat); 2089 else if (GRAPHICS_VER(i915) >= 12) 2090 cagf = REG_FIELD_GET(GEN12_CAGF_MASK, rpstat); 2091 else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 2092 cagf = REG_FIELD_GET(RPE_MASK, rpstat); 2093 else if (GRAPHICS_VER(i915) >= 9) 2094 cagf = REG_FIELD_GET(GEN9_CAGF_MASK, rpstat); 2095 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 2096 cagf = REG_FIELD_GET(HSW_CAGF_MASK, rpstat); 2097 else if (GRAPHICS_VER(i915) >= 6) 2098 cagf = REG_FIELD_GET(GEN6_CAGF_MASK, rpstat); 2099 else 2100 cagf = gen5_invert_freq(rps, REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rpstat)); 2101 2102 return cagf; 2103 } 2104 2105 static u32 __read_cagf(struct intel_rps *rps, bool take_fw) 2106 { 2107 struct drm_i915_private *i915 = rps_to_i915(rps); 2108 struct intel_uncore *uncore = rps_to_uncore(rps); 2109 i915_reg_t r = INVALID_MMIO_REG; 2110 u32 freq; 2111 2112 /* 2113 * For Gen12+ reading freq from HW does not need a forcewake and 2114 * registers will return 0 freq when GT is in RC6 2115 */ 2116 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) { 2117 r = MTL_MIRROR_TARGET_WP1; 2118 } else if (GRAPHICS_VER(i915) >= 12) { 2119 r = GEN12_RPSTAT1; 2120 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 2121 vlv_punit_get(i915); 2122 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 2123 vlv_punit_put(i915); 2124 } else if (GRAPHICS_VER(i915) >= 6) { 2125 r = GEN6_RPSTAT1; 2126 } else { 2127 r = MEMSTAT_ILK; 2128 } 2129 2130 if (i915_mmio_reg_valid(r)) 2131 freq = take_fw ? 
intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r); 2132 2133 return intel_rps_get_cagf(rps, freq); 2134 } 2135 2136 static u32 read_cagf(struct intel_rps *rps) 2137 { 2138 return __read_cagf(rps, true); 2139 } 2140 2141 u32 intel_rps_read_actual_frequency(struct intel_rps *rps) 2142 { 2143 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 2144 intel_wakeref_t wakeref; 2145 u32 freq = 0; 2146 2147 with_intel_runtime_pm_if_in_use(rpm, wakeref) 2148 freq = intel_gpu_freq(rps, read_cagf(rps)); 2149 2150 return freq; 2151 } 2152 2153 u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps) 2154 { 2155 return intel_gpu_freq(rps, __read_cagf(rps, false)); 2156 } 2157 2158 static u32 intel_rps_read_punit_req(struct intel_rps *rps) 2159 { 2160 struct intel_uncore *uncore = rps_to_uncore(rps); 2161 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 2162 intel_wakeref_t wakeref; 2163 u32 freq = 0; 2164 2165 with_intel_runtime_pm_if_in_use(rpm, wakeref) 2166 freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); 2167 2168 return freq; 2169 } 2170 2171 static u32 intel_rps_get_req(u32 pureq) 2172 { 2173 u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT; 2174 2175 return req; 2176 } 2177 2178 u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps) 2179 { 2180 u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps)); 2181 2182 return intel_gpu_freq(rps, freq); 2183 } 2184 2185 u32 intel_rps_get_requested_frequency(struct intel_rps *rps) 2186 { 2187 if (rps_uses_slpc(rps)) 2188 return intel_rps_read_punit_req_frequency(rps); 2189 else 2190 return intel_gpu_freq(rps, rps->cur_freq); 2191 } 2192 2193 u32 intel_rps_get_max_frequency(struct intel_rps *rps) 2194 { 2195 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2196 2197 if (rps_uses_slpc(rps)) 2198 return slpc->max_freq_softlimit; 2199 else 2200 return intel_gpu_freq(rps, rps->max_freq_softlimit); 2201 } 2202 2203 /** 2204 * intel_rps_get_max_raw_freq - returns the max frequency in some raw format. 2205 * @rps: the intel_rps structure 2206 * 2207 * Returns the max frequency in a raw format. In newer platforms raw is in 2208 * units of 50 MHz. 
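 *
 * For example, on GEN9+ the value below is divided by GEN9_FREQ_SCALER to
 * express the finer-grained hardware ratio in 50 MHz units, while the SLPC
 * path divides its MHz value by GT_FREQUENCY_MULTIPLIER instead.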
2209 */ 2210 u32 intel_rps_get_max_raw_freq(struct intel_rps *rps) 2211 { 2212 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2213 u32 freq; 2214 2215 if (rps_uses_slpc(rps)) { 2216 return DIV_ROUND_CLOSEST(slpc->rp0_freq, 2217 GT_FREQUENCY_MULTIPLIER); 2218 } else { 2219 freq = rps->max_freq; 2220 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { 2221 /* Convert GT frequency to 50 MHz units */ 2222 freq /= GEN9_FREQ_SCALER; 2223 } 2224 return freq; 2225 } 2226 } 2227 2228 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps) 2229 { 2230 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2231 2232 if (rps_uses_slpc(rps)) 2233 return slpc->rp0_freq; 2234 else 2235 return intel_gpu_freq(rps, rps->rp0_freq); 2236 } 2237 2238 u32 intel_rps_get_rp1_frequency(struct intel_rps *rps) 2239 { 2240 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2241 2242 if (rps_uses_slpc(rps)) 2243 return slpc->rp1_freq; 2244 else 2245 return intel_gpu_freq(rps, rps->rp1_freq); 2246 } 2247 2248 u32 intel_rps_get_rpn_frequency(struct intel_rps *rps) 2249 { 2250 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2251 2252 if (rps_uses_slpc(rps)) 2253 return slpc->min_freq; 2254 else 2255 return intel_gpu_freq(rps, rps->min_freq); 2256 } 2257 2258 static void rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p) 2259 { 2260 struct intel_gt *gt = rps_to_gt(rps); 2261 struct drm_i915_private *i915 = gt->i915; 2262 struct intel_uncore *uncore = gt->uncore; 2263 struct intel_rps_freq_caps caps; 2264 u32 rp_state_limits; 2265 u32 gt_perf_status; 2266 u32 rpmodectl, rpinclimit, rpdeclimit; 2267 u32 rpstat, cagf, reqf; 2268 u32 rpcurupei, rpcurup, rpprevup; 2269 u32 rpcurdownei, rpcurdown, rpprevdown; 2270 u32 rpupei, rpupt, rpdownei, rpdownt; 2271 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 2272 2273 rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS); 2274 gen6_rps_get_freq_caps(rps, &caps); 2275 if (IS_GEN9_LP(i915)) 2276 gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS); 2277 else 2278 gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS); 2279 2280 /* RPSTAT1 is in the GT power well */ 2281 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 2282 2283 reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ); 2284 if (GRAPHICS_VER(i915) >= 9) { 2285 reqf >>= 23; 2286 } else { 2287 reqf &= ~GEN6_TURBO_DISABLE; 2288 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 2289 reqf >>= 24; 2290 else 2291 reqf >>= 25; 2292 } 2293 reqf = intel_gpu_freq(rps, reqf); 2294 2295 rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); 2296 rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); 2297 rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); 2298 2299 rpstat = intel_rps_read_rpstat(rps); 2300 rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 2301 rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 2302 rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 2303 rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 2304 rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 2305 rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 2306 2307 rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI); 2308 rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); 2309 2310 rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); 2311 rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); 2312 2313 cagf = intel_rps_read_actual_frequency(rps); 
2314 2315 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 2316 2317 if (GRAPHICS_VER(i915) >= 11) { 2318 pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); 2319 pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); 2320 /* 2321 * The equivalent to the PM ISR & IIR cannot be read 2322 * without affecting the current state of the system 2323 */ 2324 pm_isr = 0; 2325 pm_iir = 0; 2326 } else if (GRAPHICS_VER(i915) >= 8) { 2327 pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2)); 2328 pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2)); 2329 pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2)); 2330 pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2)); 2331 } else { 2332 pm_ier = intel_uncore_read(uncore, GEN6_PMIER); 2333 pm_imr = intel_uncore_read(uncore, GEN6_PMIMR); 2334 pm_isr = intel_uncore_read(uncore, GEN6_PMISR); 2335 pm_iir = intel_uncore_read(uncore, GEN6_PMIIR); 2336 } 2337 pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); 2338 2339 drm_printf(p, "Video Turbo Mode: %s\n", 2340 str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO)); 2341 drm_printf(p, "HW control enabled: %s\n", 2342 str_yes_no(rpmodectl & GEN6_RP_ENABLE)); 2343 drm_printf(p, "SW control enabled: %s\n", 2344 str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); 2345 2346 drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", 2347 pm_ier, pm_imr, pm_mask); 2348 if (GRAPHICS_VER(i915) <= 10) 2349 drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n", 2350 pm_isr, pm_iir); 2351 drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n", 2352 rps->pm_intrmsk_mbz); 2353 drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 2354 drm_printf(p, "Render p-state ratio: %d\n", 2355 (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 2356 drm_printf(p, "Render p-state VID: %d\n", 2357 gt_perf_status & 0xff); 2358 drm_printf(p, "Render p-state limit: %d\n", 2359 rp_state_limits & 0xff); 2360 drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat); 2361 drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl); 2362 drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit); 2363 drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 2364 drm_printf(p, "RPNSWREQ: %dMHz\n", reqf); 2365 drm_printf(p, "CAGF: %dMHz\n", cagf); 2366 drm_printf(p, "RP CUR UP EI: %d (%lldns)\n", 2367 rpcurupei, 2368 intel_gt_pm_interval_to_ns(gt, rpcurupei)); 2369 drm_printf(p, "RP CUR UP: %d (%lldns)\n", 2370 rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup)); 2371 drm_printf(p, "RP PREV UP: %d (%lldns)\n", 2372 rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup)); 2373 drm_printf(p, "Up threshold: %d%%\n", 2374 rps->power.up_threshold); 2375 drm_printf(p, "RP UP EI: %d (%lldns)\n", 2376 rpupei, intel_gt_pm_interval_to_ns(gt, rpupei)); 2377 drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n", 2378 rpupt, intel_gt_pm_interval_to_ns(gt, rpupt)); 2379 2380 drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n", 2381 rpcurdownei, 2382 intel_gt_pm_interval_to_ns(gt, rpcurdownei)); 2383 drm_printf(p, "RP CUR DOWN: %d (%lldns)\n", 2384 rpcurdown, 2385 intel_gt_pm_interval_to_ns(gt, rpcurdown)); 2386 drm_printf(p, "RP PREV DOWN: %d (%lldns)\n", 2387 rpprevdown, 2388 intel_gt_pm_interval_to_ns(gt, rpprevdown)); 2389 drm_printf(p, "Down threshold: %d%%\n", 2390 rps->power.down_threshold); 2391 drm_printf(p, "RP DOWN EI: %d (%lldns)\n", 2392 rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei)); 2393 drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n", 2394 rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); 2395 2396 drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", 2397 
intel_gpu_freq(rps, caps.min_freq)); 2398 drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", 2399 intel_gpu_freq(rps, caps.rp1_freq)); 2400 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", 2401 intel_gpu_freq(rps, caps.rp0_freq)); 2402 drm_printf(p, "Max overclocked frequency: %dMHz\n", 2403 intel_gpu_freq(rps, rps->max_freq)); 2404 2405 drm_printf(p, "Current freq: %d MHz\n", 2406 intel_gpu_freq(rps, rps->cur_freq)); 2407 drm_printf(p, "Actual freq: %d MHz\n", cagf); 2408 drm_printf(p, "Idle freq: %d MHz\n", 2409 intel_gpu_freq(rps, rps->idle_freq)); 2410 drm_printf(p, "Min freq: %d MHz\n", 2411 intel_gpu_freq(rps, rps->min_freq)); 2412 drm_printf(p, "Boost freq: %d MHz\n", 2413 intel_gpu_freq(rps, rps->boost_freq)); 2414 drm_printf(p, "Max freq: %d MHz\n", 2415 intel_gpu_freq(rps, rps->max_freq)); 2416 drm_printf(p, 2417 "efficient (RPe) frequency: %d MHz\n", 2418 intel_gpu_freq(rps, rps->efficient_freq)); 2419 } 2420 2421 static void slpc_frequency_dump(struct intel_rps *rps, struct drm_printer *p) 2422 { 2423 struct intel_gt *gt = rps_to_gt(rps); 2424 struct intel_uncore *uncore = gt->uncore; 2425 struct intel_rps_freq_caps caps; 2426 u32 pm_mask; 2427 2428 gen6_rps_get_freq_caps(rps, &caps); 2429 pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); 2430 2431 drm_printf(p, "PM MASK=0x%08x\n", pm_mask); 2432 drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n", 2433 rps->pm_intrmsk_mbz); 2434 drm_printf(p, "RPSTAT1: 0x%08x\n", intel_rps_read_rpstat(rps)); 2435 drm_printf(p, "RPNSWREQ: %dMHz\n", intel_rps_get_requested_frequency(rps)); 2436 drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", 2437 intel_gpu_freq(rps, caps.min_freq)); 2438 drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", 2439 intel_gpu_freq(rps, caps.rp1_freq)); 2440 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", 2441 intel_gpu_freq(rps, caps.rp0_freq)); 2442 drm_printf(p, "Current freq: %d MHz\n", 2443 intel_rps_get_requested_frequency(rps)); 2444 drm_printf(p, "Actual freq: %d MHz\n", 2445 intel_rps_read_actual_frequency(rps)); 2446 drm_printf(p, "Min freq: %d MHz\n", 2447 intel_rps_get_min_frequency(rps)); 2448 drm_printf(p, "Boost freq: %d MHz\n", 2449 intel_rps_get_boost_frequency(rps)); 2450 drm_printf(p, "Max freq: %d MHz\n", 2451 intel_rps_get_max_frequency(rps)); 2452 drm_printf(p, 2453 "efficient (RPe) frequency: %d MHz\n", 2454 intel_gpu_freq(rps, caps.rp1_freq)); 2455 } 2456 2457 void gen6_rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p) 2458 { 2459 if (rps_uses_slpc(rps)) 2460 return slpc_frequency_dump(rps, p); 2461 else 2462 return rps_frequency_dump(rps, p); 2463 } 2464 2465 static int set_max_freq(struct intel_rps *rps, u32 val) 2466 { 2467 struct drm_i915_private *i915 = rps_to_i915(rps); 2468 int ret = 0; 2469 2470 mutex_lock(&rps->lock); 2471 2472 val = intel_freq_opcode(rps, val); 2473 if (val < rps->min_freq || 2474 val > rps->max_freq || 2475 val < rps->min_freq_softlimit) { 2476 ret = -EINVAL; 2477 goto unlock; 2478 } 2479 2480 if (val > rps->rp0_freq) 2481 drm_dbg(&i915->drm, "User requested overclocking to %d\n", 2482 intel_gpu_freq(rps, val)); 2483 2484 rps->max_freq_softlimit = val; 2485 2486 val = clamp_t(int, rps->cur_freq, 2487 rps->min_freq_softlimit, 2488 rps->max_freq_softlimit); 2489 2490 /* 2491 * We still need *_set_rps to process the new max_delay and 2492 * update the interrupt limits and PMINTRMSK even though 2493 * frequency request may be unchanged. 
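	 * For example, lowering the max below the current request must
	 * re-clamp cur_freq and rewrite PMINTRMSK for the new ceiling.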
2494 */ 2495 intel_rps_set(rps, val); 2496 2497 unlock: 2498 mutex_unlock(&rps->lock); 2499 2500 return ret; 2501 } 2502 2503 int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val) 2504 { 2505 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2506 2507 if (rps_uses_slpc(rps)) 2508 return intel_guc_slpc_set_max_freq(slpc, val); 2509 else 2510 return set_max_freq(rps, val); 2511 } 2512 2513 u32 intel_rps_get_min_frequency(struct intel_rps *rps) 2514 { 2515 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2516 2517 if (rps_uses_slpc(rps)) 2518 return slpc->min_freq_softlimit; 2519 else 2520 return intel_gpu_freq(rps, rps->min_freq_softlimit); 2521 } 2522 2523 /** 2524 * intel_rps_get_min_raw_freq - returns the min frequency in some raw format. 2525 * @rps: the intel_rps structure 2526 * 2527 * Returns the min frequency in a raw format. In newer platforms raw is in 2528 * units of 50 MHz. 2529 */ 2530 u32 intel_rps_get_min_raw_freq(struct intel_rps *rps) 2531 { 2532 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2533 u32 freq; 2534 2535 if (rps_uses_slpc(rps)) { 2536 return DIV_ROUND_CLOSEST(slpc->min_freq, 2537 GT_FREQUENCY_MULTIPLIER); 2538 } else { 2539 freq = rps->min_freq; 2540 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { 2541 /* Convert GT frequency to 50 MHz units */ 2542 freq /= GEN9_FREQ_SCALER; 2543 } 2544 return freq; 2545 } 2546 } 2547 2548 static int set_min_freq(struct intel_rps *rps, u32 val) 2549 { 2550 int ret = 0; 2551 2552 mutex_lock(&rps->lock); 2553 2554 val = intel_freq_opcode(rps, val); 2555 if (val < rps->min_freq || 2556 val > rps->max_freq || 2557 val > rps->max_freq_softlimit) { 2558 ret = -EINVAL; 2559 goto unlock; 2560 } 2561 2562 rps->min_freq_softlimit = val; 2563 2564 val = clamp_t(int, rps->cur_freq, 2565 rps->min_freq_softlimit, 2566 rps->max_freq_softlimit); 2567 2568 /* 2569 * We still need *_set_rps to process the new min_delay and 2570 * update the interrupt limits and PMINTRMSK even though 2571 * frequency request may be unchanged. 2572 */ 2573 intel_rps_set(rps, val); 2574 2575 unlock: 2576 mutex_unlock(&rps->lock); 2577 2578 return ret; 2579 } 2580 2581 int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val) 2582 { 2583 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2584 2585 if (rps_uses_slpc(rps)) 2586 return intel_guc_slpc_set_min_freq(slpc, val); 2587 else 2588 return set_min_freq(rps, val); 2589 } 2590 2591 u8 intel_rps_get_up_threshold(struct intel_rps *rps) 2592 { 2593 return rps->power.up_threshold; 2594 } 2595 2596 static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val) 2597 { 2598 int ret; 2599 2600 if (val > 100) 2601 return -EINVAL; 2602 2603 ret = mutex_lock_interruptible(&rps->lock); 2604 if (ret) 2605 return ret; 2606 2607 if (*threshold == val) 2608 goto out_unlock; 2609 2610 *threshold = val; 2611 2612 /* Force reset. 
*/ 2613 rps->last_freq = -1; 2614 mutex_lock(&rps->power.mutex); 2615 rps->power.mode = -1; 2616 mutex_unlock(&rps->power.mutex); 2617 2618 intel_rps_set(rps, clamp(rps->cur_freq, 2619 rps->min_freq_softlimit, 2620 rps->max_freq_softlimit)); 2621 2622 out_unlock: 2623 mutex_unlock(&rps->lock); 2624 2625 return ret; 2626 } 2627 2628 int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold) 2629 { 2630 return rps_set_threshold(rps, &rps->power.up_threshold, threshold); 2631 } 2632 2633 u8 intel_rps_get_down_threshold(struct intel_rps *rps) 2634 { 2635 return rps->power.down_threshold; 2636 } 2637 2638 int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold) 2639 { 2640 return rps_set_threshold(rps, &rps->power.down_threshold, threshold); 2641 } 2642 2643 static void intel_rps_set_manual(struct intel_rps *rps, bool enable) 2644 { 2645 struct intel_uncore *uncore = rps_to_uncore(rps); 2646 u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE; 2647 2648 /* Allow punit to process software requests */ 2649 intel_uncore_write(uncore, GEN6_RP_CONTROL, state); 2650 } 2651 2652 void intel_rps_raise_unslice(struct intel_rps *rps) 2653 { 2654 struct intel_uncore *uncore = rps_to_uncore(rps); 2655 2656 mutex_lock(&rps->lock); 2657 2658 if (rps_uses_slpc(rps)) { 2659 /* RP limits have not been initialized yet for SLPC path */ 2660 struct intel_rps_freq_caps caps; 2661 2662 gen6_rps_get_freq_caps(rps, &caps); 2663 2664 intel_rps_set_manual(rps, true); 2665 intel_uncore_write(uncore, GEN6_RPNSWREQ, 2666 ((caps.rp0_freq << 2667 GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | 2668 GEN9_IGNORE_SLICE_RATIO)); 2669 intel_rps_set_manual(rps, false); 2670 } else { 2671 intel_rps_set(rps, rps->rp0_freq); 2672 } 2673 2674 mutex_unlock(&rps->lock); 2675 } 2676 2677 void intel_rps_lower_unslice(struct intel_rps *rps) 2678 { 2679 struct intel_uncore *uncore = rps_to_uncore(rps); 2680 2681 mutex_lock(&rps->lock); 2682 2683 if (rps_uses_slpc(rps)) { 2684 /* RP limits have not been initialized yet for SLPC path */ 2685 struct intel_rps_freq_caps caps; 2686 2687 gen6_rps_get_freq_caps(rps, &caps); 2688 2689 intel_rps_set_manual(rps, true); 2690 intel_uncore_write(uncore, GEN6_RPNSWREQ, 2691 ((caps.min_freq << 2692 GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | 2693 GEN9_IGNORE_SLICE_RATIO)); 2694 intel_rps_set_manual(rps, false); 2695 } else { 2696 intel_rps_set(rps, rps->min_freq); 2697 } 2698 2699 mutex_unlock(&rps->lock); 2700 } 2701 2702 static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32) 2703 { 2704 struct intel_gt *gt = rps_to_gt(rps); 2705 intel_wakeref_t wakeref; 2706 u32 val; 2707 2708 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 2709 val = intel_uncore_read(gt->uncore, reg32); 2710 2711 return val; 2712 } 2713 2714 bool rps_read_mask_mmio(struct intel_rps *rps, 2715 i915_reg_t reg32, u32 mask) 2716 { 2717 return rps_read_mmio(rps, reg32) & mask; 2718 } 2719 2720 /* External interface for intel_ips.ko */ 2721 2722 static struct drm_i915_private __rcu *ips_mchdev; 2723 2724 /* 2725 * Tells the intel_ips driver that the i915 driver is now loaded, if 2726 * IPS got loaded first. 2727 * 2728 * This awkward dance is so that neither module has to depend on the 2729 * other in order for IPS to do the appropriate communication of 2730 * GPU turbo limits to i915. 
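 *
 * symbol_get()/symbol_put() below only resolve ips_link_to_i915_driver()
 * if intel_ips is already loaded, so no hard module dependency is created.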
2731 */ 2732 static void 2733 ips_ping_for_i915_load(void) 2734 { 2735 #ifdef __linux__ 2736 void (*link)(void); 2737 2738 link = symbol_get(ips_link_to_i915_driver); 2739 if (link) { 2740 link(); 2741 symbol_put(ips_link_to_i915_driver); 2742 } 2743 #endif 2744 } 2745 2746 void intel_rps_driver_register(struct intel_rps *rps) 2747 { 2748 struct intel_gt *gt = rps_to_gt(rps); 2749 2750 /* 2751 * We only register the i915 ips part with intel-ips once everything is 2752 * set up, to avoid intel-ips sneaking in and reading bogus values. 2753 */ 2754 if (GRAPHICS_VER(gt->i915) == 5) { 2755 GEM_BUG_ON(ips_mchdev); 2756 rcu_assign_pointer(ips_mchdev, gt->i915); 2757 ips_ping_for_i915_load(); 2758 } 2759 } 2760 2761 void intel_rps_driver_unregister(struct intel_rps *rps) 2762 { 2763 if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps)) 2764 rcu_assign_pointer(ips_mchdev, NULL); 2765 } 2766 2767 static struct drm_i915_private *mchdev_get(void) 2768 { 2769 struct drm_i915_private *i915; 2770 2771 rcu_read_lock(); 2772 i915 = rcu_dereference(ips_mchdev); 2773 if (i915 && !kref_get_unless_zero(&i915->drm.ref)) 2774 i915 = NULL; 2775 rcu_read_unlock(); 2776 2777 return i915; 2778 } 2779 2780 /** 2781 * i915_read_mch_val - return value for IPS use 2782 * 2783 * Calculate and return a value for the IPS driver to use when deciding whether 2784 * we have thermal and power headroom to increase CPU or GPU power budget. 2785 */ 2786 unsigned long i915_read_mch_val(void) 2787 { 2788 struct drm_i915_private *i915; 2789 unsigned long chipset_val = 0; 2790 unsigned long graphics_val = 0; 2791 intel_wakeref_t wakeref; 2792 2793 i915 = mchdev_get(); 2794 if (!i915) 2795 return 0; 2796 2797 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 2798 struct intel_ips *ips = &to_gt(i915)->rps.ips; 2799 2800 spin_lock_irq(&mchdev_lock); 2801 chipset_val = __ips_chipset_val(ips); 2802 graphics_val = __ips_gfx_val(ips); 2803 spin_unlock_irq(&mchdev_lock); 2804 } 2805 2806 drm_dev_put(&i915->drm); 2807 return chipset_val + graphics_val; 2808 } 2809 EXPORT_SYMBOL_GPL(i915_read_mch_val); 2810 2811 /** 2812 * i915_gpu_raise - raise GPU frequency limit 2813 * 2814 * Raise the limit; IPS indicates we have thermal headroom. 2815 */ 2816 bool i915_gpu_raise(void) 2817 { 2818 struct drm_i915_private *i915; 2819 struct intel_rps *rps; 2820 2821 i915 = mchdev_get(); 2822 if (!i915) 2823 return false; 2824 2825 rps = &to_gt(i915)->rps; 2826 2827 spin_lock_irq(&mchdev_lock); 2828 if (rps->max_freq_softlimit < rps->max_freq) 2829 rps->max_freq_softlimit++; 2830 spin_unlock_irq(&mchdev_lock); 2831 2832 drm_dev_put(&i915->drm); 2833 return true; 2834 } 2835 EXPORT_SYMBOL_GPL(i915_gpu_raise); 2836 2837 /** 2838 * i915_gpu_lower - lower GPU frequency limit 2839 * 2840 * IPS indicates we're close to a thermal limit, so throttle back the GPU 2841 * frequency maximum. 2842 */ 2843 bool i915_gpu_lower(void) 2844 { 2845 struct drm_i915_private *i915; 2846 struct intel_rps *rps; 2847 2848 i915 = mchdev_get(); 2849 if (!i915) 2850 return false; 2851 2852 rps = &to_gt(i915)->rps; 2853 2854 spin_lock_irq(&mchdev_lock); 2855 if (rps->max_freq_softlimit > rps->min_freq) 2856 rps->max_freq_softlimit--; 2857 spin_unlock_irq(&mchdev_lock); 2858 2859 drm_dev_put(&i915->drm); 2860 return true; 2861 } 2862 EXPORT_SYMBOL_GPL(i915_gpu_lower); 2863 2864 /** 2865 * i915_gpu_busy - indicate GPU business to IPS 2866 * 2867 * Tell the IPS driver whether or not the GPU is busy. 
2868 */ 2869 bool i915_gpu_busy(void) 2870 { 2871 struct drm_i915_private *i915; 2872 bool ret; 2873 2874 i915 = mchdev_get(); 2875 if (!i915) 2876 return false; 2877 2878 ret = to_gt(i915)->awake; 2879 2880 drm_dev_put(&i915->drm); 2881 return ret; 2882 } 2883 EXPORT_SYMBOL_GPL(i915_gpu_busy); 2884 2885 /** 2886 * i915_gpu_turbo_disable - disable graphics turbo 2887 * 2888 * Disable graphics turbo by resetting the max frequency and setting the 2889 * current frequency to the default. 2890 */ 2891 bool i915_gpu_turbo_disable(void) 2892 { 2893 struct drm_i915_private *i915; 2894 struct intel_rps *rps; 2895 bool ret; 2896 2897 i915 = mchdev_get(); 2898 if (!i915) 2899 return false; 2900 2901 rps = &to_gt(i915)->rps; 2902 2903 spin_lock_irq(&mchdev_lock); 2904 rps->max_freq_softlimit = rps->min_freq; 2905 ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq); 2906 spin_unlock_irq(&mchdev_lock); 2907 2908 drm_dev_put(&i915->drm); 2909 return ret; 2910 } 2911 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 2912 2913 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2914 #include "selftest_rps.c" 2915 #include "selftest_slpc.c" 2916 #endif 2917