/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require a higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
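/*
 * Illustrative note (not from the original source): a platform wanting
 * plain RC6 together with deep RC6, but not RC6pp, would express that as
 * the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE). The actual mask used
 * is chosen per-platform elsewhere in the driver.
 */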
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
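/*
 * Added note (field order inferred from the lookup below): the first four
 * columns of each entry are the desktop-vs-mobile flag, DDR3 flag, FSB
 * frequency (MHz) and memory frequency (MHz) matched by
 * intel_get_cxsr_latency(); the remaining four are the self-refresh
 * latencies (in ns) for display and cursor, with and without HPLL, that
 * pineview_update_wm() consumes.
 */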
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
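/*
 * Added example: for pipe B's sprite0 below, the low eight bits of the
 * FIFO start offset live at DSPARB bits 23:16 and the ninth bit at
 * DSPARB2 bit 8, so VLV_FIFO_START(dsparb, dsparb2, 16, 8) reassembles
 * the full 9-bit value.
 */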
static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
			     enum i915_pipe pipe, int plane)
{
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
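/*
 * Added example: in i9xx_get_fifo_size(), the low seven bits of DSPARB
 * give the plane A/B boundary and the field at DSPARB_CSTART_SHIFT marks
 * where the next region begins; with the boundary at 64 cachelines and
 * the C start at 96, plane A reports 64 and plane B 96 - 64 = 32.
 */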
"B" : "A", 464 size); 465 466 return size; 467 } 468 469 /* Pineview has different values for various configs */ 470 static const struct intel_watermark_params pineview_display_wm = { 471 .fifo_size = PINEVIEW_DISPLAY_FIFO, 472 .max_wm = PINEVIEW_MAX_WM, 473 .default_wm = PINEVIEW_DFT_WM, 474 .guard_size = PINEVIEW_GUARD_WM, 475 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 476 }; 477 static const struct intel_watermark_params pineview_display_hplloff_wm = { 478 .fifo_size = PINEVIEW_DISPLAY_FIFO, 479 .max_wm = PINEVIEW_MAX_WM, 480 .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 481 .guard_size = PINEVIEW_GUARD_WM, 482 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 483 }; 484 static const struct intel_watermark_params pineview_cursor_wm = { 485 .fifo_size = PINEVIEW_CURSOR_FIFO, 486 .max_wm = PINEVIEW_CURSOR_MAX_WM, 487 .default_wm = PINEVIEW_CURSOR_DFT_WM, 488 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 489 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 490 }; 491 static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 492 .fifo_size = PINEVIEW_CURSOR_FIFO, 493 .max_wm = PINEVIEW_CURSOR_MAX_WM, 494 .default_wm = PINEVIEW_CURSOR_DFT_WM, 495 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 496 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 497 }; 498 static const struct intel_watermark_params g4x_wm_info = { 499 .fifo_size = G4X_FIFO_SIZE, 500 .max_wm = G4X_MAX_WM, 501 .default_wm = G4X_MAX_WM, 502 .guard_size = 2, 503 .cacheline_size = G4X_FIFO_LINE_SIZE, 504 }; 505 static const struct intel_watermark_params g4x_cursor_wm_info = { 506 .fifo_size = I965_CURSOR_FIFO, 507 .max_wm = I965_CURSOR_MAX_WM, 508 .default_wm = I965_CURSOR_DFT_WM, 509 .guard_size = 2, 510 .cacheline_size = G4X_FIFO_LINE_SIZE, 511 }; 512 static const struct intel_watermark_params i965_cursor_wm_info = { 513 .fifo_size = I965_CURSOR_FIFO, 514 .max_wm = I965_CURSOR_MAX_WM, 515 .default_wm = I965_CURSOR_DFT_WM, 516 .guard_size = 2, 517 .cacheline_size = I915_FIFO_LINE_SIZE, 518 }; 519 static const struct intel_watermark_params i945_wm_info = { 520 .fifo_size = I945_FIFO_SIZE, 521 .max_wm = I915_MAX_WM, 522 .default_wm = 1, 523 .guard_size = 2, 524 .cacheline_size = I915_FIFO_LINE_SIZE, 525 }; 526 static const struct intel_watermark_params i915_wm_info = { 527 .fifo_size = I915_FIFO_SIZE, 528 .max_wm = I915_MAX_WM, 529 .default_wm = 1, 530 .guard_size = 2, 531 .cacheline_size = I915_FIFO_LINE_SIZE, 532 }; 533 static const struct intel_watermark_params i830_a_wm_info = { 534 .fifo_size = I855GM_FIFO_SIZE, 535 .max_wm = I915_MAX_WM, 536 .default_wm = 1, 537 .guard_size = 2, 538 .cacheline_size = I830_FIFO_LINE_SIZE, 539 }; 540 static const struct intel_watermark_params i830_bc_wm_info = { 541 .fifo_size = I855GM_FIFO_SIZE, 542 .max_wm = I915_MAX_WM/2, 543 .default_wm = 1, 544 .guard_size = 2, 545 .cacheline_size = I830_FIFO_LINE_SIZE, 546 }; 547 static const struct intel_watermark_params i845_wm_info = { 548 .fifo_size = I830_FIFO_SIZE, 549 .max_wm = I915_MAX_WM, 550 .default_wm = 1, 551 .guard_size = 2, 552 .cacheline_size = I830_FIFO_LINE_SIZE, 553 }; 554 555 /** 556 * intel_calculate_wm - calculate watermark level 557 * @clock_in_khz: pixel clock 558 * @wm: chip FIFO params 559 * @cpp: bytes per pixel 560 * @latency_ns: memory latency for the platform 561 * 562 * Calculate the watermark level (the level at which the display plane will 563 * start fetching from memory again). Each chip has a different display 564 * FIFO size and allocation, so the caller needs to figure that out and pass 565 * in the correct intel_watermark_params structure. 
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer (in cachelines)
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point.  If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
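/*
 * Added worked example (illustrative numbers): with a 100000 kHz pixel
 * clock, 4 bytes per pixel and 5000 ns of latency, the display fetches
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes while memory responds,
 * i.e. DIV_ROUND_UP(2000, 64) = 32 cacheline-sized FIFO entries. On a
 * 96-entry FIFO with a guard of 2 the watermark would then be
 * 96 - (32 + 2) = 62.
 */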
&pineview_display_hplloff_wm, 678 pineview_display_hplloff_wm.fifo_size, 679 cpp, latency->display_hpll_disable); 680 reg = I915_READ(DSPFW3); 681 reg &= ~DSPFW_HPLL_SR_MASK; 682 reg |= FW_WM(wm, HPLL_SR); 683 I915_WRITE(DSPFW3, reg); 684 685 /* cursor HPLL off SR */ 686 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, 687 pineview_display_hplloff_wm.fifo_size, 688 cpp, latency->cursor_hpll_disable); 689 reg = I915_READ(DSPFW3); 690 reg &= ~DSPFW_HPLL_CURSOR_MASK; 691 reg |= FW_WM(wm, HPLL_CURSOR); 692 I915_WRITE(DSPFW3, reg); 693 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); 694 695 intel_set_memory_cxsr(dev_priv, true); 696 } else { 697 intel_set_memory_cxsr(dev_priv, false); 698 } 699 } 700 701 static bool g4x_compute_wm0(struct drm_i915_private *dev_priv, 702 int plane, 703 const struct intel_watermark_params *display, 704 int display_latency_ns, 705 const struct intel_watermark_params *cursor, 706 int cursor_latency_ns, 707 int *plane_wm, 708 int *cursor_wm) 709 { 710 struct intel_crtc *crtc; 711 const struct drm_display_mode *adjusted_mode; 712 const struct drm_framebuffer *fb; 713 int htotal, hdisplay, clock, cpp; 714 int line_time_us, line_count; 715 int entries, tlb_miss; 716 717 crtc = intel_get_crtc_for_plane(dev_priv, plane); 718 if (!intel_crtc_active(crtc)) { 719 *cursor_wm = cursor->guard_size; 720 *plane_wm = display->guard_size; 721 return false; 722 } 723 724 adjusted_mode = &crtc->config->base.adjusted_mode; 725 fb = crtc->base.primary->state->fb; 726 clock = adjusted_mode->crtc_clock; 727 htotal = adjusted_mode->crtc_htotal; 728 hdisplay = crtc->config->pipe_src_w; 729 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 730 731 /* Use the small buffer method to calculate plane watermark */ 732 entries = ((clock * cpp / 1000) * display_latency_ns) / 1000; 733 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; 734 if (tlb_miss > 0) 735 entries += tlb_miss; 736 entries = DIV_ROUND_UP(entries, display->cacheline_size); 737 *plane_wm = entries + display->guard_size; 738 if (*plane_wm > (int)display->max_wm) 739 *plane_wm = display->max_wm; 740 741 /* Use the large buffer method to calculate cursor watermark */ 742 line_time_us = max(htotal * 1000 / clock, 1); 743 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 744 entries = line_count * crtc->base.cursor->state->crtc_w * cpp; 745 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 746 if (tlb_miss > 0) 747 entries += tlb_miss; 748 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 749 *cursor_wm = entries + cursor->guard_size; 750 if (*cursor_wm > (int)cursor->max_wm) 751 *cursor_wm = (int)cursor->max_wm; 752 753 return true; 754 } 755 756 /* 757 * Check the wm result. 758 * 759 * If any calculated watermark values is larger than the maximum value that 760 * can be programmed into the associated watermark register, that watermark 761 * must be disabled. 
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
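/*
 * Added example: at a 200000 kHz pixel rate, htotal 2200 and 30 usec of
 * latency (300 in 0.1us units), (300 * 200000) / (2200 * 10000) = 2 full
 * lines elapse before memory responds, so method 2 buffers (2 + 1) lines'
 * worth of the plane's bytes, rounded up to 64-byte cachelines.
 */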
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
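/*
 * Added example: vlv_compute_fifo() splits the 511 usable entries in
 * proportion to each visible plane's bytes per pixel; a 4 cpp primary
 * plus a 2 cpp sprite get 511 * 4 / 6 = 340 and 511 * 2 / 6 = 170
 * entries, and the single left-over entry is handed out by the
 * fifo_extra loop above.
 */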
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size =
			INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = dev_priv->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum i915_pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
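/*
 * Added note on ordering in vlv_update_wm() below: the deeper power
 * states (DDR DVFS, PM5, CxSR) are dropped before the new watermarks are
 * written and only re-enabled afterwards, so the hardware never runs a
 * deep power state against watermarks that cannot support it.
 */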
static void vlv_update_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(crtc);

	vlv_write_wm_values(crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)
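/*
 * Added note: is_power_of_2() on the pipe mask is true exactly when a
 * single pipe bit is set (e.g. 1 << PIPE_A alone); an empty mask or two
 * enabled pipes both fail the test and keep self-refresh off.
 */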
static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
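/*
 * Added worked example: with planea_wm = 32, planeb_wm = 16 and cwm = 2,
 * fwater_lo = (16 << 16) | 32 = 0x00100020, and after setting the
 * request-length bits (1 << 24) | (1 << 8) it becomes 0x01100120;
 * fwater_hi = 2 | (1 << 8) = 0x102.
 */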
*/ 1693 1694 if (pipe_config->pch_pfit.enabled) { 1695 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 1696 uint32_t pfit_size = pipe_config->pch_pfit.size; 1697 1698 pipe_w = pipe_config->pipe_src_w; 1699 pipe_h = pipe_config->pipe_src_h; 1700 1701 pfit_w = (pfit_size >> 16) & 0xFFFF; 1702 pfit_h = pfit_size & 0xFFFF; 1703 if (pipe_w < pfit_w) 1704 pipe_w = pfit_w; 1705 if (pipe_h < pfit_h) 1706 pipe_h = pfit_h; 1707 1708 if (WARN_ON(!pfit_w || !pfit_h)) 1709 return pixel_rate; 1710 1711 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, 1712 pfit_w * pfit_h); 1713 } 1714 1715 return pixel_rate; 1716 } 1717 1718 /* latency must be in 0.1us units. */ 1719 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) 1720 { 1721 uint64_t ret; 1722 1723 if (WARN(latency == 0, "Latency value missing\n")) 1724 return UINT_MAX; 1725 1726 ret = (uint64_t) pixel_rate * cpp * latency; 1727 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; 1728 1729 return ret; 1730 } 1731 1732 /* latency must be in 0.1us units. */ 1733 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 1734 uint32_t horiz_pixels, uint8_t cpp, 1735 uint32_t latency) 1736 { 1737 uint32_t ret; 1738 1739 if (WARN(latency == 0, "Latency value missing\n")) 1740 return UINT_MAX; 1741 if (WARN_ON(!pipe_htotal)) 1742 return UINT_MAX; 1743 1744 ret = (latency * pixel_rate) / (pipe_htotal * 10000); 1745 ret = (ret + 1) * horiz_pixels * cpp; 1746 ret = DIV_ROUND_UP(ret, 64) + 2; 1747 return ret; 1748 } 1749 1750 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, 1751 uint8_t cpp) 1752 { 1753 /* 1754 * Neither of these should be possible since this function shouldn't be 1755 * called if the CRTC is off or the plane is invisible. But let's be 1756 * extra paranoid to avoid a potential divide-by-zero if we screw up 1757 * elsewhere in the driver. 1758 */ 1759 if (WARN_ON(!cpp)) 1760 return 0; 1761 if (WARN_ON(!horiz_pixels)) 1762 return 0; 1763 1764 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; 1765 } 1766 1767 struct ilk_wm_maximums { 1768 uint16_t pri; 1769 uint16_t spr; 1770 uint16_t cur; 1771 uint16_t fbc; 1772 }; 1773 1774 /* 1775 * For both WM_PIPE and WM_LP. 1776 * mem_value must be in 0.1us units. 1777 */ 1778 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, 1779 const struct intel_plane_state *pstate, 1780 uint32_t mem_value, 1781 bool is_lp) 1782 { 1783 int cpp = pstate->base.fb ? 1784 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; 1785 uint32_t method1, method2; 1786 1787 if (!cstate->base.active || !pstate->base.visible) 1788 return 0; 1789 1790 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); 1791 1792 if (!is_lp) 1793 return method1; 1794 1795 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1796 cstate->base.adjusted_mode.crtc_htotal, 1797 drm_rect_width(&pstate->base.dst), 1798 cpp, mem_value); 1799 1800 return min(method1, method2); 1801 } 1802 1803 /* 1804 * For both WM_PIPE and WM_LP. 1805 * mem_value must be in 0.1us units. 1806 */ 1807 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, 1808 const struct intel_plane_state *pstate, 1809 uint32_t mem_value) 1810 { 1811 int cpp = pstate->base.fb ? 
1812 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; 1813 uint32_t method1, method2; 1814 1815 if (!cstate->base.active || !pstate->base.visible) 1816 return 0; 1817 1818 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); 1819 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1820 cstate->base.adjusted_mode.crtc_htotal, 1821 drm_rect_width(&pstate->base.dst), 1822 cpp, mem_value); 1823 return min(method1, method2); 1824 } 1825 1826 /* 1827 * For both WM_PIPE and WM_LP. 1828 * mem_value must be in 0.1us units. 1829 */ 1830 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, 1831 const struct intel_plane_state *pstate, 1832 uint32_t mem_value) 1833 { 1834 /* 1835 * We treat the cursor plane as always-on for the purposes of watermark 1836 * calculation. Until we have two-stage watermark programming merged, 1837 * this is necessary to avoid flickering. 1838 */ 1839 int cpp = 4; 1840 int width = pstate->base.visible ? pstate->base.crtc_w : 64; 1841 1842 if (!cstate->base.active) 1843 return 0; 1844 1845 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1846 cstate->base.adjusted_mode.crtc_htotal, 1847 width, cpp, mem_value); 1848 } 1849 1850 /* Only for WM_LP. */ 1851 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, 1852 const struct intel_plane_state *pstate, 1853 uint32_t pri_val) 1854 { 1855 int cpp = pstate->base.fb ? 1856 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; 1857 1858 if (!cstate->base.active || !pstate->base.visible) 1859 return 0; 1860 1861 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); 1862 } 1863 1864 static unsigned int 1865 ilk_display_fifo_size(const struct drm_i915_private *dev_priv) 1866 { 1867 if (INTEL_GEN(dev_priv) >= 8) 1868 return 3072; 1869 else if (INTEL_GEN(dev_priv) >= 7) 1870 return 768; 1871 else 1872 return 512; 1873 } 1874 1875 static unsigned int 1876 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, 1877 int level, bool is_sprite) 1878 { 1879 if (INTEL_GEN(dev_priv) >= 8) 1880 /* BDW primary/sprite plane watermarks */ 1881 return level == 0 ? 255 : 2047; 1882 else if (INTEL_GEN(dev_priv) >= 7) 1883 /* IVB/HSW primary/sprite plane watermarks */ 1884 return level == 0 ? 127 : 1023; 1885 else if (!is_sprite) 1886 /* ILK/SNB primary plane watermarks */ 1887 return level == 0 ? 127 : 511; 1888 else 1889 /* ILK/SNB sprite plane watermarks */ 1890 return level == 0 ? 63 : 255; 1891 } 1892 1893 static unsigned int 1894 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) 1895 { 1896 if (INTEL_GEN(dev_priv) >= 7) 1897 return level == 0 ? 63 : 255; 1898 else 1899 return level == 0 ? 
31 : 63; 1900 } 1901 1902 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) 1903 { 1904 if (INTEL_GEN(dev_priv) >= 8) 1905 return 31; 1906 else 1907 return 15; 1908 } 1909 1910 /* Calculate the maximum primary/sprite plane watermark */ 1911 static unsigned int ilk_plane_wm_max(struct drm_device *dev, 1912 int level, 1913 const struct intel_wm_config *config, 1914 enum intel_ddb_partitioning ddb_partitioning, 1915 bool is_sprite) 1916 { 1917 struct drm_i915_private *dev_priv = to_i915(dev); 1918 unsigned int fifo_size = ilk_display_fifo_size(dev_priv); 1919 1920 /* if sprites aren't enabled, sprites get nothing */ 1921 if (is_sprite && !config->sprites_enabled) 1922 return 0; 1923 1924 /* HSW allows LP1+ watermarks even with multiple pipes */ 1925 if (level == 0 || config->num_pipes_active > 1) { 1926 fifo_size /= INTEL_INFO(dev_priv)->num_pipes; 1927 1928 /* 1929 * For some reason the non self refresh 1930 * FIFO size is only half of the self 1931 * refresh FIFO size on ILK/SNB. 1932 */ 1933 if (INTEL_GEN(dev_priv) <= 6) 1934 fifo_size /= 2; 1935 } 1936 1937 if (config->sprites_enabled) { 1938 /* level 0 is always calculated with 1:1 split */ 1939 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 1940 if (is_sprite) 1941 fifo_size *= 5; 1942 fifo_size /= 6; 1943 } else { 1944 fifo_size /= 2; 1945 } 1946 } 1947 1948 /* clamp to max that the registers can hold */ 1949 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); 1950 } 1951 1952 /* Calculate the maximum cursor plane watermark */ 1953 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, 1954 int level, 1955 const struct intel_wm_config *config) 1956 { 1957 /* HSW LP1+ watermarks w/ multiple pipes */ 1958 if (level > 0 && config->num_pipes_active > 1) 1959 return 64; 1960 1961 /* otherwise just report max that registers can hold */ 1962 return ilk_cursor_wm_reg_max(to_i915(dev), level); 1963 } 1964 1965 static void ilk_compute_wm_maximums(struct drm_device *dev, 1966 int level, 1967 const struct intel_wm_config *config, 1968 enum intel_ddb_partitioning ddb_partitioning, 1969 struct ilk_wm_maximums *max) 1970 { 1971 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1972 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1973 max->cur = ilk_cursor_wm_max(dev, level, config); 1974 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev)); 1975 } 1976 1977 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, 1978 int level, 1979 struct ilk_wm_maximums *max) 1980 { 1981 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); 1982 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); 1983 max->cur = ilk_cursor_wm_reg_max(dev_priv, level); 1984 max->fbc = ilk_fbc_wm_reg_max(dev_priv); 1985 } 1986 1987 static bool ilk_validate_wm_level(int level, 1988 const struct ilk_wm_maximums *max, 1989 struct intel_wm_level *result) 1990 { 1991 bool ret; 1992 1993 /* already determined to be invalid? */ 1994 if (!result->enable) 1995 return false; 1996 1997 result->enable = result->pri_val <= max->pri && 1998 result->spr_val <= max->spr && 1999 result->cur_val <= max->cur; 2000 2001 ret = result->enable; 2002 2003 /* 2004 * HACK until we can pre-compute everything, 2005 * and thus fail gracefully if LP0 watermarks 2006 * are exceeded... 
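 * E.g. (illustrative values) a level 0 pri_val of 200 against a max->pri of 127 is clamped to 127 and logged, and the level is kept enabled so the modeset itself still succeeds.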
2007 */ 2008 if (level == 0 && !result->enable) { 2009 if (result->pri_val > max->pri) 2010 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", 2011 level, result->pri_val, max->pri); 2012 if (result->spr_val > max->spr) 2013 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", 2014 level, result->spr_val, max->spr); 2015 if (result->cur_val > max->cur) 2016 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", 2017 level, result->cur_val, max->cur); 2018 2019 result->pri_val = min_t(uint32_t, result->pri_val, max->pri); 2020 result->spr_val = min_t(uint32_t, result->spr_val, max->spr); 2021 result->cur_val = min_t(uint32_t, result->cur_val, max->cur); 2022 result->enable = true; 2023 } 2024 2025 return ret; 2026 } 2027 2028 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 2029 const struct intel_crtc *intel_crtc, 2030 int level, 2031 struct intel_crtc_state *cstate, 2032 struct intel_plane_state *pristate, 2033 struct intel_plane_state *sprstate, 2034 struct intel_plane_state *curstate, 2035 struct intel_wm_level *result) 2036 { 2037 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 2038 uint16_t spr_latency = dev_priv->wm.spr_latency[level]; 2039 uint16_t cur_latency = dev_priv->wm.cur_latency[level]; 2040 2041 /* WM1+ latency values stored in 0.5us units */ 2042 if (level > 0) { 2043 pri_latency *= 5; 2044 spr_latency *= 5; 2045 cur_latency *= 5; 2046 } 2047 2048 if (pristate) { 2049 result->pri_val = ilk_compute_pri_wm(cstate, pristate, 2050 pri_latency, level); 2051 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); 2052 } 2053 2054 if (sprstate) 2055 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); 2056 2057 if (curstate) 2058 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); 2059 2060 result->enable = true; 2061 } 2062 2063 static uint32_t 2064 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) 2065 { 2066 const struct intel_atomic_state *intel_state = 2067 to_intel_atomic_state(cstate->base.state); 2068 const struct drm_display_mode *adjusted_mode = 2069 &cstate->base.adjusted_mode; 2070 u32 linetime, ips_linetime; 2071 2072 if (!cstate->base.active) 2073 return 0; 2074 if (WARN_ON(adjusted_mode->crtc_clock == 0)) 2075 return 0; 2076 if (WARN_ON(intel_state->cdclk == 0)) 2077 return 0; 2078 2079 /* The WMs are computed based on how long it takes to fill a single 2080 * row at the given clock rate, multiplied by 8.
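 * For example (illustrative timing, not from bspec): htotal = 2200 at a crtc_clock of 148500 kHz gives DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. roughly 14.8us per row expressed in 0.125us units.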
2081 * */ 2082 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2083 adjusted_mode->crtc_clock); 2084 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2085 intel_state->cdclk); 2086 2087 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2088 PIPE_WM_LINETIME_TIME(linetime); 2089 } 2090 2091 static void intel_read_wm_latency(struct drm_i915_private *dev_priv, 2092 uint16_t wm[8]) 2093 { 2094 if (IS_GEN9(dev_priv)) { 2095 uint32_t val; 2096 int ret, i; 2097 int level, max_level = ilk_wm_max_level(dev_priv); 2098 2099 /* read the first set of memory latencies[0:3] */ 2100 val = 0; /* data0 to be programmed to 0 for first set */ 2101 mutex_lock(&dev_priv->rps.hw_lock); 2102 ret = sandybridge_pcode_read(dev_priv, 2103 GEN9_PCODE_READ_MEM_LATENCY, 2104 &val); 2105 mutex_unlock(&dev_priv->rps.hw_lock); 2106 2107 if (ret) { 2108 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 2109 return; 2110 } 2111 2112 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 2113 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 2114 GEN9_MEM_LATENCY_LEVEL_MASK; 2115 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 2116 GEN9_MEM_LATENCY_LEVEL_MASK; 2117 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 2118 GEN9_MEM_LATENCY_LEVEL_MASK; 2119 2120 /* read the second set of memory latencies[4:7] */ 2121 val = 1; /* data0 to be programmed to 1 for second set */ 2122 mutex_lock(&dev_priv->rps.hw_lock); 2123 ret = sandybridge_pcode_read(dev_priv, 2124 GEN9_PCODE_READ_MEM_LATENCY, 2125 &val); 2126 mutex_unlock(&dev_priv->rps.hw_lock); 2127 if (ret) { 2128 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 2129 return; 2130 } 2131 2132 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 2133 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 2134 GEN9_MEM_LATENCY_LEVEL_MASK; 2135 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 2136 GEN9_MEM_LATENCY_LEVEL_MASK; 2137 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 2138 GEN9_MEM_LATENCY_LEVEL_MASK; 2139 2140 /* 2141 * If a level n (n > 1) has a 0us latency, all levels m (m >= n) 2142 * need to be disabled. We make sure to sanitize the values out 2143 * of the punit to satisfy this requirement. 2144 */ 2145 for (level = 1; level <= max_level; level++) { 2146 if (wm[level] == 0) { 2147 for (i = level + 1; i <= max_level; i++) 2148 wm[i] = 0; 2149 break; 2150 } 2151 } 2152 2153 /* 2154 * WaWmMemoryReadLatency:skl 2155 * 2156 * punit doesn't take into account the read latency so we need 2157 * to add 2us to the various latency levels we retrieve from the 2158 * punit when level 0 response data us 0us. 
2159 */ 2160 if (wm[0] == 0) { 2161 wm[0] += 2; 2162 for (level = 1; level <= max_level; level++) { 2163 if (wm[level] == 0) 2164 break; 2165 wm[level] += 2; 2166 } 2167 } 2168 2169 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2170 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2171 2172 wm[0] = (sskpd >> 56) & 0xFF; 2173 if (wm[0] == 0) 2174 wm[0] = sskpd & 0xF; 2175 wm[1] = (sskpd >> 4) & 0xFF; 2176 wm[2] = (sskpd >> 12) & 0xFF; 2177 wm[3] = (sskpd >> 20) & 0x1FF; 2178 wm[4] = (sskpd >> 32) & 0x1FF; 2179 } else if (INTEL_GEN(dev_priv) >= 6) { 2180 uint32_t sskpd = I915_READ(MCH_SSKPD); 2181 2182 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; 2183 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; 2184 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; 2185 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; 2186 } else if (INTEL_GEN(dev_priv) >= 5) { 2187 uint32_t mltr = I915_READ(MLTR_ILK); 2188 2189 /* ILK primary LP0 latency is 700 ns */ 2190 wm[0] = 7; 2191 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; 2192 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; 2193 } 2194 } 2195 2196 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, 2197 uint16_t wm[5]) 2198 { 2199 /* ILK sprite LP0 latency is 1300 ns */ 2200 if (IS_GEN5(dev_priv)) 2201 wm[0] = 13; 2202 } 2203 2204 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, 2205 uint16_t wm[5]) 2206 { 2207 /* ILK cursor LP0 latency is 1300 ns */ 2208 if (IS_GEN5(dev_priv)) 2209 wm[0] = 13; 2210 2211 /* WaDoubleCursorLP3Latency:ivb */ 2212 if (IS_IVYBRIDGE(dev_priv)) 2213 wm[3] *= 2; 2214 } 2215 2216 int ilk_wm_max_level(const struct drm_i915_private *dev_priv) 2217 { 2218 /* how many WM levels are we expecting */ 2219 if (INTEL_GEN(dev_priv) >= 9) 2220 return 7; 2221 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2222 return 4; 2223 else if (INTEL_GEN(dev_priv) >= 6) 2224 return 3; 2225 else 2226 return 2; 2227 } 2228 2229 static void intel_print_wm_latency(struct drm_i915_private *dev_priv, 2230 const char *name, 2231 const uint16_t wm[8]) 2232 { 2233 int level, max_level = ilk_wm_max_level(dev_priv); 2234 2235 for (level = 0; level <= max_level; level++) { 2236 unsigned int latency = wm[level]; 2237 2238 if (latency == 0) { 2239 DRM_ERROR("%s WM%d latency not provided\n", 2240 name, level); 2241 continue; 2242 } 2243 2244 /* 2245 * - latencies are in us on gen9. 2246 * - before then, WM1+ latency values are in 0.5us units 2247 */ 2248 if (IS_GEN9(dev_priv)) 2249 latency *= 10; 2250 else if (level > 0) 2251 latency *= 5; 2252 2253 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", 2254 name, level, wm[level], 2255 latency / 10, latency % 10); 2256 } 2257 } 2258 2259 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2260 uint16_t wm[5], uint16_t min) 2261 { 2262 int level, max_level = ilk_wm_max_level(dev_priv); 2263 2264 if (wm[0] >= min) 2265 return false; 2266 2267 wm[0] = max(wm[0], min); 2268 for (level = 1; level <= max_level; level++) 2269 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); 2270 2271 return true; 2272 } 2273 2274 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) 2275 { 2276 bool changed; 2277 2278 /* 2279 * The BIOS provided WM memory latency values are often 2280 * inadequate for high resolution displays. Adjust them. 
2281 */ 2282 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 2283 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 2284 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 2285 2286 if (!changed) 2287 return; 2288 2289 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); 2290 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); 2291 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 2292 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 2293 } 2294 2295 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) 2296 { 2297 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); 2298 2299 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, 2300 sizeof(dev_priv->wm.pri_latency)); 2301 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, 2302 sizeof(dev_priv->wm.pri_latency)); 2303 2304 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency); 2305 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency); 2306 2307 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); 2308 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 2309 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 2310 2311 if (IS_GEN6(dev_priv)) 2312 snb_wm_latency_quirk(dev_priv); 2313 } 2314 2315 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) 2316 { 2317 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency); 2318 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); 2319 } 2320 2321 static bool ilk_validate_pipe_wm(struct drm_device *dev, 2322 struct intel_pipe_wm *pipe_wm) 2323 { 2324 /* LP0 watermark maximums depend on this pipe alone */ 2325 const struct intel_wm_config config = { 2326 .num_pipes_active = 1, 2327 .sprites_enabled = pipe_wm->sprites_enabled, 2328 .sprites_scaled = pipe_wm->sprites_scaled, 2329 }; 2330 struct ilk_wm_maximums max; 2331 2332 /* LP0 watermarks always use 1/2 DDB partitioning */ 2333 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2334 2335 /* At least LP0 must be valid */ 2336 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { 2337 DRM_DEBUG_KMS("LP0 watermark invalid\n"); 2338 return false; 2339 } 2340 2341 return true; 2342 } 2343 2344 /* Compute new watermarks for the pipe */ 2345 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) 2346 { 2347 struct drm_atomic_state *state = cstate->base.state; 2348 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2349 struct intel_pipe_wm *pipe_wm; 2350 struct drm_device *dev = state->dev; 2351 const struct drm_i915_private *dev_priv = to_i915(dev); 2352 struct intel_plane *intel_plane; 2353 struct intel_plane_state *pristate = NULL; 2354 struct intel_plane_state *sprstate = NULL; 2355 struct intel_plane_state *curstate = NULL; 2356 int level, max_level = ilk_wm_max_level(dev_priv), usable_level; 2357 struct ilk_wm_maximums max; 2358 2359 pipe_wm = &cstate->wm.ilk.optimal; 2360 2361 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2362 struct intel_plane_state *ps; 2363 2364 ps = intel_atomic_get_existing_plane_state(state, 2365 intel_plane); 2366 if (!ps) 2367 continue; 2368 2369 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY) 2370 pristate = ps; 2371 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) 2372 sprstate = ps; 2373 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 2374 curstate = ps; 
2375 } 2376 2377 pipe_wm->pipe_enabled = cstate->base.active; 2378 if (sprstate) { 2379 pipe_wm->sprites_enabled = sprstate->base.visible; 2380 pipe_wm->sprites_scaled = sprstate->base.visible && 2381 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 || 2382 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16); 2383 } 2384 2385 usable_level = max_level; 2386 2387 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2388 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled) 2389 usable_level = 1; 2390 2391 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 2392 if (pipe_wm->sprites_scaled) 2393 usable_level = 0; 2394 2395 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, 2396 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]); 2397 2398 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); 2399 pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 2400 2401 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2402 pipe_wm->linetime = hsw_compute_linetime_wm(cstate); 2403 2404 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 2405 return -EINVAL; 2406 2407 ilk_compute_wm_reg_maximums(dev_priv, 1, &max); 2408 2409 for (level = 1; level <= max_level; level++) { 2410 struct intel_wm_level *wm = &pipe_wm->raw_wm[level]; 2411 2412 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, 2413 pristate, sprstate, curstate, wm); 2414 2415 /* 2416 * Disable any watermark level that exceeds the 2417 * register maximums since such watermarks are 2418 * always invalid. 2419 */ 2420 if (level > usable_level) 2421 continue; 2422 2423 if (ilk_validate_wm_level(level, &max, wm)) 2424 pipe_wm->wm[level] = *wm; 2425 else 2426 usable_level = level; 2427 } 2428 2429 return 0; 2430 } 2431 2432 /* 2433 * Build a set of 'intermediate' watermark values that satisfy both the old 2434 * state and the new state. These can be programmed to the hardware 2435 * immediately. 2436 */ 2437 static int ilk_compute_intermediate_wm(struct drm_device *dev, 2438 struct intel_crtc *intel_crtc, 2439 struct intel_crtc_state *newstate) 2440 { 2441 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; 2442 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; 2443 int level, max_level = ilk_wm_max_level(to_i915(dev)); 2444 2445 /* 2446 * Start with the final, target watermarks, then combine with the 2447 * currently active watermarks to get values that are safe both before 2448 * and after the vblank. 2449 */ 2450 *a = newstate->wm.ilk.optimal; 2451 a->pipe_enabled |= b->pipe_enabled; 2452 a->sprites_enabled |= b->sprites_enabled; 2453 a->sprites_scaled |= b->sprites_scaled; 2454 2455 for (level = 0; level <= max_level; level++) { 2456 struct intel_wm_level *a_wm = &a->wm[level]; 2457 const struct intel_wm_level *b_wm = &b->wm[level]; 2458 2459 a_wm->enable &= b_wm->enable; 2460 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); 2461 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); 2462 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); 2463 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); 2464 } 2465 2466 /* 2467 * We need to make sure that these merged watermark values are 2468 * actually a valid configuration themselves. If they're not, 2469 * there's no safe way to transition from the old state to 2470 * the new state, so we need to fail the atomic transaction. 2471 */ 2472 if (!ilk_validate_pipe_wm(dev, a)) 2473 return -EINVAL; 2474 2475 /* 2476 * If our intermediate WM are identical to the final WM, then we can 2477 * omit the post-vblank programming; only update if it's different. 
2478 */ 2479 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0) 2480 newstate->wm.need_postvbl_update = false; 2481 2482 return 0; 2483 } 2484 2485 /* 2486 * Merge the watermarks from all active pipes for a specific level. 2487 */ 2488 static void ilk_merge_wm_level(struct drm_device *dev, 2489 int level, 2490 struct intel_wm_level *ret_wm) 2491 { 2492 const struct intel_crtc *intel_crtc; 2493 2494 ret_wm->enable = true; 2495 2496 for_each_intel_crtc(dev, intel_crtc) { 2497 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; 2498 const struct intel_wm_level *wm = &active->wm[level]; 2499 2500 if (!active->pipe_enabled) 2501 continue; 2502 2503 /* 2504 * The watermark values may have been used in the past, 2505 * so we must maintain them in the registers for some 2506 * time even if the level is now disabled. 2507 */ 2508 if (!wm->enable) 2509 ret_wm->enable = false; 2510 2511 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 2512 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 2513 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 2514 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 2515 } 2516 } 2517 2518 /* 2519 * Merge all low power watermarks for all active pipes. 2520 */ 2521 static void ilk_wm_merge(struct drm_device *dev, 2522 const struct intel_wm_config *config, 2523 const struct ilk_wm_maximums *max, 2524 struct intel_pipe_wm *merged) 2525 { 2526 struct drm_i915_private *dev_priv = to_i915(dev); 2527 int level, max_level = ilk_wm_max_level(dev_priv); 2528 int last_enabled_level = max_level; 2529 2530 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 2531 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && 2532 config->num_pipes_active > 1) 2533 last_enabled_level = 0; 2534 2535 /* ILK: FBC WM must be disabled always */ 2536 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6; 2537 2538 /* merge each WM1+ level */ 2539 for (level = 1; level <= max_level; level++) { 2540 struct intel_wm_level *wm = &merged->wm[level]; 2541 2542 ilk_merge_wm_level(dev, level, wm); 2543 2544 if (level > last_enabled_level) 2545 wm->enable = false; 2546 else if (!ilk_validate_wm_level(level, max, wm)) 2547 /* make sure all following levels get disabled */ 2548 last_enabled_level = level - 1; 2549 2550 /* 2551 * The spec says it is preferred to disable 2552 * FBC WMs instead of disabling a WM level. 2553 */ 2554 if (wm->fbc_val > max->fbc) { 2555 if (wm->enable) 2556 merged->fbc_wm_enabled = false; 2557 wm->fbc_val = 0; 2558 } 2559 } 2560 2561 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 2562 /* 2563 * FIXME this is racy. FBC might get enabled later. 2564 * What we should check here is whether FBC can be 2565 * enabled sometime later. 
2566 */ 2567 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled && 2568 intel_fbc_is_active(dev_priv)) { 2569 for (level = 2; level <= max_level; level++) { 2570 struct intel_wm_level *wm = &merged->wm[level]; 2571 2572 wm->enable = false; 2573 } 2574 } 2575 } 2576 2577 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 2578 { 2579 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ 2580 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 2581 } 2582 2583 /* The value we need to program into the WM_LPx latency field */ 2584 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 2585 { 2586 struct drm_i915_private *dev_priv = to_i915(dev); 2587 2588 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2589 return 2 * level; 2590 else 2591 return dev_priv->wm.pri_latency[level]; 2592 } 2593 2594 static void ilk_compute_wm_results(struct drm_device *dev, 2595 const struct intel_pipe_wm *merged, 2596 enum intel_ddb_partitioning partitioning, 2597 struct ilk_wm_values *results) 2598 { 2599 struct drm_i915_private *dev_priv = to_i915(dev); 2600 struct intel_crtc *intel_crtc; 2601 int level, wm_lp; 2602 2603 results->enable_fbc_wm = merged->fbc_wm_enabled; 2604 results->partitioning = partitioning; 2605 2606 /* LP1+ register values */ 2607 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2608 const struct intel_wm_level *r; 2609 2610 level = ilk_wm_lp_to_level(wm_lp, merged); 2611 2612 r = &merged->wm[level]; 2613 2614 /* 2615 * Maintain the watermark values even if the level is 2616 * disabled. Doing otherwise could cause underruns. 2617 */ 2618 results->wm_lp[wm_lp - 1] = 2619 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | 2620 (r->pri_val << WM1_LP_SR_SHIFT) | 2621 r->cur_val; 2622 2623 if (r->enable) 2624 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; 2625 2626 if (INTEL_GEN(dev_priv) >= 8) 2627 results->wm_lp[wm_lp - 1] |= 2628 r->fbc_val << WM1_LP_FBC_SHIFT_BDW; 2629 else 2630 results->wm_lp[wm_lp - 1] |= 2631 r->fbc_val << WM1_LP_FBC_SHIFT; 2632 2633 /* 2634 * Always set WM1S_LP_EN when spr_val != 0, even if the 2635 * level is disabled. Doing otherwise could cause underruns. 2636 */ 2637 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) { 2638 WARN_ON(wm_lp != 1); 2639 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; 2640 } else 2641 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2642 } 2643 2644 /* LP0 register values */ 2645 for_each_intel_crtc(dev, intel_crtc) { 2646 enum i915_pipe pipe = intel_crtc->pipe; 2647 const struct intel_wm_level *r = 2648 &intel_crtc->wm.active.ilk.wm[0]; 2649 2650 if (WARN_ON(!r->enable)) 2651 continue; 2652 2653 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime; 2654 2655 results->wm_pipe[pipe] = 2656 (r->pri_val << WM0_PIPE_PLANE_SHIFT) | 2657 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | 2658 r->cur_val; 2659 } 2660 } 2661 2662 /* Find the result with the highest level enabled. Check for enable_fbc_wm in 2663 * case both are at the same level. Prefer r1 in case they're the same. 
*/ 2664 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, 2665 struct intel_pipe_wm *r1, 2666 struct intel_pipe_wm *r2) 2667 { 2668 int level, max_level = ilk_wm_max_level(to_i915(dev)); 2669 int level1 = 0, level2 = 0; 2670 2671 for (level = 1; level <= max_level; level++) { 2672 if (r1->wm[level].enable) 2673 level1 = level; 2674 if (r2->wm[level].enable) 2675 level2 = level; 2676 } 2677 2678 if (level1 == level2) { 2679 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) 2680 return r2; 2681 else 2682 return r1; 2683 } else if (level1 > level2) { 2684 return r1; 2685 } else { 2686 return r2; 2687 } 2688 } 2689 2690 /* dirty bits used to track which watermarks need changes */ 2691 #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) 2692 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) 2693 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) 2694 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) 2695 #define WM_DIRTY_FBC (1 << 24) 2696 #define WM_DIRTY_DDB (1 << 25) 2697 2698 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, 2699 const struct ilk_wm_values *old, 2700 const struct ilk_wm_values *new) 2701 { 2702 unsigned int dirty = 0; 2703 enum i915_pipe pipe; 2704 int wm_lp; 2705 2706 for_each_pipe(dev_priv, pipe) { 2707 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 2708 dirty |= WM_DIRTY_LINETIME(pipe); 2709 /* Must disable LP1+ watermarks too */ 2710 dirty |= WM_DIRTY_LP_ALL; 2711 } 2712 2713 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { 2714 dirty |= WM_DIRTY_PIPE(pipe); 2715 /* Must disable LP1+ watermarks too */ 2716 dirty |= WM_DIRTY_LP_ALL; 2717 } 2718 } 2719 2720 if (old->enable_fbc_wm != new->enable_fbc_wm) { 2721 dirty |= WM_DIRTY_FBC; 2722 /* Must disable LP1+ watermarks too */ 2723 dirty |= WM_DIRTY_LP_ALL; 2724 } 2725 2726 if (old->partitioning != new->partitioning) { 2727 dirty |= WM_DIRTY_DDB; 2728 /* Must disable LP1+ watermarks too */ 2729 dirty |= WM_DIRTY_LP_ALL; 2730 } 2731 2732 /* LP1+ watermarks already deemed dirty, no need to continue */ 2733 if (dirty & WM_DIRTY_LP_ALL) 2734 return dirty; 2735 2736 /* Find the lowest numbered LP1+ watermark in need of an update... */ 2737 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2738 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || 2739 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) 2740 break; 2741 } 2742 2743 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ 2744 for (; wm_lp <= 3; wm_lp++) 2745 dirty |= WM_DIRTY_LP(wm_lp); 2746 2747 return dirty; 2748 } 2749 2750 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, 2751 unsigned int dirty) 2752 { 2753 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2754 bool changed = false; 2755 2756 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { 2757 previous->wm_lp[2] &= ~WM1_LP_SR_EN; 2758 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); 2759 changed = true; 2760 } 2761 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { 2762 previous->wm_lp[1] &= ~WM1_LP_SR_EN; 2763 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); 2764 changed = true; 2765 } 2766 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { 2767 previous->wm_lp[0] &= ~WM1_LP_SR_EN; 2768 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); 2769 changed = true; 2770 } 2771 2772 /* 2773 * Don't touch WM1S_LP_EN here. 2774 * Doing so could cause underruns. 
2775 */ 2776 2777 return changed; 2778 } 2779 2780 /* 2781 * The spec says we shouldn't write when we don't need, because every write 2782 * causes WMs to be re-evaluated, expending some power. 2783 */ 2784 static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 2785 struct ilk_wm_values *results) 2786 { 2787 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2788 unsigned int dirty; 2789 uint32_t val; 2790 2791 dirty = ilk_compute_wm_dirty(dev_priv, previous, results); 2792 if (!dirty) 2793 return; 2794 2795 _ilk_disable_lp_wm(dev_priv, dirty); 2796 2797 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 2798 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2799 if (dirty & WM_DIRTY_PIPE(PIPE_B)) 2800 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 2801 if (dirty & WM_DIRTY_PIPE(PIPE_C)) 2802 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 2803 2804 if (dirty & WM_DIRTY_LINETIME(PIPE_A)) 2805 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 2806 if (dirty & WM_DIRTY_LINETIME(PIPE_B)) 2807 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 2808 if (dirty & WM_DIRTY_LINETIME(PIPE_C)) 2809 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2810 2811 if (dirty & WM_DIRTY_DDB) { 2812 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2813 val = I915_READ(WM_MISC); 2814 if (results->partitioning == INTEL_DDB_PART_1_2) 2815 val &= ~WM_MISC_DATA_PARTITION_5_6; 2816 else 2817 val |= WM_MISC_DATA_PARTITION_5_6; 2818 I915_WRITE(WM_MISC, val); 2819 } else { 2820 val = I915_READ(DISP_ARB_CTL2); 2821 if (results->partitioning == INTEL_DDB_PART_1_2) 2822 val &= ~DISP_DATA_PARTITION_5_6; 2823 else 2824 val |= DISP_DATA_PARTITION_5_6; 2825 I915_WRITE(DISP_ARB_CTL2, val); 2826 } 2827 } 2828 2829 if (dirty & WM_DIRTY_FBC) { 2830 val = I915_READ(DISP_ARB_CTL); 2831 if (results->enable_fbc_wm) 2832 val &= ~DISP_FBC_WM_DIS; 2833 else 2834 val |= DISP_FBC_WM_DIS; 2835 I915_WRITE(DISP_ARB_CTL, val); 2836 } 2837 2838 if (dirty & WM_DIRTY_LP(1) && 2839 previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 2840 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2841 2842 if (INTEL_GEN(dev_priv) >= 7) { 2843 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 2844 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 2845 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 2846 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 2847 } 2848 2849 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 2850 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2851 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 2852 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2853 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 2854 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2855 2856 dev_priv->wm.hw = *results; 2857 } 2858 2859 bool ilk_disable_lp_wm(struct drm_device *dev) 2860 { 2861 struct drm_i915_private *dev_priv = to_i915(dev); 2862 2863 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 2864 } 2865 2866 #define SKL_SAGV_BLOCK_TIME 30 /* µs */ 2867 2868 /* 2869 * Return the index of a plane in the SKL DDB and wm result arrays. Primary 2870 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and 2871 * other universal planes are in indices 1..n. Note that this may leave unused 2872 * indices between the top "sprite" plane and the cursor. 
2873 */ 2874 static int 2875 skl_wm_plane_id(const struct intel_plane *plane) 2876 { 2877 switch (plane->base.type) { 2878 case DRM_PLANE_TYPE_PRIMARY: 2879 return 0; 2880 case DRM_PLANE_TYPE_CURSOR: 2881 return PLANE_CURSOR; 2882 case DRM_PLANE_TYPE_OVERLAY: 2883 return plane->plane + 1; 2884 default: 2885 MISSING_CASE(plane->base.type); 2886 return plane->plane; 2887 } 2888 } 2889 2890 /* 2891 * FIXME: We still don't have the proper code detect if we need to apply the WA, 2892 * so assume we'll always need it in order to avoid underruns. 2893 */ 2894 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) 2895 { 2896 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2897 2898 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || 2899 IS_KABYLAKE(dev_priv)) 2900 return true; 2901 2902 return false; 2903 } 2904 2905 static bool 2906 intel_has_sagv(struct drm_i915_private *dev_priv) 2907 { 2908 if (IS_KABYLAKE(dev_priv)) 2909 return true; 2910 2911 if (IS_SKYLAKE(dev_priv) && 2912 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED) 2913 return true; 2914 2915 return false; 2916 } 2917 2918 /* 2919 * SAGV dynamically adjusts the system agent voltage and clock frequencies 2920 * depending on power and performance requirements. The display engine access 2921 * to system memory is blocked during the adjustment time. Because of the 2922 * blocking time, having this enabled can cause full system hangs and/or pipe 2923 * underruns if we don't meet all of the following requirements: 2924 * 2925 * - <= 1 pipe enabled 2926 * - All planes can enable watermarks for latencies >= SAGV engine block time 2927 * - We're not using an interlaced display configuration 2928 */ 2929 int 2930 intel_enable_sagv(struct drm_i915_private *dev_priv) 2931 { 2932 int ret; 2933 2934 if (!intel_has_sagv(dev_priv)) 2935 return 0; 2936 2937 if (dev_priv->sagv_status == I915_SAGV_ENABLED) 2938 return 0; 2939 2940 DRM_DEBUG_KMS("Enabling the SAGV\n"); 2941 mutex_lock(&dev_priv->rps.hw_lock); 2942 2943 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, 2944 GEN9_SAGV_ENABLE); 2945 2946 /* We don't need to wait for the SAGV when enabling */ 2947 mutex_unlock(&dev_priv->rps.hw_lock); 2948 2949 /* 2950 * Some skl systems, pre-release machines in particular, 2951 * don't actually have an SAGV. 2952 */ 2953 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { 2954 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); 2955 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; 2956 return 0; 2957 } else if (ret < 0) { 2958 DRM_ERROR("Failed to enable the SAGV\n"); 2959 return ret; 2960 } 2961 2962 dev_priv->sagv_status = I915_SAGV_ENABLED; 2963 return 0; 2964 } 2965 2966 int 2967 intel_disable_sagv(struct drm_i915_private *dev_priv) 2968 { 2969 int ret; 2970 2971 if (!intel_has_sagv(dev_priv)) 2972 return 0; 2973 2974 if (dev_priv->sagv_status == I915_SAGV_DISABLED) 2975 return 0; 2976 2977 DRM_DEBUG_KMS("Disabling the SAGV\n"); 2978 mutex_lock(&dev_priv->rps.hw_lock); 2979 2980 /* bspec says to keep retrying for at least 1 ms */ 2981 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, 2982 GEN9_SAGV_DISABLE, 2983 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 2984 1); 2985 mutex_unlock(&dev_priv->rps.hw_lock); 2986 2987 /* 2988 * Some skl systems, pre-release machines in particular, 2989 * don't actually have an SAGV. 
2990 */ 2991 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { 2992 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); 2993 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; 2994 return 0; 2995 } else if (ret < 0) { 2996 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret); 2997 return ret; 2998 } 2999 3000 dev_priv->sagv_status = I915_SAGV_DISABLED; 3001 return 0; 3002 } 3003 3004 bool intel_can_enable_sagv(struct drm_atomic_state *state) 3005 { 3006 struct drm_device *dev = state->dev; 3007 struct drm_i915_private *dev_priv = to_i915(dev); 3008 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3009 struct intel_crtc *crtc; 3010 struct intel_plane *plane; 3011 struct intel_crtc_state *cstate; 3012 struct skl_plane_wm *wm; 3013 enum i915_pipe pipe; 3014 int level, latency; 3015 3016 if (!intel_has_sagv(dev_priv)) 3017 return false; 3018 3019 /* 3020 * SKL workaround: bspec recommends we disable the SAGV when we have 3021 * more then one pipe enabled 3022 * 3023 * If there are no active CRTCs, no additional checks need be performed 3024 */ 3025 if (hweight32(intel_state->active_crtcs) == 0) 3026 return true; 3027 else if (hweight32(intel_state->active_crtcs) > 1) 3028 return false; 3029 3030 /* Since we're now guaranteed to only have one active CRTC... */ 3031 pipe = ffs(intel_state->active_crtcs) - 1; 3032 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 3033 cstate = to_intel_crtc_state(crtc->base.state); 3034 3035 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3036 return false; 3037 3038 for_each_intel_plane_on_crtc(dev, crtc, plane) { 3039 wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)]; 3040 3041 /* Skip this plane if it's not enabled */ 3042 if (!wm->wm[0].plane_en) 3043 continue; 3044 3045 /* Find the highest enabled wm level for this plane */ 3046 for (level = ilk_wm_max_level(dev_priv); 3047 !wm->wm[level].plane_en; --level) 3048 { } 3049 3050 latency = dev_priv->wm.skl_latency[level]; 3051 3052 if (skl_needs_memory_bw_wa(intel_state) && 3053 plane->base.state->fb->modifier == 3054 I915_FORMAT_MOD_X_TILED) 3055 latency += 15; 3056 3057 /* 3058 * If any of the planes on this pipe don't enable wm levels 3059 * that incur memory latencies higher then 30µs we can't enable 3060 * the SAGV 3061 */ 3062 if (latency < SKL_SAGV_BLOCK_TIME) 3063 return false; 3064 } 3065 3066 return true; 3067 } 3068 3069 static void 3070 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 3071 const struct intel_crtc_state *cstate, 3072 struct skl_ddb_entry *alloc, /* out */ 3073 int *num_active /* out */) 3074 { 3075 struct drm_atomic_state *state = cstate->base.state; 3076 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3077 struct drm_i915_private *dev_priv = to_i915(dev); 3078 struct drm_crtc *for_crtc = cstate->base.crtc; 3079 unsigned int pipe_size, ddb_size; 3080 int nth_active_pipe; 3081 3082 if (WARN_ON(!state) || !cstate->base.active) { 3083 alloc->start = 0; 3084 alloc->end = 0; 3085 *num_active = hweight32(dev_priv->active_crtcs); 3086 return; 3087 } 3088 3089 if (intel_state->active_pipe_changes) 3090 *num_active = hweight32(intel_state->active_crtcs); 3091 else 3092 *num_active = hweight32(dev_priv->active_crtcs); 3093 3094 ddb_size = INTEL_INFO(dev_priv)->ddb_size; 3095 WARN_ON(ddb_size == 0); 3096 3097 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 3098 3099 /* 3100 * If the state doesn't change the active CRTC's, then there's 3101 * no need to recalculate; the existing pipe allocation limits 3102 
* should remain unchanged. Note that we're safe from racing 3103 * commits since any racing commit that changes the active CRTC 3104 * list would need to grab _all_ crtc locks, including the one 3105 * we currently hold. 3106 */ 3107 if (!intel_state->active_pipe_changes) { 3108 /* 3109 * alloc may be cleared by clear_intel_crtc_state, 3110 * copy from old state to be sure 3111 */ 3112 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb; 3113 return; 3114 } 3115 3116 nth_active_pipe = hweight32(intel_state->active_crtcs & 3117 (drm_crtc_mask(for_crtc) - 1)); 3118 pipe_size = ddb_size / hweight32(intel_state->active_crtcs); 3119 alloc->start = nth_active_pipe * ddb_size / *num_active; 3120 alloc->end = alloc->start + pipe_size; 3121 } 3122 3123 static unsigned int skl_cursor_allocation(int num_active) 3124 { 3125 if (num_active == 1) 3126 return 32; 3127 3128 return 8; 3129 } 3130 3131 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) 3132 { 3133 entry->start = reg & 0x3ff; 3134 entry->end = (reg >> 16) & 0x3ff; 3135 if (entry->end) 3136 entry->end += 1; 3137 } 3138 3139 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 3140 struct skl_ddb_allocation *ddb /* out */) 3141 { 3142 enum i915_pipe pipe; 3143 int plane; 3144 u32 val; 3145 3146 memset(ddb, 0, sizeof(*ddb)); 3147 3148 for_each_pipe(dev_priv, pipe) { 3149 enum intel_display_power_domain power_domain; 3150 3151 power_domain = POWER_DOMAIN_PIPE(pipe); 3152 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 3153 continue; 3154 3155 for_each_universal_plane(dev_priv, pipe, plane) { 3156 val = I915_READ(PLANE_BUF_CFG(pipe, plane)); 3157 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], 3158 val); 3159 } 3160 3161 val = I915_READ(CUR_BUF_CFG(pipe)); 3162 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 3163 val); 3164 3165 intel_display_power_put(dev_priv, power_domain); 3166 } 3167 } 3168 3169 /* 3170 * Determines the downscale amount of a plane for the purposes of watermark calculations. 3171 * The bspec defines downscale amount as: 3172 * 3173 * """ 3174 * Horizontal down scale amount = maximum[1, Horizontal source size / 3175 * Horizontal destination size] 3176 * Vertical down scale amount = maximum[1, Vertical source size / 3177 * Vertical destination size] 3178 * Total down scale amount = Horizontal down scale amount * 3179 * Vertical down scale amount 3180 * """ 3181 * 3182 * Return value is provided in 16.16 fixed point form to retain fractional part. 3183 * Caller should take care of dividing & rounding off the value. 
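 * E.g. (illustrative numbers) a 3840x2160 source rect on a 1920x1080 destination gives 2 * 2 = 4, returned as 4 << 16 (0x40000) in 16.16 form.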
3184 */ 3185 static uint32_t 3186 skl_plane_downscale_amount(const struct intel_plane_state *pstate) 3187 { 3188 uint32_t downscale_h, downscale_w; 3189 uint32_t src_w, src_h, dst_w, dst_h; 3190 3191 if (WARN_ON(!pstate->base.visible)) 3192 return DRM_PLANE_HELPER_NO_SCALING; 3193 3194 /* n.b., src is 16.16 fixed point, dst is whole integer */ 3195 src_w = drm_rect_width(&pstate->base.src); 3196 src_h = drm_rect_height(&pstate->base.src); 3197 dst_w = drm_rect_width(&pstate->base.dst); 3198 dst_h = drm_rect_height(&pstate->base.dst); 3199 if (drm_rotation_90_or_270(pstate->base.rotation)) 3200 swap(dst_w, dst_h); 3201 3202 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); 3203 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); 3204 3205 /* Provide result in 16.16 fixed point */ 3206 return (uint64_t)downscale_w * downscale_h >> 16; 3207 } 3208 3209 static unsigned int 3210 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 3211 const struct drm_plane_state *pstate, 3212 int y) 3213 { 3214 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 3215 struct drm_framebuffer *fb = pstate->fb; 3216 uint32_t down_scale_amount, data_rate; 3217 uint32_t width = 0, height = 0; 3218 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888; 3219 3220 if (!intel_pstate->base.visible) 3221 return 0; 3222 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) 3223 return 0; 3224 if (y && format != DRM_FORMAT_NV12) 3225 return 0; 3226 3227 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3228 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3229 3230 if (drm_rotation_90_or_270(pstate->rotation)) 3231 swap(width, height); 3232 3233 /* for planar format */ 3234 if (format == DRM_FORMAT_NV12) { 3235 if (y) /* y-plane data rate */ 3236 data_rate = width * height * 3237 drm_format_plane_cpp(format, 0); 3238 else /* uv-plane data rate */ 3239 data_rate = (width / 2) * (height / 2) * 3240 drm_format_plane_cpp(format, 1); 3241 } else { 3242 /* for packed formats */ 3243 data_rate = width * height * drm_format_plane_cpp(format, 0); 3244 } 3245 3246 down_scale_amount = skl_plane_downscale_amount(intel_pstate); 3247 3248 return (uint64_t)data_rate * down_scale_amount >> 16; 3249 } 3250 3251 /* 3252 * We don't overflow 32 bits. 
Worst case is 3 planes enabled, each fetching 3253 * a 8192x4096@32bpp framebuffer: 3254 * 3 * 4096 * 8192 * 4 < 2^32 3255 */ 3256 static unsigned int 3257 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, 3258 unsigned *plane_data_rate, 3259 unsigned *plane_y_data_rate) 3260 { 3261 struct drm_crtc_state *cstate = &intel_cstate->base; 3262 struct drm_atomic_state *state = cstate->state; 3263 const struct drm_plane *plane; 3264 const struct intel_plane *intel_plane; 3265 struct drm_plane_state *pstate; 3266 unsigned int rate, total_data_rate = 0; 3267 int id; 3268 3269 if (WARN_ON(!state)) 3270 return 0; 3271 3272 /* Calculate and cache data rate for each plane */ 3273 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { 3274 id = skl_wm_plane_id(to_intel_plane(plane)); 3275 intel_plane = to_intel_plane(plane); 3276 3277 /* packed/uv */ 3278 rate = skl_plane_relative_data_rate(intel_cstate, 3279 pstate, 0); 3280 plane_data_rate[id] = rate; 3281 3282 total_data_rate += rate; 3283 3284 /* y-plane */ 3285 rate = skl_plane_relative_data_rate(intel_cstate, 3286 pstate, 1); 3287 plane_y_data_rate[id] = rate; 3288 3289 total_data_rate += rate; 3290 } 3291 3292 return total_data_rate; 3293 } 3294 3295 static uint16_t 3296 skl_ddb_min_alloc(const struct drm_plane_state *pstate, 3297 const int y) 3298 { 3299 struct drm_framebuffer *fb = pstate->fb; 3300 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 3301 uint32_t src_w, src_h; 3302 uint32_t min_scanlines = 8; 3303 uint8_t plane_bpp; 3304 3305 if (WARN_ON(!fb)) 3306 return 0; 3307 3308 /* For packed formats, no y-plane, return 0 */ 3309 if (y && fb->pixel_format != DRM_FORMAT_NV12) 3310 return 0; 3311 3312 /* For Non Y-tile return 8-blocks */ 3313 if (fb->modifier != I915_FORMAT_MOD_Y_TILED && 3314 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 3315 return 8; 3316 3317 src_w = drm_rect_width(&intel_pstate->base.src) >> 16; 3318 src_h = drm_rect_height(&intel_pstate->base.src) >> 16; 3319 3320 if (drm_rotation_90_or_270(pstate->rotation)) 3321 swap(src_w, src_h); 3322 3323 /* Halve UV plane width and height for NV12 */ 3324 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) { 3325 src_w /= 2; 3326 src_h /= 2; 3327 } 3328 3329 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) 3330 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1); 3331 else 3332 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0); 3333 3334 if (drm_rotation_90_or_270(pstate->rotation)) { 3335 switch (plane_bpp) { 3336 case 1: 3337 min_scanlines = 32; 3338 break; 3339 case 2: 3340 min_scanlines = 16; 3341 break; 3342 case 4: 3343 min_scanlines = 8; 3344 break; 3345 case 8: 3346 min_scanlines = 4; 3347 break; 3348 default: 3349 WARN(1, "Unsupported pixel depth %u for rotation", 3350 plane_bpp); 3351 min_scanlines = 32; 3352 } 3353 } 3354 3355 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3; 3356 } 3357 3358 static void 3359 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, 3360 uint16_t *minimum, uint16_t *y_minimum) 3361 { 3362 struct drm_plane_state *pstate; 3363 struct drm_plane *plane; 3364 3365 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { 3366 struct intel_plane *intel_plane = to_intel_plane(plane); 3367 int id = skl_wm_plane_id(intel_plane); 3368 3369 if (id == PLANE_CURSOR) 3370 continue; 3371 3372 if (!pstate->visible) 3373 continue; 3374 3375 minimum[id] = skl_ddb_min_alloc(pstate, 0); 3376 y_minimum[id] = skl_ddb_min_alloc(pstate, 1); 3377 } 3378 
3379 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); 3380 } 3381 3382 static int 3383 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 3384 struct skl_ddb_allocation *ddb /* out */) 3385 { 3386 struct drm_atomic_state *state = cstate->base.state; 3387 struct drm_crtc *crtc = cstate->base.crtc; 3388 struct drm_device *dev = crtc->dev; 3389 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3390 enum i915_pipe pipe = intel_crtc->pipe; 3391 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; 3392 uint16_t alloc_size, start; 3393 uint16_t minimum[I915_MAX_PLANES] = {}; 3394 uint16_t y_minimum[I915_MAX_PLANES] = {}; 3395 unsigned int total_data_rate; 3396 int num_active; 3397 int id, i; 3398 unsigned plane_data_rate[I915_MAX_PLANES] = {}; 3399 unsigned plane_y_data_rate[I915_MAX_PLANES] = {}; 3400 3401 /* Clear the partitioning for disabled planes. */ 3402 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 3403 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); 3404 3405 if (WARN_ON(!state)) 3406 return 0; 3407 3408 if (!cstate->base.active) { 3409 alloc->start = alloc->end = 0; 3410 return 0; 3411 } 3412 3413 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active); 3414 alloc_size = skl_ddb_entry_size(alloc); 3415 if (alloc_size == 0) { 3416 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 3417 return 0; 3418 } 3419 3420 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum); 3421 3422 /* 3423 * 1. Allocate the minimum required blocks for each active plane 3424 * and allocate the cursor; it doesn't require extra allocation 3425 * proportional to the data rate. 3426 */ 3427 3428 for (i = 0; i < I915_MAX_PLANES; i++) { 3429 alloc_size -= minimum[i]; 3430 alloc_size -= y_minimum[i]; 3431 } 3432 3433 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; 3434 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 3435 3436 /* 3437 * 2. Distribute the remaining space in proportion to the amount of 3438 * data each plane needs to fetch from memory. 3439 * 3440 * FIXME: we may not allocate every single block here.
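 * E.g. (illustrative numbers) with 200 blocks left after the minimums and two packed-format planes whose data rates are 300 and 100, the first receives its minimum plus 150 blocks and the second its minimum plus 50.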
3441 */ 3442 total_data_rate = skl_get_total_relative_data_rate(cstate, 3443 plane_data_rate, 3444 plane_y_data_rate); 3445 if (total_data_rate == 0) 3446 return 0; 3447 3448 start = alloc->start; 3449 for (id = 0; id < I915_MAX_PLANES; id++) { 3450 unsigned int data_rate, y_data_rate; 3451 uint16_t plane_blocks, y_plane_blocks = 0; 3452 3453 if (id == PLANE_CURSOR) 3454 continue; 3455 3456 data_rate = plane_data_rate[id]; 3457 3458 /* 3459 * allocation for (packed formats) or (uv-plane part of planar format): 3460 * promote the expression to 64 bits to avoid overflowing, the 3461 * result is < available as data_rate / total_data_rate < 1 3462 */ 3463 plane_blocks = minimum[id]; 3464 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 3465 total_data_rate); 3466 3467 /* Leave disabled planes at (0,0) */ 3468 if (data_rate) { 3469 ddb->plane[pipe][id].start = start; 3470 ddb->plane[pipe][id].end = start + plane_blocks; 3471 } 3472 3473 start += plane_blocks; 3474 3475 /* 3476 * allocation for y_plane part of planar format: 3477 */ 3478 y_data_rate = plane_y_data_rate[id]; 3479 3480 y_plane_blocks = y_minimum[id]; 3481 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, 3482 total_data_rate); 3483 3484 if (y_data_rate) { 3485 ddb->y_plane[pipe][id].start = start; 3486 ddb->y_plane[pipe][id].end = start + y_plane_blocks; 3487 } 3488 3489 start += y_plane_blocks; 3490 } 3491 3492 return 0; 3493 } 3494 3495 /* 3496 * The max latency should be 257 (max the punit can code is 255 and we add 2us 3497 * for the read latency) and cpp should always be <= 8, so that 3498 * should allow pixel_rate up to ~2 GHz which seems sufficient since max 3499 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 3500 */ 3501 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) 3502 { 3503 uint32_t wm_intermediate_val, ret; 3504 3505 if (latency == 0) 3506 return UINT_MAX; 3507 3508 wm_intermediate_val = latency * pixel_rate * cpp / 512; 3509 ret = DIV_ROUND_UP(wm_intermediate_val, 1000); 3510 3511 return ret; 3512 } 3513 3514 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 3515 uint32_t latency, uint32_t plane_blocks_per_line) 3516 { 3517 uint32_t ret; 3518 uint32_t wm_intermediate_val; 3519 3520 if (latency == 0) 3521 return UINT_MAX; 3522 3523 wm_intermediate_val = latency * pixel_rate; 3524 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * 3525 plane_blocks_per_line; 3526 3527 return ret; 3528 } 3529 3530 static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, 3531 struct intel_plane_state *pstate) 3532 { 3533 uint64_t adjusted_pixel_rate; 3534 uint64_t downscale_amount; 3535 uint64_t pixel_rate; 3536 3537 /* Shouldn't reach here on disabled planes... */ 3538 if (WARN_ON(!pstate->base.visible)) 3539 return 0; 3540 3541 /* 3542 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate 3543 * with additional adjustments for plane-specific scaling. 
3544 */ 3545 adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate); 3546 downscale_amount = skl_plane_downscale_amount(pstate); 3547 3548 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; 3549 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0)); 3550 3551 return pixel_rate; 3552 } 3553 3554 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3555 struct intel_crtc_state *cstate, 3556 struct intel_plane_state *intel_pstate, 3557 uint16_t ddb_allocation, 3558 int level, 3559 uint16_t *out_blocks, /* out */ 3560 uint8_t *out_lines, /* out */ 3561 bool *enabled /* out */) 3562 { 3563 struct drm_plane_state *pstate = &intel_pstate->base; 3564 struct drm_framebuffer *fb = pstate->fb; 3565 uint32_t latency = dev_priv->wm.skl_latency[level]; 3566 uint32_t method1, method2; 3567 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3568 uint32_t res_blocks, res_lines; 3569 uint32_t selected_result; 3570 uint8_t cpp; 3571 uint32_t width = 0, height = 0; 3572 uint32_t plane_pixel_rate; 3573 uint32_t y_tile_minimum, y_min_scanlines; 3574 struct intel_atomic_state *state = 3575 to_intel_atomic_state(cstate->base.state); 3576 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); 3577 3578 if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { 3579 *enabled = false; 3580 return 0; 3581 } 3582 3583 if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED) 3584 latency += 15; 3585 3586 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3587 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3588 3589 if (drm_rotation_90_or_270(pstate->rotation)) 3590 swap(width, height); 3591 3592 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3593 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); 3594 3595 if (drm_rotation_90_or_270(pstate->rotation)) { 3596 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 
3597 drm_format_plane_cpp(fb->pixel_format, 1) : 3598 drm_format_plane_cpp(fb->pixel_format, 0); 3599 3600 switch (cpp) { 3601 case 1: 3602 y_min_scanlines = 16; 3603 break; 3604 case 2: 3605 y_min_scanlines = 8; 3606 break; 3607 case 4: 3608 y_min_scanlines = 4; 3609 break; 3610 default: 3611 MISSING_CASE(cpp); 3612 return -EINVAL; 3613 } 3614 } else { 3615 y_min_scanlines = 4; 3616 } 3617 3618 if (apply_memory_bw_wa) 3619 y_min_scanlines *= 2; 3620 3621 plane_bytes_per_line = width * cpp; 3622 if (fb->modifier == I915_FORMAT_MOD_Y_TILED || 3623 fb->modifier == I915_FORMAT_MOD_Yf_TILED) { 3624 plane_blocks_per_line = 3625 DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); 3626 plane_blocks_per_line /= y_min_scanlines; 3627 } else if (fb->modifier == DRM_FORMAT_MOD_NONE) { 3628 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) 3629 + 1; 3630 } else { 3631 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3632 } 3633 3634 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); 3635 method2 = skl_wm_method2(plane_pixel_rate, 3636 cstate->base.adjusted_mode.crtc_htotal, 3637 latency, 3638 plane_blocks_per_line); 3639 3640 y_tile_minimum = plane_blocks_per_line * y_min_scanlines; 3641 3642 if (fb->modifier == I915_FORMAT_MOD_Y_TILED || 3643 fb->modifier == I915_FORMAT_MOD_Yf_TILED) { 3644 selected_result = max(method2, y_tile_minimum); 3645 } else { 3646 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && 3647 (plane_bytes_per_line / 512 < 1)) 3648 selected_result = method2; 3649 else if ((ddb_allocation / plane_blocks_per_line) >= 1) 3650 selected_result = min(method1, method2); 3651 else 3652 selected_result = method1; 3653 } 3654 3655 res_blocks = selected_result + 1; 3656 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); 3657 3658 if (level >= 1 && level <= 7) { 3659 if (fb->modifier == I915_FORMAT_MOD_Y_TILED || 3660 fb->modifier == I915_FORMAT_MOD_Yf_TILED) { 3661 res_blocks += y_tile_minimum; 3662 res_lines += y_min_scanlines; 3663 } else { 3664 res_blocks++; 3665 } 3666 } 3667 3668 if (res_blocks >= ddb_allocation || res_lines > 31) { 3669 *enabled = false; 3670 3671 /* 3672 * If there are no valid level 0 watermarks, then we can't 3673 * support this display configuration. 
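 * E.g. (illustrative numbers) res_blocks = 40 against a 32-block DDB allocation merely disables a level >= 1, but at level 0 it fails the atomic check with -EINVAL.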
3674 */ 3675 if (level) { 3676 return 0; 3677 } else { 3678 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); 3679 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n", 3680 to_intel_crtc(cstate->base.crtc)->pipe, 3681 skl_wm_plane_id(to_intel_plane(pstate->plane)), 3682 res_blocks, ddb_allocation, res_lines); 3683 3684 return -EINVAL; 3685 } 3686 } 3687 3688 *out_blocks = res_blocks; 3689 *out_lines = res_lines; 3690 *enabled = true; 3691 3692 return 0; 3693 } 3694 3695 static int 3696 skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3697 struct skl_ddb_allocation *ddb, 3698 struct intel_crtc_state *cstate, 3699 struct intel_plane *intel_plane, 3700 int level, 3701 struct skl_wm_level *result) 3702 { 3703 struct drm_atomic_state *state = cstate->base.state; 3704 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3705 struct drm_plane *plane = &intel_plane->base; 3706 struct intel_plane_state *intel_pstate = NULL; 3707 uint16_t ddb_blocks; 3708 enum i915_pipe pipe = intel_crtc->pipe; 3709 int ret; 3710 int i = skl_wm_plane_id(intel_plane); 3711 3712 if (state) 3713 intel_pstate = 3714 intel_atomic_get_existing_plane_state(state, 3715 intel_plane); 3716 3717 /* 3718 * Note: If we start supporting multiple pending atomic commits against 3719 * the same planes/CRTC's in the future, plane->state will no longer be 3720 * the correct pre-state to use for the calculations here and we'll 3721 * need to change where we get the 'unchanged' plane data from. 3722 * 3723 * For now this is fine because we only allow one queued commit against 3724 * a CRTC. Even if the plane isn't modified by this transaction and we 3725 * don't have a plane lock, we still have the CRTC's lock, so we know 3726 * that no other transactions are racing with us to update it. 3727 */ 3728 if (!intel_pstate) 3729 intel_pstate = to_intel_plane_state(plane->state); 3730 3731 WARN_ON(!intel_pstate->base.fb); 3732 3733 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3734 3735 ret = skl_compute_plane_wm(dev_priv, 3736 cstate, 3737 intel_pstate, 3738 ddb_blocks, 3739 level, 3740 &result->plane_res_b, 3741 &result->plane_res_l, 3742 &result->plane_en); 3743 if (ret) 3744 return ret; 3745 3746 return 0; 3747 } 3748 3749 static uint32_t 3750 skl_compute_linetime_wm(struct intel_crtc_state *cstate) 3751 { 3752 uint32_t pixel_rate; 3753 3754 if (!cstate->base.active) 3755 return 0; 3756 3757 pixel_rate = ilk_pipe_pixel_rate(cstate); 3758 3759 if (WARN_ON(pixel_rate == 0)) 3760 return 0; 3761 3762 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, 3763 pixel_rate); 3764 } 3765 3766 static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 3767 struct skl_wm_level *trans_wm /* out */) 3768 { 3769 if (!cstate->base.active) 3770 return; 3771 3772 /* Until we know more, just disable transition WMs */ 3773 trans_wm->plane_en = false; 3774 } 3775 3776 static int skl_build_pipe_wm(struct intel_crtc_state *cstate, 3777 struct skl_ddb_allocation *ddb, 3778 struct skl_pipe_wm *pipe_wm) 3779 { 3780 struct drm_device *dev = cstate->base.crtc->dev; 3781 const struct drm_i915_private *dev_priv = to_i915(dev); 3782 struct intel_plane *intel_plane; 3783 struct skl_plane_wm *wm; 3784 int level, max_level = ilk_wm_max_level(dev_priv); 3785 int ret; 3786 3787 /* 3788 * We'll only calculate watermarks for planes that are actually 3789 * enabled, so make sure all other planes are set as disabled. 
3790 */ 3791 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); 3792 3793 for_each_intel_plane_mask(&dev_priv->drm, 3794 intel_plane, 3795 cstate->base.plane_mask) { 3796 wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)]; 3797 3798 for (level = 0; level <= max_level; level++) { 3799 ret = skl_compute_wm_level(dev_priv, ddb, cstate, 3800 intel_plane, level, 3801 &wm->wm[level]); 3802 if (ret) 3803 return ret; 3804 } 3805 skl_compute_transition_wm(cstate, &wm->trans_wm); 3806 } 3807 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 3808 3809 return 0; 3810 } 3811 3812 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, 3813 i915_reg_t reg, 3814 const struct skl_ddb_entry *entry) 3815 { 3816 if (entry->end) 3817 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); 3818 else 3819 I915_WRITE(reg, 0); 3820 } 3821 3822 static void skl_write_wm_level(struct drm_i915_private *dev_priv, 3823 i915_reg_t reg, 3824 const struct skl_wm_level *level) 3825 { 3826 uint32_t val = 0; 3827 3828 if (level->plane_en) { 3829 val |= PLANE_WM_EN; 3830 val |= level->plane_res_b; 3831 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; 3832 } 3833 3834 I915_WRITE(reg, val); 3835 } 3836 3837 static void skl_write_plane_wm(struct intel_crtc *intel_crtc, 3838 const struct skl_plane_wm *wm, 3839 const struct skl_ddb_allocation *ddb, 3840 int plane) 3841 { 3842 struct drm_crtc *crtc = &intel_crtc->base; 3843 struct drm_device *dev = crtc->dev; 3844 struct drm_i915_private *dev_priv = to_i915(dev); 3845 int level, max_level = ilk_wm_max_level(dev_priv); 3846 enum i915_pipe pipe = intel_crtc->pipe; 3847 3848 for (level = 0; level <= max_level; level++) { 3849 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level), 3850 &wm->wm[level]); 3851 } 3852 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane), 3853 &wm->trans_wm); 3854 3855 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane), 3856 &ddb->plane[pipe][plane]); 3857 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane), 3858 &ddb->y_plane[pipe][plane]); 3859 } 3860 3861 static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, 3862 const struct skl_plane_wm *wm, 3863 const struct skl_ddb_allocation *ddb) 3864 { 3865 struct drm_crtc *crtc = &intel_crtc->base; 3866 struct drm_device *dev = crtc->dev; 3867 struct drm_i915_private *dev_priv = to_i915(dev); 3868 int level, max_level = ilk_wm_max_level(dev_priv); 3869 enum i915_pipe pipe = intel_crtc->pipe; 3870 3871 for (level = 0; level <= max_level; level++) { 3872 skl_write_wm_level(dev_priv, CUR_WM(pipe, level), 3873 &wm->wm[level]); 3874 } 3875 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); 3876 3877 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 3878 &ddb->plane[pipe][PLANE_CURSOR]); 3879 } 3880 3881 bool skl_wm_level_equals(const struct skl_wm_level *l1, 3882 const struct skl_wm_level *l2) 3883 { 3884 if (l1->plane_en != l2->plane_en) 3885 return false; 3886 3887 /* If both planes aren't enabled, the rest shouldn't matter */ 3888 if (!l1->plane_en) 3889 return true; 3890 3891 return (l1->plane_res_l == l2->plane_res_l && 3892 l1->plane_res_b == l2->plane_res_b); 3893 } 3894 3895 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, 3896 const struct skl_ddb_entry *b) 3897 { 3898 return a->start < b->end && b->start < a->end; 3899 } 3900 3901 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries, 3902 const struct skl_ddb_entry *ddb, 3903 int ignore) 3904 { 3905 int i; 3906 3907 for (i = 0; i < I915_MAX_PIPES; i++) 
3908 if (i != ignore && entries[i] && 3909 skl_ddb_entries_overlap(ddb, entries[i])) 3910 return true; 3911 3912 return false; 3913 } 3914 3915 static int skl_update_pipe_wm(struct drm_crtc_state *cstate, 3916 const struct skl_pipe_wm *old_pipe_wm, 3917 struct skl_pipe_wm *pipe_wm, /* out */ 3918 struct skl_ddb_allocation *ddb, /* out */ 3919 bool *changed /* out */) 3920 { 3921 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); 3922 int ret; 3923 3924 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); 3925 if (ret) 3926 return ret; 3927 3928 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm))) 3929 *changed = false; 3930 else 3931 *changed = true; 3932 3933 return 0; 3934 } 3935 3936 static uint32_t 3937 pipes_modified(struct drm_atomic_state *state) 3938 { 3939 struct drm_crtc *crtc; 3940 struct drm_crtc_state *cstate; 3941 uint32_t i, ret = 0; 3942 3943 for_each_crtc_in_state(state, crtc, cstate, i) 3944 ret |= drm_crtc_mask(crtc); 3945 3946 return ret; 3947 } 3948 3949 static int 3950 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) 3951 { 3952 struct drm_atomic_state *state = cstate->base.state; 3953 struct drm_device *dev = state->dev; 3954 struct drm_crtc *crtc = cstate->base.crtc; 3955 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3956 struct drm_i915_private *dev_priv = to_i915(dev); 3957 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3958 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; 3959 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 3960 struct drm_plane_state *plane_state; 3961 struct drm_plane *plane; 3962 enum i915_pipe pipe = intel_crtc->pipe; 3963 int id; 3964 3965 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc)); 3966 3967 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { 3968 id = skl_wm_plane_id(to_intel_plane(plane)); 3969 3970 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id], 3971 &new_ddb->plane[pipe][id]) && 3972 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id], 3973 &new_ddb->y_plane[pipe][id])) 3974 continue; 3975 3976 plane_state = drm_atomic_get_plane_state(state, plane); 3977 if (IS_ERR(plane_state)) 3978 return PTR_ERR(plane_state); 3979 } 3980 3981 return 0; 3982 } 3983 3984 static int 3985 skl_compute_ddb(struct drm_atomic_state *state) 3986 { 3987 struct drm_device *dev = state->dev; 3988 struct drm_i915_private *dev_priv = to_i915(dev); 3989 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3990 struct intel_crtc *intel_crtc; 3991 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; 3992 uint32_t realloc_pipes = pipes_modified(state); 3993 int ret; 3994 3995 /* 3996 * If this is our first atomic update following hardware readout, 3997 * we can't trust the DDB that the BIOS programmed for us. Let's 3998 * pretend that all pipes switched active status so that we'll 3999 * ensure a full DDB recompute. 4000 */ 4001 if (dev_priv->wm.distrust_bios_wm) { 4002 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 4003 state->acquire_ctx); 4004 if (ret) 4005 return ret; 4006 4007 intel_state->active_pipe_changes = ~0; 4008 4009 /* 4010 * We usually only initialize intel_state->active_crtcs if 4011 * we're doing a modeset; make sure this field is always 4012 * initialized during the sanitization process that happens 4013 * on the first commit too.
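 * Taking connection_mutex above also serializes us against any
 * concurrent commit that could be changing active_crtcs under us.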
4014 */ 4015 if (!intel_state->modeset) 4016 intel_state->active_crtcs = dev_priv->active_crtcs; 4017 } 4018 4019 /* 4020 * If the modeset changes which CRTC's are active, we need to 4021 * recompute the DDB allocation for *all* active pipes, even 4022 * those that weren't otherwise being modified in any way by this 4023 * atomic commit. Due to the shrinking of the per-pipe allocations 4024 * when new active CRTC's are added, it's possible for a pipe that 4025 * we were already using and aren't changing at all here to suddenly 4026 * become invalid if its DDB needs exceed its new allocation. 4027 * 4028 * Note that if we wind up doing a full DDB recompute, we can't let 4029 * any other display updates race with this transaction, so we need 4030 * to grab the lock on *all* CRTC's. 4031 */ 4032 if (intel_state->active_pipe_changes) { 4033 realloc_pipes = ~0; 4034 intel_state->wm_results.dirty_pipes = ~0; 4035 } 4036 4037 /* 4038 * We're not recomputing for the pipes not included in the commit, so 4039 * make sure we start with the current state. 4040 */ 4041 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); 4042 4043 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { 4044 struct intel_crtc_state *cstate; 4045 4046 cstate = intel_atomic_get_crtc_state(state, intel_crtc); 4047 if (IS_ERR(cstate)) 4048 return PTR_ERR(cstate); 4049 4050 ret = skl_allocate_pipe_ddb(cstate, ddb); 4051 if (ret) 4052 return ret; 4053 4054 ret = skl_ddb_add_affected_planes(cstate); 4055 if (ret) 4056 return ret; 4057 } 4058 4059 return 0; 4060 } 4061 4062 static void 4063 skl_copy_wm_for_pipe(struct skl_wm_values *dst, 4064 struct skl_wm_values *src, 4065 enum i915_pipe pipe) 4066 { 4067 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe], 4068 sizeof(dst->ddb.y_plane[pipe])); 4069 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe], 4070 sizeof(dst->ddb.plane[pipe])); 4071 } 4072 4073 static void 4074 skl_print_wm_changes(const struct drm_atomic_state *state) 4075 { 4076 const struct drm_device *dev = state->dev; 4077 const struct drm_i915_private *dev_priv = to_i915(dev); 4078 const struct intel_atomic_state *intel_state = 4079 to_intel_atomic_state(state); 4080 const struct drm_crtc *crtc; 4081 const struct drm_crtc_state *cstate; 4082 const struct intel_plane *intel_plane; 4083 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; 4084 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; 4085 int id; 4086 int i; 4087 4088 for_each_crtc_in_state(state, crtc, cstate, i) { 4089 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4090 enum i915_pipe pipe = intel_crtc->pipe; 4091 4092 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 4093 const struct skl_ddb_entry *old, *new; 4094 4095 id = skl_wm_plane_id(intel_plane); 4096 old = &old_ddb->plane[pipe][id]; 4097 new = &new_ddb->plane[pipe][id]; 4098 4099 if (skl_ddb_entry_equal(old, new)) 4100 continue; 4101 4102 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", 4103 intel_plane->base.base.id, 4104 intel_plane->base.name, 4105 old->start, old->end, 4106 new->start, new->end); 4107 } 4108 } 4109 } 4110 4111 static int 4112 skl_compute_wm(struct drm_atomic_state *state) 4113 { 4114 struct drm_crtc *crtc; 4115 struct drm_crtc_state *cstate; 4116 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4117 struct skl_wm_values *results = &intel_state->wm_results; 4118 struct skl_pipe_wm *pipe_wm; 4119 bool changed = false; 4120 int ret, i; 4121 4122 /* 4123 * If this transaction
isn't actually touching any CRTC's, don't 4124 * bother with watermark calculation. Note that if we pass this 4125 * test, we're guaranteed to hold at least one CRTC state mutex, 4126 * which means we can safely use values like dev_priv->active_crtcs 4127 * since any racing commits that want to update them would need to 4128 * hold _all_ CRTC state mutexes. 4129 */ 4130 for_each_crtc_in_state(state, crtc, cstate, i) 4131 changed = true; 4132 if (!changed) 4133 return 0; 4134 4135 /* Clear all dirty flags */ 4136 results->dirty_pipes = 0; 4137 4138 ret = skl_compute_ddb(state); 4139 if (ret) 4140 return ret; 4141 4142 /* 4143 * Calculate WM's for all pipes that are part of this transaction. 4144 * Note that the DDB allocation above may have added more CRTC's that 4145 * weren't otherwise being modified (and set bits in dirty_pipes) if 4146 * pipe allocations had to change. 4147 * 4148 * FIXME: Now that we're doing this in the atomic check phase, we 4149 * should allow skl_update_pipe_wm() to return failure in cases where 4150 * no suitable watermark values can be found. 4151 */ 4152 for_each_crtc_in_state(state, crtc, cstate, i) { 4153 struct intel_crtc_state *intel_cstate = 4154 to_intel_crtc_state(cstate); 4155 const struct skl_pipe_wm *old_pipe_wm = 4156 &to_intel_crtc_state(crtc->state)->wm.skl.optimal; 4157 4158 pipe_wm = &intel_cstate->wm.skl.optimal; 4159 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, 4160 &results->ddb, &changed); 4161 if (ret) 4162 return ret; 4163 4164 if (changed) 4165 results->dirty_pipes |= drm_crtc_mask(crtc); 4166 4167 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4168 /* This pipe's WM's did not change */ 4169 continue; 4170 4171 intel_cstate->update_wm_pre = true; 4172 } 4173 4174 skl_print_wm_changes(state); 4175 4176 return 0; 4177 } 4178 4179 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, 4180 struct intel_crtc_state *cstate) 4181 { 4182 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); 4183 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4184 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; 4185 const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; 4186 enum i915_pipe pipe = crtc->pipe; 4187 int plane; 4188 4189 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) 4190 return; 4191 4192 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); 4193 4194 for_each_universal_plane(dev_priv, pipe, plane) 4195 skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane); 4196 4197 skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb); 4198 } 4199 4200 static void skl_initial_wm(struct intel_atomic_state *state, 4201 struct intel_crtc_state *cstate) 4202 { 4203 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4204 struct drm_device *dev = intel_crtc->base.dev; 4205 struct drm_i915_private *dev_priv = to_i915(dev); 4206 struct skl_wm_values *results = &state->wm_results; 4207 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw; 4208 enum i915_pipe pipe = intel_crtc->pipe; 4209 4210 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) 4211 return; 4212 4213 mutex_lock(&dev_priv->wm.wm_mutex); 4214 4215 if (cstate->base.active_changed) 4216 skl_atomic_update_crtc_wm(state, cstate); 4217 4218 skl_copy_wm_for_pipe(hw_vals, results, pipe); 4219 4220 mutex_unlock(&dev_priv->wm.wm_mutex); 4221 } 4222 4223 static void ilk_compute_wm_config(struct drm_device *dev, 4224 struct intel_wm_config *config) 4225 { 4226 struct intel_crtc *crtc; 
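
	/*
	 * The summary computed here feeds ilk_program_watermarks(): the
	 * 5/6 DDB split is only considered when a single pipe is active
	 * with sprites enabled.
	 */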
4227 4228 /* Compute the currently _active_ config */ 4229 for_each_intel_crtc(dev, crtc) { 4230 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; 4231 4232 if (!wm->pipe_enabled) 4233 continue; 4234 4235 config->sprites_enabled |= wm->sprites_enabled; 4236 config->sprites_scaled |= wm->sprites_scaled; 4237 config->num_pipes_active++; 4238 } 4239 } 4240 4241 static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 4242 { 4243 struct drm_device *dev = &dev_priv->drm; 4244 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 4245 struct ilk_wm_maximums max; 4246 struct intel_wm_config config = {}; 4247 struct ilk_wm_values results = {}; 4248 enum intel_ddb_partitioning partitioning; 4249 4250 ilk_compute_wm_config(dev, &config); 4251 4252 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 4253 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); 4254 4255 /* 5/6 split only in single pipe config on IVB+ */ 4256 if (INTEL_GEN(dev_priv) >= 7 && 4257 config.num_pipes_active == 1 && config.sprites_enabled) { 4258 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 4259 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 4260 4261 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 4262 } else { 4263 best_lp_wm = &lp_wm_1_2; 4264 } 4265 4266 partitioning = (best_lp_wm == &lp_wm_1_2) ? 4267 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 4268 4269 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 4270 4271 ilk_write_wm_values(dev_priv, &results); 4272 } 4273 4274 static void ilk_initial_watermarks(struct intel_atomic_state *state, 4275 struct intel_crtc_state *cstate) 4276 { 4277 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); 4278 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4279 4280 mutex_lock(&dev_priv->wm.wm_mutex); 4281 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; 4282 ilk_program_watermarks(dev_priv); 4283 mutex_unlock(&dev_priv->wm.wm_mutex); 4284 } 4285 4286 static void ilk_optimize_watermarks(struct intel_atomic_state *state, 4287 struct intel_crtc_state *cstate) 4288 { 4289 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); 4290 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4291 4292 mutex_lock(&dev_priv->wm.wm_mutex); 4293 if (cstate->wm.need_postvbl_update) { 4294 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; 4295 ilk_program_watermarks(dev_priv); 4296 } 4297 mutex_unlock(&dev_priv->wm.wm_mutex); 4298 } 4299 4300 static inline void skl_wm_level_from_reg_val(uint32_t val, 4301 struct skl_wm_level *level) 4302 { 4303 level->plane_en = val & PLANE_WM_EN; 4304 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; 4305 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & 4306 PLANE_WM_LINES_MASK; 4307 } 4308 4309 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, 4310 struct skl_pipe_wm *out) 4311 { 4312 struct drm_device *dev = crtc->dev; 4313 struct drm_i915_private *dev_priv = to_i915(dev); 4314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4315 struct intel_plane *intel_plane; 4316 struct skl_plane_wm *wm; 4317 enum i915_pipe pipe = intel_crtc->pipe; 4318 int level, id, max_level; 4319 uint32_t val; 4320 4321 max_level = ilk_wm_max_level(dev_priv); 4322 4323 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 4324 id = skl_wm_plane_id(intel_plane); 4325 wm = &out->planes[id]; 4326 4327 for (level = 0; level <= max_level; level++) { 4328 if (id != PLANE_CURSOR) 4329 val = I915_READ(PLANE_WM(pipe, id, level)); 
4330 else 4331 val = I915_READ(CUR_WM(pipe, level)); 4332 4333 skl_wm_level_from_reg_val(val, &wm->wm[level]); 4334 } 4335 4336 if (id != PLANE_CURSOR) 4337 val = I915_READ(PLANE_WM_TRANS(pipe, id)); 4338 else 4339 val = I915_READ(CUR_WM_TRANS(pipe)); 4340 4341 skl_wm_level_from_reg_val(val, &wm->trans_wm); 4342 } 4343 4344 if (!intel_crtc->active) 4345 return; 4346 4347 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe)); 4348 } 4349 4350 void skl_wm_get_hw_state(struct drm_device *dev) 4351 { 4352 struct drm_i915_private *dev_priv = to_i915(dev); 4353 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 4354 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 4355 struct drm_crtc *crtc; 4356 struct intel_crtc *intel_crtc; 4357 struct intel_crtc_state *cstate; 4358 4359 skl_ddb_get_hw_state(dev_priv, ddb); 4360 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4361 intel_crtc = to_intel_crtc(crtc); 4362 cstate = to_intel_crtc_state(crtc->state); 4363 4364 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal); 4365 4366 if (intel_crtc->active) 4367 hw->dirty_pipes |= drm_crtc_mask(crtc); 4368 } 4369 4370 if (dev_priv->active_crtcs) { 4371 /* Fully recompute DDB on first atomic commit */ 4372 dev_priv->wm.distrust_bios_wm = true; 4373 } else { 4374 /* Easy/common case; just sanitize DDB now if everything off */ 4375 memset(ddb, 0, sizeof(*ddb)); 4376 } 4377 } 4378 4379 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4380 { 4381 struct drm_device *dev = crtc->dev; 4382 struct drm_i915_private *dev_priv = to_i915(dev); 4383 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4385 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4386 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; 4387 enum i915_pipe pipe = intel_crtc->pipe; 4388 static const i915_reg_t wm0_pipe_reg[] = { 4389 [PIPE_A] = WM0_PIPEA_ILK, 4390 [PIPE_B] = WM0_PIPEB_ILK, 4391 [PIPE_C] = WM0_PIPEC_IVB, 4392 }; 4393 4394 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 4395 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 4396 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 4397 4398 memset(active, 0, sizeof(*active)); 4399 4400 active->pipe_enabled = intel_crtc->active; 4401 4402 if (active->pipe_enabled) { 4403 u32 tmp = hw->wm_pipe[pipe]; 4404 4405 /* 4406 * For active pipes LP0 watermark is marked as 4407 * enabled, and LP1+ watermarks as disabled since 4408 * we can't really reverse compute them in case 4409 * multiple pipes are active. 4410 */ 4411 active->wm[0].enable = true; 4412 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; 4413 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; 4414 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; 4415 active->linetime = hw->wm_linetime[pipe]; 4416 } else { 4417 int level, max_level = ilk_wm_max_level(dev_priv); 4418 4419 /* 4420 * For inactive pipes, all watermark levels 4421 * should be marked as enabled but zeroed, 4422 * which is what we'd compute them to.
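 * Keeping the readout results identical to what the compute path
 * would produce avoids spurious mismatches when the two are compared.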
4423 */ 4424 for (level = 0; level <= max_level; level++) 4425 active->wm[level].enable = true; 4426 } 4427 4428 intel_crtc->wm.active.ilk = *active; 4429 } 4430 4431 #define _FW_WM(value, plane) \ 4432 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) 4433 #define _FW_WM_VLV(value, plane) \ 4434 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) 4435 4436 static void vlv_read_wm_values(struct drm_i915_private *dev_priv, 4437 struct vlv_wm_values *wm) 4438 { 4439 enum i915_pipe pipe; 4440 uint32_t tmp; 4441 4442 for_each_pipe(dev_priv, pipe) { 4443 tmp = I915_READ(VLV_DDL(pipe)); 4444 4445 wm->ddl[pipe].primary = 4446 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4447 wm->ddl[pipe].cursor = 4448 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4449 wm->ddl[pipe].sprite[0] = 4450 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4451 wm->ddl[pipe].sprite[1] = 4452 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4453 } 4454 4455 tmp = I915_READ(DSPFW1); 4456 wm->sr.plane = _FW_WM(tmp, SR); 4457 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); 4458 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); 4459 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); 4460 4461 tmp = I915_READ(DSPFW2); 4462 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); 4463 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); 4464 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); 4465 4466 tmp = I915_READ(DSPFW3); 4467 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 4468 4469 if (IS_CHERRYVIEW(dev_priv)) { 4470 tmp = I915_READ(DSPFW7_CHV); 4471 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); 4472 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); 4473 4474 tmp = I915_READ(DSPFW8_CHV); 4475 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); 4476 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); 4477 4478 tmp = I915_READ(DSPFW9_CHV); 4479 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); 4480 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); 4481 4482 tmp = I915_READ(DSPHOWM); 4483 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 4484 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 4485 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; 4486 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; 4487 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; 4488 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 4489 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; 4490 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 4491 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 4492 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; 4493 } else { 4494 tmp = I915_READ(DSPFW7); 4495 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); 4496 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); 4497 4498 tmp = I915_READ(DSPHOWM); 4499 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 4500 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; 4501 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 4502 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; 4503 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 4504 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 4505 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; 4506 } 4507 } 4508 4509 #undef _FW_WM 4510 #undef _FW_WM_VLV 4511 4512 void vlv_wm_get_hw_state(struct drm_device *dev) 4513 { 4514 struct drm_i915_private *dev_priv = 
to_i915(dev); 4515 struct vlv_wm_values *wm = &dev_priv->wm.vlv; 4516 struct intel_plane *plane; 4517 enum i915_pipe pipe; 4518 u32 val; 4519 4520 vlv_read_wm_values(dev_priv, wm); 4521 4522 for_each_intel_plane(dev, plane) { 4523 switch (plane->base.type) { 4524 int sprite; 4525 case DRM_PLANE_TYPE_CURSOR: 4526 plane->wm.fifo_size = 63; 4527 break; 4528 case DRM_PLANE_TYPE_PRIMARY: 4529 plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0); 4530 break; 4531 case DRM_PLANE_TYPE_OVERLAY: 4532 sprite = plane->plane; 4533 plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1); 4534 break; 4535 } 4536 } 4537 4538 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 4539 wm->level = VLV_WM_LEVEL_PM2; 4540 4541 if (IS_CHERRYVIEW(dev_priv)) { 4542 mutex_lock(&dev_priv->rps.hw_lock); 4543 4544 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 4545 if (val & DSP_MAXFIFO_PM5_ENABLE) 4546 wm->level = VLV_WM_LEVEL_PM5; 4547 4548 /* 4549 * If DDR DVFS is disabled in the BIOS, Punit 4550 * will never ack the request. So if that happens 4551 * assume we don't have to enable/disable DDR DVFS 4552 * dynamically. To test that just set the REQ_ACK 4553 * bit to poke the Punit, but don't change the 4554 * HIGH/LOW bits so that we don't actually change 4555 * the current state. 4556 */ 4557 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 4558 val |= FORCE_DDR_FREQ_REQ_ACK; 4559 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 4560 4561 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 4562 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 4563 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " 4564 "assuming DDR DVFS is disabled\n"); 4565 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; 4566 } else { 4567 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 4568 if ((val & FORCE_DDR_HIGH_FREQ) == 0) 4569 wm->level = VLV_WM_LEVEL_DDR_DVFS; 4570 } 4571 4572 mutex_unlock(&dev_priv->rps.hw_lock); 4573 } 4574 4575 for_each_pipe(dev_priv, pipe) 4576 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", 4577 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, 4578 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); 4579 4580 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", 4581 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); 4582 } 4583 4584 void ilk_wm_get_hw_state(struct drm_device *dev) 4585 { 4586 struct drm_i915_private *dev_priv = to_i915(dev); 4587 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4588 struct drm_crtc *crtc; 4589 4590 for_each_crtc(dev, crtc) 4591 ilk_pipe_wm_get_hw_state(crtc); 4592 4593 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 4594 hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 4595 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 4596 4597 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 4598 if (INTEL_GEN(dev_priv) >= 7) { 4599 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 4600 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 4601 } 4602 4603 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 4604 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 4605 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4606 else if (IS_IVYBRIDGE(dev_priv)) 4607 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 
4608 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4609 4610 hw->enable_fbc_wm = 4611 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 4612 } 4613 4614 /** 4615 * intel_update_watermarks - update FIFO watermark values based on current modes 4616 * 4617 * Calculate watermark values for the various WM regs based on current mode 4618 * and plane configuration. 4619 * 4620 * There are several cases to deal with here: 4621 * - normal (i.e. non-self-refresh) 4622 * - self-refresh (SR) mode 4623 * - lines are large relative to FIFO size (buffer can hold up to 2) 4624 * - lines are small relative to FIFO size (buffer can hold more than 2 4625 * lines), so need to account for TLB latency 4626 * 4627 * The normal calculation is: 4628 * watermark = dotclock * bytes per pixel * latency 4629 * where latency is platform & configuration dependent (we assume pessimal 4630 * values here). 4631 * 4632 * The SR calculation is: 4633 * watermark = (trunc(latency/line time)+1) * surface width * 4634 * bytes per pixel 4635 * where 4636 * line time = htotal / dotclock 4637 * surface width = hdisplay for normal plane and 64 for cursor 4638 * and latency is assumed to be high, as above. 4639 * 4640 * The final value programmed to the register should always be rounded up, 4641 * and include an extra 2 entries to account for clock crossings. 4642 * 4643 * We don't use the sprite, so we can ignore that. And on Crestline we have 4644 * to set the non-SR watermarks to 8. 4645 */ 4646 void intel_update_watermarks(struct intel_crtc *crtc) 4647 { 4648 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4649 4650 if (dev_priv->display.update_wm) 4651 dev_priv->display.update_wm(crtc); 4652 } 4653 4654 /* 4655 * Lock protecting IPS related data structures 4656 */ 4657 DEFINE_SPINLOCK(mchdev_lock); 4658 4659 /* Global for IPS driver to get at the current i915 device. Protected by 4660 * mchdev_lock. 
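 * IPS callers must re-check it for NULL under the lock, since the
 * driver clears it again on unload.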
*/ 4661 static struct drm_i915_private *i915_mch_dev; 4662 4663 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) 4664 { 4665 u16 rgvswctl; 4666 4667 assert_spin_locked(&mchdev_lock); 4668 4669 rgvswctl = I915_READ16(MEMSWCTL); 4670 if (rgvswctl & MEMCTL_CMD_STS) { 4671 DRM_DEBUG("gpu busy, RCS change rejected\n"); 4672 return false; /* still busy with another command */ 4673 } 4674 4675 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 4676 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 4677 I915_WRITE16(MEMSWCTL, rgvswctl); 4678 POSTING_READ16(MEMSWCTL); 4679 4680 rgvswctl |= MEMCTL_CMD_STS; 4681 I915_WRITE16(MEMSWCTL, rgvswctl); 4682 4683 return true; 4684 } 4685 4686 static void ironlake_enable_drps(struct drm_i915_private *dev_priv) 4687 { 4688 u32 rgvmodectl; 4689 u8 fmax, fmin, fstart, vstart; 4690 4691 spin_lock_irq(&mchdev_lock); 4692 4693 rgvmodectl = I915_READ(MEMMODECTL); 4694 4695 /* Enable temp reporting */ 4696 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 4697 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 4698 4699 /* 100ms RC evaluation intervals */ 4700 I915_WRITE(RCUPEI, 100000); 4701 I915_WRITE(RCDNEI, 100000); 4702 4703 /* Set max/min thresholds to 90ms and 80ms respectively */ 4704 I915_WRITE(RCBMAXAVG, 90000); 4705 I915_WRITE(RCBMINAVG, 80000); 4706 4707 I915_WRITE(MEMIHYST, 1); 4708 4709 /* Set up min, max, and cur for interrupt handling */ 4710 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 4711 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 4712 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 4713 MEMMODE_FSTART_SHIFT; 4714 4715 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >> 4716 PXVFREQ_PX_SHIFT; 4717 4718 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 4719 dev_priv->ips.fstart = fstart; 4720 4721 dev_priv->ips.max_delay = fstart; 4722 dev_priv->ips.min_delay = fmin; 4723 dev_priv->ips.cur_delay = fstart; 4724 4725 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 4726 fmax, fmin, fstart); 4727 4728 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 4729 4730 /* 4731 * Interrupts will be enabled in ironlake_irq_postinstall 4732 */ 4733 4734 I915_WRITE(VIDSTART, vstart); 4735 POSTING_READ(VIDSTART); 4736 4737 rgvmodectl |= MEMMODE_SWMODE_EN; 4738 I915_WRITE(MEMMODECTL, rgvmodectl); 4739 4740 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 4741 DRM_ERROR("stuck trying to change perf mode\n"); 4742 mdelay(1); 4743 4744 ironlake_set_drps(dev_priv, fstart); 4745 4746 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 4747 I915_READ(DDREC) + I915_READ(CSIEC); 4748 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 4749 dev_priv->ips.last_count2 = I915_READ(GFXEC); 4750 dev_priv->ips.last_time2 = ktime_get_raw_ns(); 4751 4752 spin_unlock_irq(&mchdev_lock); 4753 } 4754 4755 static void ironlake_disable_drps(struct drm_i915_private *dev_priv) 4756 { 4757 u16 rgvswctl; 4758 4759 spin_lock_irq(&mchdev_lock); 4760 4761 rgvswctl = I915_READ16(MEMSWCTL); 4762 4763 /* Ack interrupts, disable EFC interrupt */ 4764 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 4765 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 4766 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 4767 I915_WRITE(DEIIR, DE_PCU_EVENT); 4768 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 4769 4770 /* Go back to the starting frequency */ 4771 ironlake_set_drps(dev_priv, dev_priv->ips.fstart); 4772 mdelay(1); 4773 rgvswctl |= MEMCTL_CMD_STS; 4774 I915_WRITE(MEMSWCTL, rgvswctl); 4775 mdelay(1); 4776 
4777 spin_unlock_irq(&mchdev_lock); 4778 } 4779 4780 /* There's a funny hw issue where the hw returns all 0 when reading from 4781 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value 4782 * ourselves, instead of doing a rmw cycle (which might result in us clearing 4783 * all limits and the gpu stuck at whatever frequency it is at atm). 4784 */ 4785 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) 4786 { 4787 u32 limits; 4788 4789 /* Only set the down limit when we've reached the lowest level to avoid 4790 * getting more interrupts, otherwise leave this clear. This prevents a 4791 * race in the hw when coming out of rc6: There's a tiny window where 4792 * the hw runs at the minimal clock before selecting the desired 4793 * frequency, if the down threshold expires in that window we will not 4794 * receive a down interrupt. */ 4795 if (IS_GEN9(dev_priv)) { 4796 limits = (dev_priv->rps.max_freq_softlimit) << 23; 4797 if (val <= dev_priv->rps.min_freq_softlimit) 4798 limits |= (dev_priv->rps.min_freq_softlimit) << 14; 4799 } else { 4800 limits = dev_priv->rps.max_freq_softlimit << 24; 4801 if (val <= dev_priv->rps.min_freq_softlimit) 4802 limits |= dev_priv->rps.min_freq_softlimit << 16; 4803 } 4804 4805 return limits; 4806 } 4807 4808 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) 4809 { 4810 int new_power; 4811 u32 threshold_up = 0, threshold_down = 0; /* in % */ 4812 u32 ei_up = 0, ei_down = 0; 4813 4814 new_power = dev_priv->rps.power; 4815 switch (dev_priv->rps.power) { 4816 case LOW_POWER: 4817 if (val > dev_priv->rps.efficient_freq + 1 && 4818 val > dev_priv->rps.cur_freq) 4819 new_power = BETWEEN; 4820 break; 4821 4822 case BETWEEN: 4823 if (val <= dev_priv->rps.efficient_freq && 4824 val < dev_priv->rps.cur_freq) 4825 new_power = LOW_POWER; 4826 else if (val >= dev_priv->rps.rp0_freq && 4827 val > dev_priv->rps.cur_freq) 4828 new_power = HIGH_POWER; 4829 break; 4830 4831 case HIGH_POWER: 4832 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && 4833 val < dev_priv->rps.cur_freq) 4834 new_power = BETWEEN; 4835 break; 4836 } 4837 /* Max/min bins are special */ 4838 if (val <= dev_priv->rps.min_freq_softlimit) 4839 new_power = LOW_POWER; 4840 if (val >= dev_priv->rps.max_freq_softlimit) 4841 new_power = HIGH_POWER; 4842 if (new_power == dev_priv->rps.power) 4843 return; 4844 4845 /* Note the units here are not exactly 1us, but 1280ns. */ 4846 switch (new_power) { 4847 case LOW_POWER: 4848 /* Upclock if more than 95% busy over 16ms */ 4849 ei_up = 16000; 4850 threshold_up = 95; 4851 4852 /* Downclock if less than 85% busy over 32ms */ 4853 ei_down = 32000; 4854 threshold_down = 85; 4855 break; 4856 4857 case BETWEEN: 4858 /* Upclock if more than 90% busy over 13ms */ 4859 ei_up = 13000; 4860 threshold_up = 90; 4861 4862 /* Downclock if less than 75% busy over 32ms */ 4863 ei_down = 32000; 4864 threshold_down = 75; 4865 break; 4866 4867 case HIGH_POWER: 4868 /* Upclock if more than 85% busy over 10ms */ 4869 ei_up = 10000; 4870 threshold_up = 85; 4871 4872 /* Downclock if less than 60% busy over 32ms */ 4873 ei_down = 32000; 4874 threshold_down = 60; 4875 break; 4876 } 4877 4878 /* When byt can survive without system hang with dynamic 4879 * sw freq adjustments, this restriction can be lifted. 
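 * Until then Valleyview jumps straight to the bookkeeping below and
 * leaves the hardware thresholds untouched.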
4880 */ 4881 if (IS_VALLEYVIEW(dev_priv)) 4882 goto skip_hw_write; 4883 4884 I915_WRITE(GEN6_RP_UP_EI, 4885 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4886 I915_WRITE(GEN6_RP_UP_THRESHOLD, 4887 GT_INTERVAL_FROM_US(dev_priv, 4888 ei_up * threshold_up / 100)); 4889 4890 I915_WRITE(GEN6_RP_DOWN_EI, 4891 GT_INTERVAL_FROM_US(dev_priv, ei_down)); 4892 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 4893 GT_INTERVAL_FROM_US(dev_priv, 4894 ei_down * threshold_down / 100)); 4895 4896 I915_WRITE(GEN6_RP_CONTROL, 4897 GEN6_RP_MEDIA_TURBO | 4898 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4899 GEN6_RP_MEDIA_IS_GFX | 4900 GEN6_RP_ENABLE | 4901 GEN6_RP_UP_BUSY_AVG | 4902 GEN6_RP_DOWN_IDLE_AVG); 4903 4904 skip_hw_write: 4905 dev_priv->rps.power = new_power; 4906 dev_priv->rps.up_threshold = threshold_up; 4907 dev_priv->rps.down_threshold = threshold_down; 4908 dev_priv->rps.last_adj = 0; 4909 } 4910 4911 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) 4912 { 4913 u32 mask = 0; 4914 4915 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ 4916 if (val > dev_priv->rps.min_freq_softlimit) 4917 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4918 if (val < dev_priv->rps.max_freq_softlimit) 4919 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4920 4921 mask &= dev_priv->pm_rps_events; 4922 4923 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); 4924 } 4925 4926 /* gen6_set_rps is called to update the frequency request, but should also be 4927 * called when the range (min_delay and max_delay) is modified so that we can 4928 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4929 static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) 4930 { 4931 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4932 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 4933 return; 4934 4935 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4936 WARN_ON(val > dev_priv->rps.max_freq); 4937 WARN_ON(val < dev_priv->rps.min_freq); 4938 4939 /* min/max delay may still have been modified so be sure to 4940 * write the limits value. 4941 */ 4942 if (val != dev_priv->rps.cur_freq) { 4943 gen6_set_rps_thresholds(dev_priv, val); 4944 4945 if (IS_GEN9(dev_priv)) 4946 I915_WRITE(GEN6_RPNSWREQ, 4947 GEN9_FREQUENCY(val)); 4948 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 4949 I915_WRITE(GEN6_RPNSWREQ, 4950 HSW_FREQUENCY(val)); 4951 else 4952 I915_WRITE(GEN6_RPNSWREQ, 4953 GEN6_FREQUENCY(val) | 4954 GEN6_OFFSET(0) | 4955 GEN6_AGGRESSIVE_TURBO); 4956 } 4957 4958 /* Make sure we continue to get interrupts 4959 * until we hit the minimum or maximum frequencies.
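 * (intel_rps_limits() and gen6_rps_pm_mask() above derive the
 * matching limit register value and interrupt mask from val).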
4960 */ 4961 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); 4962 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4963 4964 POSTING_READ(GEN6_RPNSWREQ); 4965 4966 dev_priv->rps.cur_freq = val; 4967 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4968 } 4969 4970 static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) 4971 { 4972 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4973 WARN_ON(val > dev_priv->rps.max_freq); 4974 WARN_ON(val < dev_priv->rps.min_freq); 4975 4976 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), 4977 "Odd GPU freq value\n")) 4978 val &= ~1; 4979 4980 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4981 4982 if (val != dev_priv->rps.cur_freq) { 4983 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 4984 if (!IS_CHERRYVIEW(dev_priv)) 4985 gen6_set_rps_thresholds(dev_priv, val); 4986 } 4987 4988 dev_priv->rps.cur_freq = val; 4989 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4990 } 4991 4992 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down 4993 * 4994 * If Gfx is idle, then 4995 * 1. Forcewake Media well. 4996 * 2. Request idle freq. 4997 * 3. Release Forcewake of Media well. 4998 */ 4999 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 5000 { 5001 u32 val = dev_priv->rps.idle_freq; 5002 5003 if (dev_priv->rps.cur_freq <= val) 5004 return; 5005 5006 /* Wake up the media well, as that takes a lot less 5007 * power than the Render well. */ 5008 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 5009 valleyview_set_rps(dev_priv, val); 5010 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 5011 } 5012 5013 void gen6_rps_busy(struct drm_i915_private *dev_priv) 5014 { 5015 mutex_lock(&dev_priv->rps.hw_lock); 5016 if (dev_priv->rps.enabled) { 5017 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) 5018 gen6_rps_reset_ei(dev_priv); 5019 I915_WRITE(GEN6_PMINTRMSK, 5020 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 5021 5022 gen6_enable_rps_interrupts(dev_priv); 5023 5024 /* Ensure we start at the user's desired frequency */ 5025 intel_set_rps(dev_priv, 5026 clamp(dev_priv->rps.cur_freq, 5027 dev_priv->rps.min_freq_softlimit, 5028 dev_priv->rps.max_freq_softlimit)); 5029 } 5030 mutex_unlock(&dev_priv->rps.hw_lock); 5031 } 5032 5033 void gen6_rps_idle(struct drm_i915_private *dev_priv) 5034 { 5035 /* Flush our bottom-half so that it does not race with us 5036 * setting the idle frequency and so that it is bounded by 5037 * our rpm wakeref. And then disable the interrupts to stop any 5038 * further RPS reclocking whilst we are asleep. 5039 */ 5040 gen6_disable_rps_interrupts(dev_priv); 5041 5042 mutex_lock(&dev_priv->rps.hw_lock); 5043 if (dev_priv->rps.enabled) { 5044 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5045 vlv_set_rps_idle(dev_priv); 5046 else 5047 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); 5048 dev_priv->rps.last_adj = 0; 5049 I915_WRITE(GEN6_PMINTRMSK, 5050 gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 5051 } 5052 mutex_unlock(&dev_priv->rps.hw_lock); 5053 5054 lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE); 5055 while (!list_empty(&dev_priv->rps.clients)) 5056 list_del_init(dev_priv->rps.clients.next); 5057 lockmgr(&dev_priv->rps.client_lock, LK_RELEASE); 5058 } 5059 5060 void gen6_rps_boost(struct drm_i915_private *dev_priv, 5061 struct intel_rps_client *rps, 5062 unsigned long submitted) 5063 { 5064 /* This is intentionally racy!
We peek at the state here, then 5065 * validate inside the RPS worker. 5066 */ 5067 if (!(dev_priv->gt.awake && 5068 dev_priv->rps.enabled && 5069 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq)) 5070 return; 5071 5072 /* Force an RPS boost (and don't count it against the client) if 5073 * the GPU is severely congested. 5074 */ 5075 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) 5076 rps = NULL; 5077 5078 lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE); 5079 if (rps == NULL || list_empty(&rps->link)) { 5080 spin_lock_irq(&dev_priv->irq_lock); 5081 if (dev_priv->rps.interrupts_enabled) { 5082 dev_priv->rps.client_boost = true; 5083 schedule_work(&dev_priv->rps.work); 5084 } 5085 spin_unlock_irq(&dev_priv->irq_lock); 5086 5087 if (rps != NULL) { 5088 list_add(&rps->link, &dev_priv->rps.clients); 5089 rps->boosts++; 5090 } else 5091 dev_priv->rps.boosts++; 5092 } 5093 lockmgr(&dev_priv->rps.client_lock, LK_RELEASE); 5094 } 5095 5096 void intel_set_rps(struct drm_i915_private *dev_priv, u8 val) 5097 { 5098 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5099 valleyview_set_rps(dev_priv, val); 5100 else 5101 gen6_set_rps(dev_priv, val); 5102 } 5103 5104 static void gen9_disable_rc6(struct drm_i915_private *dev_priv) 5105 { 5106 I915_WRITE(GEN6_RC_CONTROL, 0); 5107 I915_WRITE(GEN9_PG_ENABLE, 0); 5108 } 5109 5110 static void gen9_disable_rps(struct drm_i915_private *dev_priv) 5111 { 5112 I915_WRITE(GEN6_RP_CONTROL, 0); 5113 } 5114 5115 static void gen6_disable_rps(struct drm_i915_private *dev_priv) 5116 { 5117 I915_WRITE(GEN6_RC_CONTROL, 0); 5118 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 5119 I915_WRITE(GEN6_RP_CONTROL, 0); 5120 } 5121 5122 static void cherryview_disable_rps(struct drm_i915_private *dev_priv) 5123 { 5124 I915_WRITE(GEN6_RC_CONTROL, 0); 5125 } 5126 5127 static void valleyview_disable_rps(struct drm_i915_private *dev_priv) 5128 { 5129 /* We're doing forcewake before disabling RC6; 5130 * this is what the BIOS expects when going into suspend */ 5131 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5132 5133 I915_WRITE(GEN6_RC_CONTROL, 0); 5134 5135 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5136 } 5137 5138 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) 5139 { 5140 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 5141 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 5142 mode = GEN6_RC_CTL_RC6_ENABLE; 5143 else 5144 mode = 0; 5145 } 5146 if (HAS_RC6p(dev_priv)) 5147 DRM_DEBUG_DRIVER("Enabling RC6 states: " 5148 "RC6 %s RC6p %s RC6pp %s\n", 5149 onoff(mode & GEN6_RC_CTL_RC6_ENABLE), 5150 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), 5151 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); 5152 5153 else 5154 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n", 5155 onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); 5156 } 5157 5158 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) 5159 { 5160 struct i915_ggtt *ggtt = &dev_priv->ggtt; 5161 bool enable_rc6 = true; 5162 unsigned long rc6_ctx_base; 5163 u32 rc_ctl; 5164 int rc_sw_target; 5165 5166 rc_ctl = I915_READ(GEN6_RC_CONTROL); 5167 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> 5168 RC_SW_TARGET_STATE_SHIFT; 5169 DRM_DEBUG_DRIVER("BIOS enabled RC states: " 5170 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", 5171 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), 5172 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), 5173 rc_sw_target); 5174 5175 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { 5176 DRM_DEBUG_DRIVER("RC6 Base location not set
properly.\n"); 5177 enable_rc6 = false; 5178 } 5179 5180 /* 5181 * The exact context size is not known for BXT, so assume a page size 5182 * for this check. 5183 */ 5184 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; 5185 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && 5186 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + 5187 ggtt->stolen_reserved_size))) { 5188 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); 5189 enable_rc6 = false; 5190 } 5191 5192 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && 5193 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && 5194 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && 5195 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { 5196 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); 5197 enable_rc6 = false; 5198 } 5199 5200 if (!I915_READ(GEN8_PUSHBUS_CONTROL) || 5201 !I915_READ(GEN8_PUSHBUS_ENABLE) || 5202 !I915_READ(GEN8_PUSHBUS_SHIFT)) { 5203 DRM_DEBUG_DRIVER("Pushbus not set up properly.\n"); 5204 enable_rc6 = false; 5205 } 5206 5207 if (!I915_READ(GEN6_GFXPAUSE)) { 5208 DRM_DEBUG_DRIVER("GFX pause not set up properly.\n"); 5209 enable_rc6 = false; 5210 } 5211 5212 if (!I915_READ(GEN8_MISC_CTRL0)) { 5213 DRM_DEBUG_DRIVER("GPM control not set up properly.\n"); 5214 enable_rc6 = false; 5215 } 5216 5217 return enable_rc6; 5218 } 5219 5220 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) 5221 { 5222 /* No RC6 before Ironlake and code is gone for ilk. */ 5223 if (INTEL_INFO(dev_priv)->gen < 6) 5224 return 0; 5225 5226 if (!enable_rc6) 5227 return 0; 5228 5229 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { 5230 DRM_INFO("RC6 disabled by BIOS\n"); 5231 return 0; 5232 } 5233 5234 /* Respect the kernel parameter if it is set */ 5235 if (enable_rc6 >= 0) { 5236 int mask; 5237 5238 if (HAS_RC6p(dev_priv)) 5239 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 5240 INTEL_RC6pp_ENABLE; 5241 else 5242 mask = INTEL_RC6_ENABLE; 5243 5244 if ((enable_rc6 & mask) != enable_rc6) 5245 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d " 5246 "(requested %d, valid %d)\n", 5247 enable_rc6 & mask, enable_rc6, mask); 5248 5249 return enable_rc6 & mask; 5250 } 5251 5252 if (IS_IVYBRIDGE(dev_priv)) 5253 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 5254 5255 return INTEL_RC6_ENABLE; 5256 } 5257 5258 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) 5259 { 5260 /* All of these values are in units of 50MHz */ 5261 5262 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 5263 if (IS_BROXTON(dev_priv)) { 5264 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 5265 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 5266 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 5267 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; 5268 } else { 5269 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 5270 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 5271 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 5272 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 5273 } 5274 /* hw_max = RP0 until we check for overclocking */ 5275 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 5276 5277 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 5278 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || 5279 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 5280 u32 ddcc_status = 0; 5281 5282 if (sandybridge_pcode_read(dev_priv, 5283 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 5284 &ddcc_status) == 0) 5285
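			/* Bits 15:8 of ddcc_status hold the most power-efficient frequency. */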
dev_priv->rps.efficient_freq = 5286 clamp_t(u8, 5287 ((ddcc_status >> 8) & 0xff), 5288 dev_priv->rps.min_freq, 5289 dev_priv->rps.max_freq); 5290 } 5291 5292 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 5293 /* Store the frequency values in 16.66 MHz units, which is 5294 * the natural hardware unit for SKL 5295 */ 5296 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 5297 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; 5298 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; 5299 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; 5300 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; 5301 } 5302 } 5303 5304 static void reset_rps(struct drm_i915_private *dev_priv, 5305 void (*set)(struct drm_i915_private *, u8)) 5306 { 5307 u8 freq = dev_priv->rps.cur_freq; 5308 5309 /* force a reset */ 5310 dev_priv->rps.power = -1; 5311 dev_priv->rps.cur_freq = -1; 5312 5313 set(dev_priv, freq); 5314 } 5315 5316 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 5317 static void gen9_enable_rps(struct drm_i915_private *dev_priv) 5318 { 5319 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5320 5321 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 5322 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 5323 /* 5324 * BIOS could leave the Hw Turbo enabled, so we need to explicitly 5325 * clear out the Control register just to avoid inconsistency 5326 * with debugfs interface, which will show Turbo as enabled 5327 * only and that is not expected by the User after adding the 5328 * WaGsvDisableTurbo. Apart from this there is no problem even 5329 * if the Turbo is left enabled in the Control register, as the 5330 * Up/Down interrupts would remain masked. 5331 */ 5332 gen9_disable_rps(dev_priv); 5333 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5334 return; 5335 } 5336 5337 /* Program defaults and thresholds for RPS */ 5338 I915_WRITE(GEN6_RC_VIDEO_FREQ, 5339 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 5340 5341 /* 1 second timeout */ 5342 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 5343 GT_INTERVAL_FROM_US(dev_priv, 1000000)); 5344 5345 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); 5346 5347 /* Leaning on the below call to gen6_set_rps to program/setup the 5348 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 5349 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 5350 reset_rps(dev_priv, gen6_set_rps); 5351 5352 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5353 } 5354 5355 static void gen9_enable_rc6(struct drm_i915_private *dev_priv) 5356 { 5357 struct intel_engine_cs *engine; 5358 enum intel_engine_id id; 5359 uint32_t rc6_mask = 0; 5360 5361 /* 1a: Software RC state - RC0 */ 5362 I915_WRITE(GEN6_RC_STATE, 0); 5363 5364 /* 1b: Get forcewake during program sequence. Although the driver 5365 * hasn't enabled a state yet where we need forcewake, BIOS may have. */ 5366 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5367 5368 /* 2a: Disable RC states.
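 * RC_CONTROL is cleared before the thresholds below are written so
 * the hardware never acts on a half-programmed configuration.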
*/ 5369 I915_WRITE(GEN6_RC_CONTROL, 0); 5370 5371 /* 2b: Program RC6 thresholds. */ 5372 5373 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 5374 if (IS_SKYLAKE(dev_priv)) 5375 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 5376 else 5377 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 5378 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 5379 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 5380 for_each_engine(engine, dev_priv, id) 5381 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5382 5383 if (HAS_GUC(dev_priv)) 5384 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 5385 5386 I915_WRITE(GEN6_RC_SLEEP, 0); 5387 5388 /* 2c: Program Coarse Power Gating Policies. */ 5389 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); 5390 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 5391 5392 /* 3a: Enable RC6 */ 5393 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 5394 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5395 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); 5396 /* WaRsUseTimeoutMode:bxt */ 5397 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 5398 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 5399 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5400 GEN7_RC_CTL_TO_MODE | 5401 rc6_mask); 5402 } else { 5403 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 5404 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5405 GEN6_RC_CTL_EI_MODE(1) | 5406 rc6_mask); 5407 } 5408 5409 /* 5410 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 5411 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 5412 */ 5413 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) 5414 I915_WRITE(GEN9_PG_ENABLE, 0); 5415 else 5416 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 5417 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 5418 5419 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5420 } 5421 5422 static void gen8_enable_rps(struct drm_i915_private *dev_priv) 5423 { 5424 struct intel_engine_cs *engine; 5425 enum intel_engine_id id; 5426 uint32_t rc6_mask = 0; 5427 5428 /* 1a: Software RC state - RC0 */ 5429 I915_WRITE(GEN6_RC_STATE, 0); 5430 5431 /* 1c & 1d: Get forcewake during program sequence. Although the driver 5432 * hasn't enabled a state yet where we need forcewake, BIOS may have. */ 5433 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5434 5435 /* 2a: Disable RC states.
 */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev_priv, rc6_mask);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
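	 *
	 * (The flow below mirrors the newer gens: clear GEN6_RC_CONTROL,
	 * program wake-rate limits and thresholds, build the RC6 mask, then
	 * let reset_rps()/gen6_set_rps() handle the RPS registers.)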
5515 */ 5516 I915_WRITE(GEN6_RC_STATE, 0); 5517 5518 /* Clear the DBG now so we don't confuse earlier errors */ 5519 gtfifodbg = I915_READ(GTFIFODBG); 5520 if (gtfifodbg) { 5521 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 5522 I915_WRITE(GTFIFODBG, gtfifodbg); 5523 } 5524 5525 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5526 5527 /* disable the counters and set deterministic thresholds */ 5528 I915_WRITE(GEN6_RC_CONTROL, 0); 5529 5530 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 5531 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 5532 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 5533 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 5534 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 5535 5536 for_each_engine(engine, dev_priv, id) 5537 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5538 5539 I915_WRITE(GEN6_RC_SLEEP, 0); 5540 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 5541 if (IS_IVYBRIDGE(dev_priv)) 5542 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 5543 else 5544 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 5545 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 5546 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 5547 5548 /* Check if we are enabling RC6 */ 5549 rc6_mode = intel_enable_rc6(); 5550 if (rc6_mode & INTEL_RC6_ENABLE) 5551 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 5552 5553 /* We don't use those on Haswell */ 5554 if (!IS_HASWELL(dev_priv)) { 5555 if (rc6_mode & INTEL_RC6p_ENABLE) 5556 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 5557 5558 if (rc6_mode & INTEL_RC6pp_ENABLE) 5559 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 5560 } 5561 5562 intel_print_rc6_info(dev_priv, rc6_mask); 5563 5564 I915_WRITE(GEN6_RC_CONTROL, 5565 rc6_mask | 5566 GEN6_RC_CTL_EI_MODE(1) | 5567 GEN6_RC_CTL_HW_ENABLE); 5568 5569 /* Power down if completely idle for over 50ms */ 5570 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); 5571 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5572 5573 reset_rps(dev_priv, gen6_set_rps); 5574 5575 rc6vids = 0; 5576 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 5577 if (IS_GEN6(dev_priv) && ret) { 5578 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 5579 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 5580 DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", 5581 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 5582 rc6vids &= 0xffff00; 5583 rc6vids |= GEN6_ENCODE_RC6_VID(450); 5584 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); 5585 if (ret) 5586 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); 5587 } 5588 5589 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5590 } 5591 5592 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) 5593 { 5594 int min_freq = 15; 5595 unsigned int gpu_freq; 5596 unsigned int max_ia_freq, min_ring_freq; 5597 unsigned int max_gpu_freq, min_gpu_freq; 5598 int scaling_factor = 180; 5599 5600 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5601 5602 #if 0 5603 policy = cpufreq_cpu_get(0); 5604 if (policy) { 5605 max_ia_freq = policy->cpuinfo.max_freq; 5606 cpufreq_cpu_put(policy); 5607 } else { 5608 /* 5609 * Default to measured freq if none found, PCU will ensure we 5610 * don't go over 5611 */ 5612 max_ia_freq = tsc_khz; 5613 } 5614 #else 5615 max_ia_freq = tsc_frequency / 1000; 5616 #endif 5617 5618 /* Convert from kHz to MHz */ 5619 max_ia_freq /= 1000; 5620 5621 min_ring_freq = I915_READ(DCLK) & 0xf; 5622 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 5623 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 5624 5625 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 5626 /* Convert GT frequency to 50 HZ units */ 5627 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 5628 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 5629 } else { 5630 min_gpu_freq = dev_priv->rps.min_freq; 5631 max_gpu_freq = dev_priv->rps.max_freq; 5632 } 5633 5634 /* 5635 * For each potential GPU frequency, load a ring frequency we'd like 5636 * to use for memory access. We do this by specifying the IA frequency 5637 * the PCU should use as a reference to determine the ring frequency. 5638 */ 5639 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { 5640 int diff = max_gpu_freq - gpu_freq; 5641 unsigned int ia_freq = 0, ring_freq = 0; 5642 5643 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 5644 /* 5645 * ring_freq = 2 * GT. ring_freq is in 100MHz units 5646 * No floor required for ring frequency on SKL. 5647 */ 5648 ring_freq = gpu_freq; 5649 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 5650 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 5651 ring_freq = max(min_ring_freq, gpu_freq); 5652 } else if (IS_HASWELL(dev_priv)) { 5653 ring_freq = mult_frac(gpu_freq, 5, 4); 5654 ring_freq = max(min_ring_freq, ring_freq); 5655 /* leave ia_freq as the default, chosen by cpufreq */ 5656 } else { 5657 /* On older processors, there is no separate ring 5658 * clock domain, so in order to boost the bandwidth 5659 * of the ring, we need to upclock the CPU (ia_freq). 5660 * 5661 * For GPU frequencies less than 750MHz, 5662 * just use the lowest ring freq. 
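			 *
			 * (Illustrative numbers only: with max_ia_freq of,
			 * say, 3000 MHz and the scaling_factor of 180 below,
			 * each 50 MHz GT step under the maximum trims the
			 * requested IA clock by 180 / 2 = 90 MHz, and
			 * DIV_ROUND_CLOSEST() then converts the result into
			 * the 100 MHz units used in the PCODE request below.)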
 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Use the (2 * 8) RP0 fuse for any other EU combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

/* Check that the pctx buffer wasn't moved under us.
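 * (VLV_PCBR holds the physical address of the GPU power context in
 * stolen memory; it must still point at the object we picked in
 * valleyview_setup_pctx().)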
 */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}


/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}

static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
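	 *
	 * (In practice: when the BIOS left PCBR unset, we allocate a
	 * pctx_size-byte stolen-memory object just below and point PCBR
	 * at its physical address.)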
5845 */ 5846 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size); 5847 if (!pctx) { 5848 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5849 goto out; 5850 } 5851 5852 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 5853 I915_WRITE(VLV_PCBR, pctx_paddr); 5854 5855 out: 5856 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5857 dev_priv->vlv_pctx = pctx; 5858 } 5859 5860 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) 5861 { 5862 if (WARN_ON(!dev_priv->vlv_pctx)) 5863 return; 5864 5865 i915_gem_object_put(dev_priv->vlv_pctx); 5866 dev_priv->vlv_pctx = NULL; 5867 } 5868 5869 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) 5870 { 5871 dev_priv->rps.gpll_ref_freq = 5872 vlv_get_cck_clock(dev_priv, "GPLL ref", 5873 CCK_GPLL_CLOCK_CONTROL, 5874 dev_priv->czclk_freq); 5875 5876 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", 5877 dev_priv->rps.gpll_ref_freq); 5878 } 5879 5880 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) 5881 { 5882 u32 val; 5883 5884 valleyview_setup_pctx(dev_priv); 5885 5886 vlv_init_gpll_ref_freq(dev_priv); 5887 5888 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5889 switch ((val >> 6) & 3) { 5890 case 0: 5891 case 1: 5892 dev_priv->mem_freq = 800; 5893 break; 5894 case 2: 5895 dev_priv->mem_freq = 1066; 5896 break; 5897 case 3: 5898 dev_priv->mem_freq = 1333; 5899 break; 5900 } 5901 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 5902 5903 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 5904 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5905 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5906 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5907 dev_priv->rps.max_freq); 5908 5909 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 5910 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5911 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5912 dev_priv->rps.efficient_freq); 5913 5914 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 5915 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 5916 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5917 dev_priv->rps.rp1_freq); 5918 5919 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 5920 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5921 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5922 dev_priv->rps.min_freq); 5923 } 5924 5925 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) 5926 { 5927 u32 val; 5928 5929 cherryview_setup_pctx(dev_priv); 5930 5931 vlv_init_gpll_ref_freq(dev_priv); 5932 5933 mutex_lock(&dev_priv->sb_lock); 5934 val = vlv_cck_read(dev_priv, CCK_FUSE_REG); 5935 mutex_unlock(&dev_priv->sb_lock); 5936 5937 switch ((val >> 2) & 0x7) { 5938 case 3: 5939 dev_priv->mem_freq = 2000; 5940 break; 5941 default: 5942 dev_priv->mem_freq = 1600; 5943 break; 5944 } 5945 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 5946 5947 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 5948 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5949 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5950 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5951 dev_priv->rps.max_freq); 5952 5953 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 5954 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5955 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5956 dev_priv->rps.efficient_freq); 5957 5958 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 5959 
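	/* RP1, the 'guaranteed' frequency, comes from the same PUnit fuse
	 * register as RP0, masked down to its frequency field. */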
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 5960 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5961 dev_priv->rps.rp1_freq); 5962 5963 /* PUnit validated range is only [RPe, RP0] */ 5964 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq; 5965 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5966 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5967 dev_priv->rps.min_freq); 5968 5969 WARN_ONCE((dev_priv->rps.max_freq | 5970 dev_priv->rps.efficient_freq | 5971 dev_priv->rps.rp1_freq | 5972 dev_priv->rps.min_freq) & 1, 5973 "Odd GPU freq values\n"); 5974 } 5975 5976 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 5977 { 5978 valleyview_cleanup_pctx(dev_priv); 5979 } 5980 5981 static void cherryview_enable_rps(struct drm_i915_private *dev_priv) 5982 { 5983 struct intel_engine_cs *engine; 5984 enum intel_engine_id id; 5985 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5986 5987 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5988 5989 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | 5990 GT_FIFO_FREE_ENTRIES_CHV); 5991 if (gtfifodbg) { 5992 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5993 gtfifodbg); 5994 I915_WRITE(GTFIFODBG, gtfifodbg); 5995 } 5996 5997 cherryview_check_pctx(dev_priv); 5998 5999 /* 1a & 1b: Get forcewake during program sequence. Although the driver 6000 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 6001 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6002 6003 /* Disable RC states. */ 6004 I915_WRITE(GEN6_RC_CONTROL, 0); 6005 6006 /* 2a: Program RC6 thresholds.*/ 6007 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 6008 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 6009 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 6010 6011 for_each_engine(engine, dev_priv, id) 6012 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 6013 I915_WRITE(GEN6_RC_SLEEP, 0); 6014 6015 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ 6016 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); 6017 6018 /* allows RC6 residency counter to work */ 6019 I915_WRITE(VLV_COUNTER_CONTROL, 6020 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 6021 VLV_MEDIA_RC6_COUNT_EN | 6022 VLV_RENDER_RC6_COUNT_EN)); 6023 6024 /* For now we assume BIOS is allocating and populating the PCBR */ 6025 pcbr = I915_READ(VLV_PCBR); 6026 6027 /* 3: Enable RC6 */ 6028 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && 6029 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 6030 rc6_mode = GEN7_RC_CTL_TO_MODE; 6031 6032 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 6033 6034 /* 4 Program defaults and thresholds for RPS*/ 6035 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 6036 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 6037 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 6038 I915_WRITE(GEN6_RP_UP_EI, 66000); 6039 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 6040 6041 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 6042 6043 /* 5: Enable RPS */ 6044 I915_WRITE(GEN6_RP_CONTROL, 6045 GEN6_RP_MEDIA_HW_NORMAL_MODE | 6046 GEN6_RP_MEDIA_IS_GFX | 6047 GEN6_RP_ENABLE | 6048 GEN6_RP_UP_BUSY_AVG | 6049 GEN6_RP_DOWN_IDLE_AVG); 6050 6051 /* Setting Fixed Bias */ 6052 val = VLV_OVERRIDE_EN | 6053 VLV_SOC_TDP_EN | 6054 CHV_BIAS_CPU_50_SOC_50; 6055 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 6056 6057 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 6058 6059 /* RPS code assumes GPLL is used */ 6060 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 6061 6062 DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); 6063 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 6064 6065 reset_rps(dev_priv, valleyview_set_rps); 6066 6067 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6068 } 6069 6070 static void valleyview_enable_rps(struct drm_i915_private *dev_priv) 6071 { 6072 struct intel_engine_cs *engine; 6073 enum intel_engine_id id; 6074 u32 gtfifodbg, val, rc6_mode = 0; 6075 6076 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 6077 6078 valleyview_check_pctx(dev_priv); 6079 6080 gtfifodbg = I915_READ(GTFIFODBG); 6081 if (gtfifodbg) { 6082 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 6083 gtfifodbg); 6084 I915_WRITE(GTFIFODBG, gtfifodbg); 6085 } 6086 6087 /* If VLV, Forcewake all wells, else re-direct to regular path */ 6088 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6089 6090 /* Disable RC states. */ 6091 I915_WRITE(GEN6_RC_CONTROL, 0); 6092 6093 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 6094 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 6095 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 6096 I915_WRITE(GEN6_RP_UP_EI, 66000); 6097 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 6098 6099 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 6100 6101 I915_WRITE(GEN6_RP_CONTROL, 6102 GEN6_RP_MEDIA_TURBO | 6103 GEN6_RP_MEDIA_HW_NORMAL_MODE | 6104 GEN6_RP_MEDIA_IS_GFX | 6105 GEN6_RP_ENABLE | 6106 GEN6_RP_UP_BUSY_AVG | 6107 GEN6_RP_DOWN_IDLE_CONT); 6108 6109 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 6110 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 6111 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 6112 6113 for_each_engine(engine, dev_priv, id) 6114 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 6115 6116 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 6117 6118 /* allows RC6 residency counter to work */ 6119 I915_WRITE(VLV_COUNTER_CONTROL, 6120 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | 6121 VLV_RENDER_RC0_COUNT_EN | 6122 VLV_MEDIA_RC6_COUNT_EN | 6123 VLV_RENDER_RC6_COUNT_EN)); 6124 6125 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 6126 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 6127 6128 intel_print_rc6_info(dev_priv, rc6_mode); 6129 6130 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 6131 6132 /* Setting Fixed Bias */ 6133 val = VLV_OVERRIDE_EN | 6134 VLV_SOC_TDP_EN | 6135 VLV_BIAS_CPU_125_SOC_875; 6136 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 6137 6138 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 6139 6140 /* RPS code assumes GPLL is used */ 6141 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 6142 6143 DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); 6144 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 6145 6146 reset_rps(dev_priv, valleyview_set_rps); 6147 6148 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6149 } 6150 6151 static unsigned long intel_pxfreq(u32 vidfreq) 6152 { 6153 unsigned long freq; 6154 int div = (vidfreq & 0x3f0000) >> 16; 6155 int post = (vidfreq & 0x3000) >> 12; 6156 int pre = (vidfreq & 0x7); 6157 6158 if (!pre) 6159 return 0; 6160 6161 freq = ((div * 133333) / ((1<<post) * pre)); 6162 6163 return freq; 6164 } 6165 6166 static const struct cparams { 6167 u16 i; 6168 u16 t; 6169 u16 m; 6170 u16 c; 6171 } cparams[] = { 6172 { 1, 1333, 301, 28664 }, 6173 { 1, 1066, 294, 24460 }, 6174 { 1, 800, 294, 25192 }, 6175 { 0, 1333, 276, 27605 }, 6176 { 0, 1066, 276, 27605 }, 6177 { 0, 800, 231, 23784 }, 6178 }; 6179 6180 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 6181 { 6182 u64 total_count, diff, ret; 6183 u32 count1, count2, count3, m = 0, c = 0; 6184 unsigned long now = jiffies_to_msecs(jiffies), diff1; 6185 int i; 6186 6187 assert_spin_locked(&mchdev_lock); 6188 6189 diff1 = now - dev_priv->ips.last_time1; 6190 6191 /* Prevent division-by-zero if we are asking too fast. 6192 * Also, we don't get interesting results if we are polling 6193 * faster than once in 10ms, so just return the saved value 6194 * in such cases. 6195 */ 6196 if (diff1 <= 10) 6197 return dev_priv->ips.chipset_power; 6198 6199 count1 = I915_READ(DMIEC); 6200 count2 = I915_READ(DDREC); 6201 count3 = I915_READ(CSIEC); 6202 6203 total_count = count1 + count2 + count3; 6204 6205 /* FIXME: handle per-counter overflow */ 6206 if (total_count < dev_priv->ips.last_count1) { 6207 diff = ~0UL - dev_priv->ips.last_count1; 6208 diff += total_count; 6209 } else { 6210 diff = total_count - dev_priv->ips.last_count1; 6211 } 6212 6213 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 6214 if (cparams[i].i == dev_priv->ips.c_m && 6215 cparams[i].t == dev_priv->ips.r_t) { 6216 m = cparams[i].m; 6217 c = cparams[i].c; 6218 break; 6219 } 6220 } 6221 6222 diff = div_u64(diff, diff1); 6223 ret = ((m * diff) + c); 6224 ret = div_u64(ret, 10); 6225 6226 dev_priv->ips.last_count1 = total_count; 6227 dev_priv->ips.last_time1 = now; 6228 6229 dev_priv->ips.chipset_power = ret; 6230 6231 return ret; 6232 } 6233 6234 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 6235 { 6236 unsigned long val; 6237 6238 if (INTEL_INFO(dev_priv)->gen != 5) 6239 return 0; 6240 6241 spin_lock_irq(&mchdev_lock); 6242 6243 val = __i915_chipset_val(dev_priv); 6244 6245 spin_unlock_irq(&mchdev_lock); 6246 6247 return val; 6248 } 6249 6250 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 6251 { 6252 unsigned long m, x, b; 6253 u32 tsfs; 6254 6255 tsfs = I915_READ(TSFS); 6256 6257 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 6258 x = I915_READ8(TR1); 6259 6260 b = tsfs & TSFS_INTR_MASK; 6261 6262 return ((m * x) / 127) - b; 6263 } 6264 6265 static int _pxvid_to_vd(u8 pxvid) 6266 { 6267 if (pxvid == 0) 6268 return 0; 6269 6270 if (pxvid >= 8 && pxvid < 31) 6271 pxvid = 31; 6272 6273 return (pxvid + 2) * 125; 6274 } 6275 6276 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 6277 { 6278 const int vd = _pxvid_to_vd(pxvid); 6279 const int vm = vd - 1125; 6280 6281 if (INTEL_INFO(dev_priv)->is_mobile) 6282 return vm > 0 ? 
vm : 0; 6283 6284 return vd; 6285 } 6286 6287 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 6288 { 6289 u64 now, diff, diffms; 6290 u32 count; 6291 6292 assert_spin_locked(&mchdev_lock); 6293 6294 now = ktime_get_raw_ns(); 6295 diffms = now - dev_priv->ips.last_time2; 6296 do_div(diffms, NSEC_PER_MSEC); 6297 6298 /* Don't divide by 0 */ 6299 if (!diffms) 6300 return; 6301 6302 count = I915_READ(GFXEC); 6303 6304 if (count < dev_priv->ips.last_count2) { 6305 diff = ~0UL - dev_priv->ips.last_count2; 6306 diff += count; 6307 } else { 6308 diff = count - dev_priv->ips.last_count2; 6309 } 6310 6311 dev_priv->ips.last_count2 = count; 6312 dev_priv->ips.last_time2 = now; 6313 6314 /* More magic constants... */ 6315 diff = diff * 1181; 6316 diff = div_u64(diff, diffms * 10); 6317 dev_priv->ips.gfx_power = diff; 6318 } 6319 6320 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 6321 { 6322 if (INTEL_INFO(dev_priv)->gen != 5) 6323 return; 6324 6325 spin_lock_irq(&mchdev_lock); 6326 6327 __i915_update_gfx_val(dev_priv); 6328 6329 spin_unlock_irq(&mchdev_lock); 6330 } 6331 6332 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 6333 { 6334 unsigned long t, corr, state1, corr2, state2; 6335 u32 pxvid, ext_v; 6336 6337 assert_spin_locked(&mchdev_lock); 6338 6339 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq)); 6340 pxvid = (pxvid >> 24) & 0x7f; 6341 ext_v = pvid_to_extvid(dev_priv, pxvid); 6342 6343 state1 = ext_v; 6344 6345 t = i915_mch_val(dev_priv); 6346 6347 /* Revel in the empirically derived constants */ 6348 6349 /* Correction factor in 1/100000 units */ 6350 if (t > 80) 6351 corr = ((t * 2349) + 135940); 6352 else if (t >= 50) 6353 corr = ((t * 964) + 29317); 6354 else /* < 50 */ 6355 corr = ((t * 301) + 1004); 6356 6357 corr = corr * ((150142 * state1) / 10000 - 78642); 6358 corr /= 100000; 6359 corr2 = (corr * dev_priv->ips.corr); 6360 6361 state2 = (corr2 * state1) / 10000; 6362 state2 /= 100; /* convert to mW */ 6363 6364 __i915_update_gfx_val(dev_priv); 6365 6366 return dev_priv->ips.gfx_power + state2; 6367 } 6368 6369 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 6370 { 6371 unsigned long val; 6372 6373 if (INTEL_INFO(dev_priv)->gen != 5) 6374 return 0; 6375 6376 spin_lock_irq(&mchdev_lock); 6377 6378 val = __i915_gfx_val(dev_priv); 6379 6380 spin_unlock_irq(&mchdev_lock); 6381 6382 return val; 6383 } 6384 6385 /** 6386 * i915_read_mch_val - return value for IPS use 6387 * 6388 * Calculate and return a value for the IPS driver to use when deciding whether 6389 * we have thermal and power headroom to increase CPU or GPU power budget. 6390 */ 6391 unsigned long i915_read_mch_val(void) 6392 { 6393 struct drm_i915_private *dev_priv; 6394 unsigned long chipset_val, graphics_val, ret = 0; 6395 6396 spin_lock_irq(&mchdev_lock); 6397 if (!i915_mch_dev) 6398 goto out_unlock; 6399 dev_priv = i915_mch_dev; 6400 6401 chipset_val = __i915_chipset_val(dev_priv); 6402 graphics_val = __i915_gfx_val(dev_priv); 6403 6404 ret = chipset_val + graphics_val; 6405 6406 out_unlock: 6407 spin_unlock_irq(&mchdev_lock); 6408 6409 return ret; 6410 } 6411 EXPORT_SYMBOL_GPL(i915_read_mch_val); 6412 6413 /** 6414 * i915_gpu_raise - raise GPU frequency limit 6415 * 6416 * Raise the limit; IPS indicates we have thermal headroom. 
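 * (ips delay values run opposite to frequency: fmax is the smallest
 * delay, so decrementing max_delay raises the effective frequency
 * limit.)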
6417 */ 6418 bool i915_gpu_raise(void) 6419 { 6420 struct drm_i915_private *dev_priv; 6421 bool ret = true; 6422 6423 spin_lock_irq(&mchdev_lock); 6424 if (!i915_mch_dev) { 6425 ret = false; 6426 goto out_unlock; 6427 } 6428 dev_priv = i915_mch_dev; 6429 6430 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 6431 dev_priv->ips.max_delay--; 6432 6433 out_unlock: 6434 spin_unlock_irq(&mchdev_lock); 6435 6436 return ret; 6437 } 6438 EXPORT_SYMBOL_GPL(i915_gpu_raise); 6439 6440 /** 6441 * i915_gpu_lower - lower GPU frequency limit 6442 * 6443 * IPS indicates we're close to a thermal limit, so throttle back the GPU 6444 * frequency maximum. 6445 */ 6446 bool i915_gpu_lower(void) 6447 { 6448 struct drm_i915_private *dev_priv; 6449 bool ret = true; 6450 6451 spin_lock_irq(&mchdev_lock); 6452 if (!i915_mch_dev) { 6453 ret = false; 6454 goto out_unlock; 6455 } 6456 dev_priv = i915_mch_dev; 6457 6458 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 6459 dev_priv->ips.max_delay++; 6460 6461 out_unlock: 6462 spin_unlock_irq(&mchdev_lock); 6463 6464 return ret; 6465 } 6466 EXPORT_SYMBOL_GPL(i915_gpu_lower); 6467 6468 /** 6469 * i915_gpu_busy - indicate GPU business to IPS 6470 * 6471 * Tell the IPS driver whether or not the GPU is busy. 6472 */ 6473 bool i915_gpu_busy(void) 6474 { 6475 bool ret = false; 6476 6477 spin_lock_irq(&mchdev_lock); 6478 if (i915_mch_dev) 6479 ret = i915_mch_dev->gt.awake; 6480 spin_unlock_irq(&mchdev_lock); 6481 6482 return ret; 6483 } 6484 EXPORT_SYMBOL_GPL(i915_gpu_busy); 6485 6486 /** 6487 * i915_gpu_turbo_disable - disable graphics turbo 6488 * 6489 * Disable graphics turbo by resetting the max frequency and setting the 6490 * current frequency to the default. 6491 */ 6492 bool i915_gpu_turbo_disable(void) 6493 { 6494 struct drm_i915_private *dev_priv; 6495 bool ret = true; 6496 6497 spin_lock_irq(&mchdev_lock); 6498 if (!i915_mch_dev) { 6499 ret = false; 6500 goto out_unlock; 6501 } 6502 dev_priv = i915_mch_dev; 6503 6504 dev_priv->ips.max_delay = dev_priv->ips.fstart; 6505 6506 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) 6507 ret = false; 6508 6509 out_unlock: 6510 spin_unlock_irq(&mchdev_lock); 6511 6512 return ret; 6513 } 6514 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 6515 6516 /** 6517 * Tells the intel_ips driver that the i915 driver is now loaded, if 6518 * IPS got loaded first. 6519 * 6520 * This awkward dance is so that neither module has to depend on the 6521 * other in order for IPS to do the appropriate communication of 6522 * GPU turbo limits to i915. 6523 */ 6524 static void 6525 ips_ping_for_i915_load(void) 6526 { 6527 #if 0 6528 void (*link)(void); 6529 6530 link = symbol_get(ips_link_to_i915_driver); 6531 if (link) { 6532 link(); 6533 symbol_put(ips_link_to_i915_driver); 6534 } 6535 #endif 6536 } 6537 6538 void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 6539 { 6540 /* We only register the i915 ips part with intel-ips once everything is 6541 * set up, to avoid intel-ips sneaking in and reading bogus values. 
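 * (The exported i915_gpu_* hooks all bail out while i915_mch_dev is
 * still NULL, so publishing the pointer under mchdev_lock is what
 * switches them on.)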
 */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make
	 * RC6 a requirement.
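	 *
	 * (When the user has disabled RC6 we instead take a permanent
	 * runtime-PM reference below, keeping the device awake;
	 * intel_cleanup_gt_powersave() drops it again.)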
6631 */ 6632 if (!i915.enable_rc6) { 6633 DRM_INFO("RC6 disabled, disabling runtime PM support\n"); 6634 intel_runtime_pm_get(dev_priv); 6635 } 6636 6637 mutex_lock(&dev_priv->drm.struct_mutex); 6638 mutex_lock(&dev_priv->rps.hw_lock); 6639 6640 /* Initialize RPS limits (for userspace) */ 6641 if (IS_CHERRYVIEW(dev_priv)) 6642 cherryview_init_gt_powersave(dev_priv); 6643 else if (IS_VALLEYVIEW(dev_priv)) 6644 valleyview_init_gt_powersave(dev_priv); 6645 else if (INTEL_GEN(dev_priv) >= 6) 6646 gen6_init_rps_frequencies(dev_priv); 6647 6648 /* Derive initial user preferences/limits from the hardware limits */ 6649 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 6650 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq; 6651 6652 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 6653 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 6654 6655 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 6656 dev_priv->rps.min_freq_softlimit = 6657 max_t(int, 6658 dev_priv->rps.efficient_freq, 6659 intel_freq_opcode(dev_priv, 450)); 6660 6661 /* After setting max-softlimit, find the overclock max freq */ 6662 if (IS_GEN6(dev_priv) || 6663 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { 6664 u32 params = 0; 6665 6666 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, ¶ms); 6667 if (params & BIT(31)) { /* OC supported */ 6668 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", 6669 (dev_priv->rps.max_freq & 0xff) * 50, 6670 (params & 0xff) * 50); 6671 dev_priv->rps.max_freq = params & 0xff; 6672 } 6673 } 6674 6675 /* Finally allow us to boost to max by default */ 6676 dev_priv->rps.boost_freq = dev_priv->rps.max_freq; 6677 6678 mutex_unlock(&dev_priv->rps.hw_lock); 6679 mutex_unlock(&dev_priv->drm.struct_mutex); 6680 6681 intel_autoenable_gt_powersave(dev_priv); 6682 } 6683 6684 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 6685 { 6686 if (IS_VALLEYVIEW(dev_priv)) 6687 valleyview_cleanup_gt_powersave(dev_priv); 6688 6689 if (!i915.enable_rc6) 6690 intel_runtime_pm_put(dev_priv); 6691 } 6692 6693 /** 6694 * intel_suspend_gt_powersave - suspend PM work and helper threads 6695 * @dev_priv: i915 device 6696 * 6697 * We don't want to disable RC6 or other features here, we just want 6698 * to make sure any work we've queued has finished and won't bother 6699 * us while we're suspended. 
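 *
 * (Cancelling rps.autoenable_work here also drops the runtime-PM
 * reference it was queued with; see intel_autoenable_gt_powersave().)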
6700 */ 6701 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) 6702 { 6703 if (INTEL_GEN(dev_priv) < 6) 6704 return; 6705 6706 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work)) 6707 intel_runtime_pm_put(dev_priv); 6708 6709 /* gen6_rps_idle() will be called later to disable interrupts */ 6710 } 6711 6712 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) 6713 { 6714 dev_priv->rps.enabled = true; /* force disabling */ 6715 intel_disable_gt_powersave(dev_priv); 6716 6717 gen6_reset_rps_interrupts(dev_priv); 6718 } 6719 6720 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) 6721 { 6722 if (!READ_ONCE(dev_priv->rps.enabled)) 6723 return; 6724 6725 mutex_lock(&dev_priv->rps.hw_lock); 6726 6727 if (INTEL_GEN(dev_priv) >= 9) { 6728 gen9_disable_rc6(dev_priv); 6729 gen9_disable_rps(dev_priv); 6730 } else if (IS_CHERRYVIEW(dev_priv)) { 6731 cherryview_disable_rps(dev_priv); 6732 } else if (IS_VALLEYVIEW(dev_priv)) { 6733 valleyview_disable_rps(dev_priv); 6734 } else if (INTEL_GEN(dev_priv) >= 6) { 6735 gen6_disable_rps(dev_priv); 6736 } else if (IS_IRONLAKE_M(dev_priv)) { 6737 ironlake_disable_drps(dev_priv); 6738 } 6739 6740 dev_priv->rps.enabled = false; 6741 mutex_unlock(&dev_priv->rps.hw_lock); 6742 } 6743 6744 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) 6745 { 6746 /* We shouldn't be disabling as we submit, so this should be less 6747 * racy than it appears! 6748 */ 6749 if (READ_ONCE(dev_priv->rps.enabled)) 6750 return; 6751 6752 /* Powersaving is controlled by the host when inside a VM */ 6753 if (intel_vgpu_active(dev_priv)) 6754 return; 6755 6756 mutex_lock(&dev_priv->rps.hw_lock); 6757 6758 if (IS_CHERRYVIEW(dev_priv)) { 6759 cherryview_enable_rps(dev_priv); 6760 } else if (IS_VALLEYVIEW(dev_priv)) { 6761 valleyview_enable_rps(dev_priv); 6762 } else if (INTEL_GEN(dev_priv) >= 9) { 6763 gen9_enable_rc6(dev_priv); 6764 gen9_enable_rps(dev_priv); 6765 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 6766 gen6_update_ring_freq(dev_priv); 6767 } else if (IS_BROADWELL(dev_priv)) { 6768 gen8_enable_rps(dev_priv); 6769 gen6_update_ring_freq(dev_priv); 6770 } else if (INTEL_GEN(dev_priv) >= 6) { 6771 gen6_enable_rps(dev_priv); 6772 gen6_update_ring_freq(dev_priv); 6773 } else if (IS_IRONLAKE_M(dev_priv)) { 6774 ironlake_enable_drps(dev_priv); 6775 intel_init_emon(dev_priv); 6776 } 6777 6778 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6779 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); 6780 6781 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); 6782 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); 6783 6784 dev_priv->rps.enabled = true; 6785 mutex_unlock(&dev_priv->rps.hw_lock); 6786 } 6787 6788 static void __intel_autoenable_gt_powersave(struct work_struct *work) 6789 { 6790 struct drm_i915_private *dev_priv = 6791 container_of(work, typeof(*dev_priv), rps.autoenable_work.work); 6792 struct intel_engine_cs *rcs; 6793 struct drm_i915_gem_request *req; 6794 6795 if (READ_ONCE(dev_priv->rps.enabled)) 6796 goto out; 6797 6798 rcs = dev_priv->engine[RCS]; 6799 if (rcs->last_context) 6800 goto out; 6801 6802 if (!rcs->init_context) 6803 goto out; 6804 6805 mutex_lock(&dev_priv->drm.struct_mutex); 6806 6807 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context); 6808 if (IS_ERR(req)) 6809 goto unlock; 6810 6811 if (!i915.enable_execlists && i915_switch_context(req) == 0) 6812 rcs->init_context(req); 6813 6814 /* Mark the device busy, calling 
intel_enable_gt_powersave() */ 6815 i915_add_request_no_flush(req); 6816 6817 unlock: 6818 mutex_unlock(&dev_priv->drm.struct_mutex); 6819 out: 6820 intel_runtime_pm_put(dev_priv); 6821 } 6822 6823 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv) 6824 { 6825 if (READ_ONCE(dev_priv->rps.enabled)) 6826 return; 6827 6828 if (IS_IRONLAKE_M(dev_priv)) { 6829 ironlake_enable_drps(dev_priv); 6830 intel_init_emon(dev_priv); 6831 } else if (INTEL_INFO(dev_priv)->gen >= 6) { 6832 /* 6833 * PCU communication is slow and this doesn't need to be 6834 * done at any specific time, so do this out of our fast path 6835 * to make resume and init faster. 6836 * 6837 * We depend on the HW RC6 power context save/restore 6838 * mechanism when entering D3 through runtime PM suspend. So 6839 * disable RPM until RPS/RC6 is properly setup. We can only 6840 * get here via the driver load/system resume/runtime resume 6841 * paths, so the _noresume version is enough (and in case of 6842 * runtime resume it's necessary). 6843 */ 6844 if (queue_delayed_work(dev_priv->wq, 6845 &dev_priv->rps.autoenable_work, 6846 round_jiffies_up_relative(HZ))) 6847 intel_runtime_pm_get_noresume(dev_priv); 6848 } 6849 } 6850 6851 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) 6852 { 6853 /* 6854 * On Ibex Peak and Cougar Point, we need to disable clock 6855 * gating for the panel power sequencer or it will fail to 6856 * start up when no ports are active. 6857 */ 6858 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 6859 } 6860 6861 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) 6862 { 6863 enum i915_pipe pipe; 6864 6865 for_each_pipe(dev_priv, pipe) { 6866 I915_WRITE(DSPCNTR(pipe), 6867 I915_READ(DSPCNTR(pipe)) | 6868 DISPPLANE_TRICKLE_FEED_DISABLE); 6869 6870 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); 6871 POSTING_READ(DSPSURF(pipe)); 6872 } 6873 } 6874 6875 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) 6876 { 6877 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 6878 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 6879 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 6880 6881 /* 6882 * Don't touch WM1S_LP_EN here. 6883 * Doing so could cause underruns. 6884 */ 6885 } 6886 6887 static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) 6888 { 6889 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6890 6891 /* 6892 * Required for FBC 6893 * WaFbcDisableDpfcClockGating:ilk 6894 */ 6895 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 6896 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 6897 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 6898 6899 I915_WRITE(PCH_3DCGDIS0, 6900 MARIUNIT_CLOCK_GATE_DISABLE | 6901 SVSMUNIT_CLOCK_GATE_DISABLE); 6902 I915_WRITE(PCH_3DCGDIS1, 6903 VFMUNIT_CLOCK_GATE_DISABLE); 6904 6905 /* 6906 * According to the spec the following bits should be set in 6907 * order to enable memory self-refresh 6908 * The bit 22/21 of 0x42004 6909 * The bit 5 of 0x42020 6910 * The bit 15 of 0x45000 6911 */ 6912 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6913 (I915_READ(ILK_DISPLAY_CHICKEN2) | 6914 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 6915 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 6916 I915_WRITE(DISP_ARB_CTL, 6917 (I915_READ(DISP_ARB_CTL) | 6918 DISP_FBC_WM_DIS)); 6919 6920 ilk_init_lp_watermarks(dev_priv); 6921 6922 /* 6923 * Based on the document from hardware guys the following bits 6924 * should be set unconditionally in order to enable FBC. 
 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}

static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
7026 * 7027 * Note that PS/WM thread counts depend on the WIZ hashing 7028 * disable bit, which we don't touch here, but it's good 7029 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 7030 */ 7031 I915_WRITE(GEN6_GT_MODE, 7032 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 7033 7034 ilk_init_lp_watermarks(dev_priv); 7035 7036 I915_WRITE(CACHE_MODE_0, 7037 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 7038 7039 I915_WRITE(GEN6_UCGCTL1, 7040 I915_READ(GEN6_UCGCTL1) | 7041 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 7042 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 7043 7044 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 7045 * gating disable must be set. Failure to set it results in 7046 * flickering pixels due to Z write ordering failures after 7047 * some amount of runtime in the Mesa "fire" demo, and Unigine 7048 * Sanctuary and Tropics, and apparently anything else with 7049 * alpha test or pixel discard. 7050 * 7051 * According to the spec, bit 11 (RCCUNIT) must also be set, 7052 * but we didn't debug actual testcases to find it out. 7053 * 7054 * WaDisableRCCUnitClockGating:snb 7055 * WaDisableRCPBUnitClockGating:snb 7056 */ 7057 I915_WRITE(GEN6_UCGCTL2, 7058 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 7059 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 7060 7061 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 7062 I915_WRITE(_3D_CHICKEN3, 7063 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); 7064 7065 /* 7066 * Bspec says: 7067 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 7068 * 3DSTATE_SF number of SF output attributes is more than 16." 7069 */ 7070 I915_WRITE(_3D_CHICKEN3, 7071 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 7072 7073 /* 7074 * According to the spec the following bits should be 7075 * set in order to enable memory self-refresh and fbc: 7076 * The bit21 and bit22 of 0x42000 7077 * The bit21 and bit22 of 0x42004 7078 * The bit5 and bit7 of 0x42020 7079 * The bit14 of 0x70180 7080 * The bit14 of 0x71180 7081 * 7082 * WaFbcAsynchFlipDisableFbcQueue:snb 7083 */ 7084 I915_WRITE(ILK_DISPLAY_CHICKEN1, 7085 I915_READ(ILK_DISPLAY_CHICKEN1) | 7086 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 7087 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7088 I915_READ(ILK_DISPLAY_CHICKEN2) | 7089 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 7090 I915_WRITE(ILK_DSPCLK_GATE_D, 7091 I915_READ(ILK_DSPCLK_GATE_D) | 7092 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 7093 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 7094 7095 g4x_disable_trickle_feed(dev_priv); 7096 7097 cpt_init_clock_gating(dev_priv); 7098 7099 gen6_check_mch_setup(dev_priv); 7100 } 7101 7102 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 7103 { 7104 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 7105 7106 /* 7107 * WaVSThreadDispatchOverride:ivb,vlv 7108 * 7109 * This actually overrides the dispatch 7110 * mode for all thread types. 7111 */ 7112 reg &= ~GEN7_FF_SCHED_MASK; 7113 reg |= GEN7_FF_TS_SCHED_HW; 7114 reg |= GEN7_FF_VS_SCHED_HW; 7115 reg |= GEN7_FF_DS_SCHED_HW; 7116 7117 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 7118 } 7119 7120 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) 7121 { 7122 /* 7123 * TODO: this bit should only be enabled when really needed, then 7124 * disabled when not needed anymore in order to save power. 
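	 *
	 * (lpt_suspend_hw() clears PCH_LP_PARTITION_LEVEL_DISABLE again on
	 * suspend, so the LP partition may power down while we're off.)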
7125 */ 7126 if (HAS_PCH_LPT_LP(dev_priv)) 7127 I915_WRITE(SOUTH_DSPCLK_GATE_D, 7128 I915_READ(SOUTH_DSPCLK_GATE_D) | 7129 PCH_LP_PARTITION_LEVEL_DISABLE); 7130 7131 /* WADPOClockGatingDisable:hsw */ 7132 I915_WRITE(TRANS_CHICKEN1(PIPE_A), 7133 I915_READ(TRANS_CHICKEN1(PIPE_A)) | 7134 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 7135 } 7136 7137 static void lpt_suspend_hw(struct drm_i915_private *dev_priv) 7138 { 7139 if (HAS_PCH_LPT_LP(dev_priv)) { 7140 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 7141 7142 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 7143 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 7144 } 7145 } 7146 7147 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, 7148 int general_prio_credits, 7149 int high_prio_credits) 7150 { 7151 u32 misccpctl; 7152 7153 /* WaTempDisableDOPClkGating:bdw */ 7154 misccpctl = I915_READ(GEN7_MISCCPCTL); 7155 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 7156 7157 I915_WRITE(GEN8_L3SQCREG1, 7158 L3_GENERAL_PRIO_CREDITS(general_prio_credits) | 7159 L3_HIGH_PRIO_CREDITS(high_prio_credits)); 7160 7161 /* 7162 * Wait at least 100 clocks before re-enabling clock gating. 7163 * See the definition of L3SQCREG1 in BSpec. 7164 */ 7165 POSTING_READ(GEN8_L3SQCREG1); 7166 udelay(1); 7167 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 7168 } 7169 7170 static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv) 7171 { 7172 gen9_init_clock_gating(dev_priv); 7173 7174 /* WaDisableSDEUnitClockGating:kbl */ 7175 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 7176 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7177 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7178 7179 /* WaDisableGamClockGating:kbl */ 7180 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 7181 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 7182 GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 7183 7184 /* WaFbcNukeOnHostModify:kbl */ 7185 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7186 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7187 } 7188 7189 static void skylake_init_clock_gating(struct drm_i915_private *dev_priv) 7190 { 7191 gen9_init_clock_gating(dev_priv); 7192 7193 /* WAC6entrylatency:skl */ 7194 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | 7195 FBC_LLC_FULLY_OPEN); 7196 7197 /* WaFbcNukeOnHostModify:skl */ 7198 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7199 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7200 } 7201 7202 static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) 7203 { 7204 enum i915_pipe pipe; 7205 7206 ilk_init_lp_watermarks(dev_priv); 7207 7208 /* WaSwitchSolVfFArbitrationPriority:bdw */ 7209 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7210 7211 /* WaPsrDPAMaskVBlankInSRD:bdw */ 7212 I915_WRITE(CHICKEN_PAR1_1, 7213 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 7214 7215 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 7216 for_each_pipe(dev_priv, pipe) { 7217 I915_WRITE(CHICKEN_PIPESL_1(pipe), 7218 I915_READ(CHICKEN_PIPESL_1(pipe)) | 7219 BDW_DPRS_MASK_VBLANK_SRD); 7220 } 7221 7222 /* WaVSRefCountFullforceMissDisable:bdw */ 7223 /* WaDSRefCountFullforceMissDisable:bdw */ 7224 I915_WRITE(GEN7_FF_THREAD_MODE, 7225 I915_READ(GEN7_FF_THREAD_MODE) & 7226 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 7227 7228 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 7229 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 7230 7231 /* WaDisableSDEUnitClockGating:bdw */ 7232 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7233 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7234 7235 /* WaProgramL3SqcReg1Default:bdw */ 7236 
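	/* 30 general-priority and 2 high-priority credits, the bdw default
	 * named by the workaround above */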
gen8_set_l3sqc_credits(dev_priv, 30, 2); 7237 7238 /* 7239 * WaGttCachingOffByDefault:bdw 7240 * GTT cache may not work with big pages, so if those 7241 * are ever enabled GTT cache may need to be disabled. 7242 */ 7243 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 7244 7245 /* WaKVMNotificationOnConfigChange:bdw */ 7246 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) 7247 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); 7248 7249 lpt_init_clock_gating(dev_priv); 7250 } 7251 7252 static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) 7253 { 7254 ilk_init_lp_watermarks(dev_priv); 7255 7256 /* L3 caching of data atomics doesn't work -- disable it. */ 7257 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 7258 I915_WRITE(HSW_ROW_CHICKEN3, 7259 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); 7260 7261 /* This is required by WaCatErrorRejectionIssue:hsw */ 7262 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7263 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7264 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7265 7266 /* WaVSRefCountFullforceMissDisable:hsw */ 7267 I915_WRITE(GEN7_FF_THREAD_MODE, 7268 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 7269 7270 /* WaDisable_RenderCache_OperationalFlush:hsw */ 7271 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 7272 7273 /* enable HiZ Raw Stall Optimization */ 7274 I915_WRITE(CACHE_MODE_0_GEN7, 7275 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 7276 7277 /* WaDisable4x2SubspanOptimization:hsw */ 7278 I915_WRITE(CACHE_MODE_1, 7279 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 7280 7281 /* 7282 * BSpec recommends 8x4 when MSAA is used, 7283 * however in practice 16x4 seems fastest. 7284 * 7285 * Note that PS/WM thread counts depend on the WIZ hashing 7286 * disable bit, which we don't touch here, but it's good 7287 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 7288 */ 7289 I915_WRITE(GEN7_GT_MODE, 7290 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 7291 7292 /* WaSampleCChickenBitEnable:hsw */ 7293 I915_WRITE(HALF_SLICE_CHICKEN3, 7294 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); 7295 7296 /* WaSwitchSolVfFArbitrationPriority:hsw */ 7297 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7298 7299 /* WaRsPkgCStateDisplayPMReq:hsw */ 7300 I915_WRITE(CHICKEN_PAR1_1, 7301 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 7302 7303 lpt_init_clock_gating(dev_priv); 7304 } 7305 7306 static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) 7307 { 7308 uint32_t snpcr; 7309 7310 ilk_init_lp_watermarks(dev_priv); 7311 7312 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 7313 7314 /* WaDisableEarlyCull:ivb */ 7315 I915_WRITE(_3D_CHICKEN3, 7316 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 7317 7318 /* WaDisableBackToBackFlipFix:ivb */ 7319 I915_WRITE(IVB_CHICKEN3, 7320 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 7321 CHICKEN3_DGMG_DONE_FIX_DISABLE); 7322 7323 /* WaDisablePSDDualDispatchEnable:ivb */ 7324 if (IS_IVB_GT1(dev_priv)) 7325 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 7326 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 7327 7328 /* WaDisable_RenderCache_OperationalFlush:ivb */ 7329 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 7330 7331 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. 
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	ilk_init_lp_watermarks(dev_priv);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev_priv);
}
static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING
	 */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
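/*
 * Sketch of how a new platform would slot into the chain above (the platform
 * check and init function are hypothetical, shown only to illustrate the
 * pattern). Note that ordering matters, since some checks are subsets of
 * later catch-alls: IS_I85X() devices would also match IS_GEN2(), so the
 * more specific check must come first:
 *
 *	else if (IS_NEWPLATFORM(dev_priv))
 *		dev_priv->display.init_clock_gating = newplatform_init_clock_gating;
 */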
" 7715 "Disable CxSR\n"); 7716 } 7717 } else if (IS_CHERRYVIEW(dev_priv)) { 7718 vlv_setup_wm_latency(dev_priv); 7719 dev_priv->display.update_wm = vlv_update_wm; 7720 } else if (IS_VALLEYVIEW(dev_priv)) { 7721 vlv_setup_wm_latency(dev_priv); 7722 dev_priv->display.update_wm = vlv_update_wm; 7723 } else if (IS_PINEVIEW(dev_priv)) { 7724 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv), 7725 dev_priv->is_ddr3, 7726 dev_priv->fsb_freq, 7727 dev_priv->mem_freq)) { 7728 DRM_INFO("failed to find known CxSR latency " 7729 "(found ddr%s fsb freq %d, mem freq %d), " 7730 "disabling CxSR\n", 7731 (dev_priv->is_ddr3 == 1) ? "3" : "2", 7732 dev_priv->fsb_freq, dev_priv->mem_freq); 7733 /* Disable CxSR and never update its watermark again */ 7734 intel_set_memory_cxsr(dev_priv, false); 7735 dev_priv->display.update_wm = NULL; 7736 } else 7737 dev_priv->display.update_wm = pineview_update_wm; 7738 } else if (IS_G4X(dev_priv)) { 7739 dev_priv->display.update_wm = g4x_update_wm; 7740 } else if (IS_GEN4(dev_priv)) { 7741 dev_priv->display.update_wm = i965_update_wm; 7742 } else if (IS_GEN3(dev_priv)) { 7743 dev_priv->display.update_wm = i9xx_update_wm; 7744 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 7745 } else if (IS_GEN2(dev_priv)) { 7746 if (INTEL_INFO(dev_priv)->num_pipes == 1) { 7747 dev_priv->display.update_wm = i845_update_wm; 7748 dev_priv->display.get_fifo_size = i845_get_fifo_size; 7749 } else { 7750 dev_priv->display.update_wm = i9xx_update_wm; 7751 dev_priv->display.get_fifo_size = i830_get_fifo_size; 7752 } 7753 } else { 7754 DRM_ERROR("unexpected fall-through in intel_init_pm\n"); 7755 } 7756 } 7757 7758 static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) 7759 { 7760 uint32_t flags = 7761 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; 7762 7763 switch (flags) { 7764 case GEN6_PCODE_SUCCESS: 7765 return 0; 7766 case GEN6_PCODE_UNIMPLEMENTED_CMD: 7767 case GEN6_PCODE_ILLEGAL_CMD: 7768 return -ENXIO; 7769 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 7770 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 7771 return -EOVERFLOW; 7772 case GEN6_PCODE_TIMEOUT: 7773 return -ETIMEDOUT; 7774 default: 7775 MISSING_CASE(flags) 7776 return 0; 7777 } 7778 } 7779 7780 static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv) 7781 { 7782 uint32_t flags = 7783 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; 7784 7785 switch (flags) { 7786 case GEN6_PCODE_SUCCESS: 7787 return 0; 7788 case GEN6_PCODE_ILLEGAL_CMD: 7789 return -ENXIO; 7790 case GEN7_PCODE_TIMEOUT: 7791 return -ETIMEDOUT; 7792 case GEN7_PCODE_ILLEGAL_DATA: 7793 return -EINVAL; 7794 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 7795 return -EOVERFLOW; 7796 default: 7797 MISSING_CASE(flags); 7798 return 0; 7799 } 7800 } 7801 7802 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 7803 { 7804 int status; 7805 7806 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7807 7808 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7809 * use te fw I915_READ variants to reduce the amount of work 7810 * required when reading/writing. 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50 ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}
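/*
 * Illustrative caller of skl_pcode_request() (a sketch modeled on the SKL
 * cdclk programming path; the mailbox ID and reply bits are assumptions,
 * check the actual callers before relying on them): poll until pcode
 * reports that a cdclk change may proceed, with a 3 ms base timeout.
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *	if (ret)
 *		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret);
 */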
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
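/*
 * Worked example for the VLV conversion above (the reference clock value is
 * hypothetical; gpll_ref_freq is in kHz and the result is in MHz): with
 * gpll_ref_freq = 20000 kHz and opcode val = 0xc7, N = 0xc7 - 0xb7 = 16, so
 * byt_gpu_freq() returns DIV_ROUND_CLOSEST(20000 * 16, 1000) = 320 MHz, and
 * byt_freq_opcode(dev_priv, 320) maps back to 0xc7.
 */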
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), M_DRM, GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
	lockinit(&dev_priv->rps.client_lock, "i915rcl", 0, 0);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}