/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/module.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and the GPU wakes up automatically as soon as a new workload
 * arises.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
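/*
 * E.g. a mask of (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) would allow the
 * normal and deep RC6 states while keeping RC6pp disabled (illustrative
 * combination; actual availability depends on the platform, see above).
 */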
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
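/*
 * Each entry above appears to follow the struct cxsr_latency field order
 * as consumed by intel_get_cxsr_latency() and pineview_update_wm():
 * {is_desktop, is_ddr3, fsb_freq, mem_freq,
 *  display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable},
 * with the four latencies in ns.
 */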
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static int vlv_get_fifo_size(struct drm_device *dev,
			     enum i915_pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
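/*
 * The per-pipe FIFO is carved up by 9 bit start offsets (low 8 bits in
 * DSPARB/DSPARB3, 9th bit in DSPARB2, see VLV_FIFO_START() above):
 * entries [0, sprite0_start) belong to the primary plane,
 * [sprite0_start, sprite1_start) to sprite 0, and the remainder up to
 * 512 - 1 to sprite 1; hence the subtractions in the plane switch.
 */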
"B" : "A", 475 size); 476 477 return size; 478 } 479 480 /* Pineview has different values for various configs */ 481 static const struct intel_watermark_params pineview_display_wm = { 482 .fifo_size = PINEVIEW_DISPLAY_FIFO, 483 .max_wm = PINEVIEW_MAX_WM, 484 .default_wm = PINEVIEW_DFT_WM, 485 .guard_size = PINEVIEW_GUARD_WM, 486 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 487 }; 488 static const struct intel_watermark_params pineview_display_hplloff_wm = { 489 .fifo_size = PINEVIEW_DISPLAY_FIFO, 490 .max_wm = PINEVIEW_MAX_WM, 491 .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 492 .guard_size = PINEVIEW_GUARD_WM, 493 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 494 }; 495 static const struct intel_watermark_params pineview_cursor_wm = { 496 .fifo_size = PINEVIEW_CURSOR_FIFO, 497 .max_wm = PINEVIEW_CURSOR_MAX_WM, 498 .default_wm = PINEVIEW_CURSOR_DFT_WM, 499 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 500 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 501 }; 502 static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 503 .fifo_size = PINEVIEW_CURSOR_FIFO, 504 .max_wm = PINEVIEW_CURSOR_MAX_WM, 505 .default_wm = PINEVIEW_CURSOR_DFT_WM, 506 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 507 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 508 }; 509 static const struct intel_watermark_params g4x_wm_info = { 510 .fifo_size = G4X_FIFO_SIZE, 511 .max_wm = G4X_MAX_WM, 512 .default_wm = G4X_MAX_WM, 513 .guard_size = 2, 514 .cacheline_size = G4X_FIFO_LINE_SIZE, 515 }; 516 static const struct intel_watermark_params g4x_cursor_wm_info = { 517 .fifo_size = I965_CURSOR_FIFO, 518 .max_wm = I965_CURSOR_MAX_WM, 519 .default_wm = I965_CURSOR_DFT_WM, 520 .guard_size = 2, 521 .cacheline_size = G4X_FIFO_LINE_SIZE, 522 }; 523 static const struct intel_watermark_params i965_cursor_wm_info = { 524 .fifo_size = I965_CURSOR_FIFO, 525 .max_wm = I965_CURSOR_MAX_WM, 526 .default_wm = I965_CURSOR_DFT_WM, 527 .guard_size = 2, 528 .cacheline_size = I915_FIFO_LINE_SIZE, 529 }; 530 static const struct intel_watermark_params i945_wm_info = { 531 .fifo_size = I945_FIFO_SIZE, 532 .max_wm = I915_MAX_WM, 533 .default_wm = 1, 534 .guard_size = 2, 535 .cacheline_size = I915_FIFO_LINE_SIZE, 536 }; 537 static const struct intel_watermark_params i915_wm_info = { 538 .fifo_size = I915_FIFO_SIZE, 539 .max_wm = I915_MAX_WM, 540 .default_wm = 1, 541 .guard_size = 2, 542 .cacheline_size = I915_FIFO_LINE_SIZE, 543 }; 544 static const struct intel_watermark_params i830_a_wm_info = { 545 .fifo_size = I855GM_FIFO_SIZE, 546 .max_wm = I915_MAX_WM, 547 .default_wm = 1, 548 .guard_size = 2, 549 .cacheline_size = I830_FIFO_LINE_SIZE, 550 }; 551 static const struct intel_watermark_params i830_bc_wm_info = { 552 .fifo_size = I855GM_FIFO_SIZE, 553 .max_wm = I915_MAX_WM/2, 554 .default_wm = 1, 555 .guard_size = 2, 556 .cacheline_size = I830_FIFO_LINE_SIZE, 557 }; 558 static const struct intel_watermark_params i845_wm_info = { 559 .fifo_size = I830_FIFO_SIZE, 560 .max_wm = I915_MAX_WM, 561 .default_wm = 1, 562 .guard_size = 2, 563 .cacheline_size = I830_FIFO_LINE_SIZE, 564 }; 565 566 /** 567 * intel_calculate_wm - calculate watermark level 568 * @clock_in_khz: pixel clock 569 * @wm: chip FIFO params 570 * @cpp: bytes per pixel 571 * @latency_ns: memory latency for the platform 572 * 573 * Calculate the watermark level (the level at which the display plane will 574 * start fetching from memory again). Each chip has a different display 575 * FIFO size and allocation, so the caller needs to figure that out and pass 576 * in the correct intel_watermark_params structure. 
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
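/*
 * Illustrative numbers for the small buffer method above: a 148500 kHz
 * clock with cpp = 4 and a 5000 ns latency gives
 * entries = ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes, which,
 * assuming 64 byte cachelines, rounds up to 47 entries before the TLB
 * miss adjustment and guard size are applied.
 */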
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
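/*
 * Illustrative example: latency = 30 (3 us), pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4 gives
 * (30 * 148500) / (2200 * 10000) = 0 whole lines fetched during the
 * latency, so (0 + 1) * 1920 * 4 = 7680 bytes, i.e.
 * DIV_ROUND_UP(7680, 64) = 120 FIFO entries.
 */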
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
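/*
 * The levels computed by vlv_compute_wm_level() are "entries required";
 * the values actually programmed are their inversion against the
 * per-plane (or SR) FIFO size, i.e. value = fifo_size - wm, which is
 * what vlv_invert_wms() performs for every level above.
 */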
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
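/*
 * vlv_pipe_set_fifo_size() below writes the split points computed by
 * vlv_compute_fifo() back into DSPARB/DSPARB2/DSPARB3; VLV_FIFO() is the
 * counterpart of VLV_FIFO_START() above, packing each 9 bit start offset
 * into its low and high register halves.
 */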
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum i915_pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
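/*
 * Note the ordering in vlv_update_wm() below: deeper power saving states
 * (DDR DVFS, PM5, maxfifo/CxSR) are dropped before the new watermarks are
 * written and only (re)enabled afterwards, so the hardware never operates
 * with watermarks that assume a state it is not actually in.
 */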
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)
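/* is_power_of_2() on the pipe mask means exactly one pipe is enabled. */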
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev) || IS_I945GM(dev))
			cpp = 4;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
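/*
 * Illustrative example for the two methods (made-up numbers): with
 * pixel_rate = 148500 kHz, cpp = 4 and latency = 120 (12 us), method1
 * gives DIV_ROUND_UP_ULL(148500 * 4 * 120, 64 * 10000) + 2 = 112 + 2 =
 * 114; method2 with pipe_htotal = 2200 and horiz_pixels = 1920 gives
 * ((120 * 148500) / (2200 * 10000) + 1) * 1920 * 4 = 7680 bytes, i.e.
 * 7680 / 64 + 2 = 122.
 */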

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
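
/*
 * Worked example for ilk_plane_wm_max() above (hypothetical single-pipe
 * gen7 configuration): fifo_size = 768, level > 0, sprites enabled.
 * With INTEL_DDB_PART_5_6 the sprite gets 768 * 5 / 6 = 640 entries and
 * the primary gets 768 / 6 = 128; with the default 1:1 split each gets
 * 768 / 2 = 384. All results stay below the gen7 register max of 1023.
 */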

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk == 0))
		return 0;

	/*
	 * The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
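
/*
 * Worked example for hsw_compute_linetime_wm() above, with hypothetical
 * numbers: htotal = 2200, crtc_clock = 148500 kHz gives
 *
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * i.e. the line time in 1/8 us units (119 / 8 = 14.875 us, matching
 * 2200 pixels at 148.5 MHz). ips_linetime is the same calculation done
 * against cdclk instead of the pipe's dot clock.
 */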

static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * The punit doesn't take the read latency into account, so
		 * we need to add 2us to the various latency levels we
		 * retrieve from it.
		 *   - WM0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >= 1, the punit returns 0us latency when the
		 *   level is disabled, so we respect that and don't add 2us
		 *   in that case.
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
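
/*
 * Example of the gen9 sanitizing above, with hypothetical raw punit values:
 * {3, 7, 12, 0, 25, 30, 35, 40} becomes {5, 9, 14, 0, 0, 0, 0, 0} -- the
 * first three levels get the 2us read latency added, and the 0 at level 3
 * disables that level and every level above it.
 */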

static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(&dev_priv->drm);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		  ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		  ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->visible;
		pipe_wm->sprites_scaled = sprstate->visible &&
			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(dev);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
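
/*
 * Example of the LP-to-level mapping above: on a platform with five levels
 * (0-4), if wm[4] is enabled then LP1/LP2/LP3 map to levels 1/3/4, skipping
 * level 2; otherwise they map to levels 1/2/3 and level 4 is unused.
 */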

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum i915_pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/*
 * Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same.
 */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum i915_pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need to, because every
 * write causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = &dev_priv->drm;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */
#define BXT_DDB_SIZE		512

/*
 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
 * other universal planes are in indices 1..n. Note that this may leave unused
 * indices between the top "sprite" plane and the cursor.
 */
static int
skl_wm_plane_id(const struct intel_plane *plane)
{
	switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
	case DRM_PLANE_TYPE_CURSOR:
		return PLANE_CURSOR;
	case DRM_PLANE_TYPE_OVERLAY:
		return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
		return plane->plane;
	}
}

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;
	int pipe = to_intel_crtc(for_crtc)->pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	if (IS_BROXTON(dev))
		ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged. Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}
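
/*
 * Worked example for the allocation above: on SKL with two active pipes,
 * ddb_size = 896 - 4 = 892 blocks and pipe_size = 892 / 2 = 446, so the
 * first active pipe gets blocks [0, 446) and the second gets [446, 892).
 */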

static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum i915_pipe pipe;
	int plane;
	u32 val;

	memset(ddb, 0, sizeof(*ddb));

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);

		intel_display_power_put(dev_priv, power_domain);
	}
}

/*
 * Determines the downscale amount of a plane for the purposes of watermark
 * calculations. The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional
 * part. Caller should take care of dividing & rounding off the value.
 */
static uint32_t
skl_plane_downscale_amount(const struct intel_plane_state *pstate)
{
	uint32_t downscale_h, downscale_w;
	uint32_t src_w, src_h, dst_w, dst_h;

	if (WARN_ON(!pstate->visible))
		return DRM_PLANE_HELPER_NO_SCALING;

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	src_w = drm_rect_width(&pstate->src);
	src_h = drm_rect_height(&pstate->src);
	dst_w = drm_rect_width(&pstate->dst);
	dst_h = drm_rect_height(&pstate->dst);
	if (intel_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);

	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

	/* Provide result in 16.16 fixed point */
	return (uint64_t)downscale_w * downscale_h >> 16;
}
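
/*
 * Worked example for the 16.16 fixed point math above: a 3840x2160 source
 * scaled to a 1920x1080 destination gives downscale_w = downscale_h =
 * (3840 << 16) / 1920 = 2.0 in 16.16 form (0x20000), so the total returned
 * downscale amount is 2.0 * 2.0 = 4.0 (0x40000).
 */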

static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t down_scale_amount, data_rate;
	uint32_t width = 0, height = 0;
	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;

	if (!intel_pstate->visible)
		return 0;
	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	width = drm_rect_width(&intel_pstate->src) >> 16;
	height = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)	/* y-plane data rate */
			data_rate = width * height *
				drm_format_plane_cpp(format, 0);
		else	/* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				drm_format_plane_cpp(format, 1);
	} else {
		/* for packed formats */
		data_rate = width * height * drm_format_plane_cpp(format, 0);
	}

	down_scale_amount = skl_plane_downscale_amount(intel_pstate);

	return (uint64_t)data_rate * down_scale_amount >> 16;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane;
	const struct intel_plane *intel_plane;
	struct drm_plane_state *pstate;
	unsigned int rate, total_data_rate = 0;
	int id;
	int i;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	for_each_plane_in_state(state, plane, pstate, i) {
		id = skl_wm_plane_id(to_intel_plane(plane));
		intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe != intel_crtc->pipe)
			continue;

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		intel_cstate->wm.skl.plane_data_rate[id] = rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
	}

	/* Calculate CRTC's total data rate from cached values */
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		int id = skl_wm_plane_id(intel_plane);

		/* packed/uv */
		total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
	}

	WARN_ON(cstate->plane_mask && total_data_rate == 0);

	return total_data_rate;
}

static uint16_t
skl_ddb_min_alloc(struct drm_plane_state *pstate,
		  const int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->pixel_format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
		return 8;

	src_w = drm_rect_width(&intel_pstate->src) >> 16;
	src_h = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);

	/* Halve UV plane width and height for NV12 */
	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
	else
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);

	if (intel_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}

static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct drm_plane *plane;
	struct drm_plane_state *pstate;
	enum i915_pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
	unsigned int total_data_rate;
	int num_active;
	int id, i;

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}

	cursor_blocks = skl_cursor_allocation(num_active);
	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	alloc_size -= cursor_blocks;

	/* 1. Allocate the minimum required blocks for each active plane */
	for_each_plane_in_state(state, plane, pstate, i) {
		intel_plane = to_intel_plane(plane);
		id = skl_wm_plane_id(intel_plane);

		if (intel_plane->pipe != pipe)
			continue;

		if (!to_intel_plane_state(pstate)->visible) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}
		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}

		minimum[id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
	}

	for (i = 0; i < PLANE_CURSOR; i++) {
		alloc_size -= minimum[i];
		alloc_size -= y_minimum[i];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;
		int id = skl_wm_plane_id(intel_plane);

		data_rate = cstate->wm.skl.plane_data_rate[id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar
		 * format): promote the expression to 64 bits to avoid
		 * overflowing; the result is less than alloc_size since
		 * data_rate / total_data_rate < 1.
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][id].start = start;
			ddb->plane[pipe][id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/* allocation for y_plane part of planar format */
		y_data_rate = cstate->wm.skl.plane_y_data_rate[id];

		y_plane_blocks = y_minimum[id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}
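
/*
 * Worked example for the proportional split above (hypothetical numbers):
 * with alloc_size = 860 blocks left after the cursor and minimum
 * allocations, a primary plane contributing 2/3 of total_data_rate gets
 * its minimum plus 860 * 2/3 = 573 extra blocks, and a sprite contributing
 * the remaining 1/3 gets its minimum plus 286 blocks.
 */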

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * cpp;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
		plane_bytes_per_line *= 4;
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line /= 4;
	} else if (tiling == DRM_FORMAT_MOD_NONE) {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
		plane_blocks_per_line;

	return ret;
}
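
/*
 * Worked example for the two gen9 methods above, with hypothetical 1080p
 * numbers: pixel_rate = 148500 kHz, cpp = 4, latency = 5 (us), htotal = 2200,
 * horiz_pixels = 1920, linear tiling.
 *
 * Method 1 counts the bytes fetched during the latency window in 512-byte
 * blocks:
 *   5 * 148500 * 4 / 512 = 5800 -> DIV_ROUND_UP(5800, 1000) = 6 blocks
 *
 * Method 2 counts whole lines started during the latency window:
 *   plane_blocks_per_line = DIV_ROUND_UP(1920 * 4, 512) + 1 = 16
 *   DIV_ROUND_UP(5 * 148500, 2200 * 1000) = 1 line -> 16 blocks
 */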

static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!pstate->visible))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
	downscale_amount = skl_plane_downscale_amount(pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}

static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;

	if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
		*enabled = false;
		return 0;
	}

	width = drm_rect_width(&intel_pstate->src) >> 16;
	height = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 width,
				 cpp,
				 fb->modifier[0],
				 latency);

	plane_bytes_per_line = width * cpp;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;

		if (intel_rotation_90_or_270(pstate->rotation)) {
			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
				drm_format_plane_cpp(fb->pixel_format, 1) :
				drm_format_plane_cpp(fb->pixel_format, 0);

			switch (cpp) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
		y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
				      to_intel_crtc(cstate->base.crtc)->pipe,
				      skl_wm_plane_id(to_intel_plane(pstate->plane)),
				      res_blocks, ddb_allocation, res_lines);

			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}

static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *intel_pstate;
	uint16_t ddb_blocks;
	enum i915_pipe pipe = intel_crtc->pipe;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(result, 0, sizeof(*result));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		int i = skl_wm_plane_id(intel_plane);

		plane = &intel_plane->base;
		intel_pstate = NULL;
		if (state)
			intel_pstate =
				intel_atomic_get_existing_plane_state(state,
								      intel_plane);

		/*
		 * Note: If we start supporting multiple pending atomic commits
		 * against the same planes/CRTC's in the future, plane->state
		 * will no longer be the correct pre-state to use for the
		 * calculations here and we'll need to change where we get the
		 * 'unchanged' plane data from.
		 *
		 * For now this is fine because we only allow one queued commit
		 * against a CRTC. Even if the plane isn't modified by this
		 * transaction and we don't have a plane lock, we still have
		 * the CRTC's lock, so we know that no other transactions are
		 * racing with us to update it.
		 */
		if (!intel_pstate)
			intel_pstate = to_intel_plane_state(plane->state);

		WARN_ON(!intel_pstate->base.fb);

		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		ret = skl_compute_plane_wm(dev_priv,
					   cstate,
					   intel_pstate,
					   ddb_blocks,
					   level,
					   &result->plane_res_b[i],
					   &result->plane_res_l[i],
					   &result->plane_en[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;

	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
			    skl_pipe_pixel_rate(cstate));
}

static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;

	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
		int i = skl_wm_plane_id(intel_plane);

		trans_wm->plane_en[i] = false;
	}
}

static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev);
	int ret;

	for (level = 0; level <= max_level; level++) {
		ret = skl_compute_wm_level(dev_priv, ddb, cstate,
					   level, &pipe_wm->wm[level]);
		if (ret)
			return ret;
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);

	return 0;
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum i915_pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];

		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
			temp |= PLANE_WM_EN;

		r->plane[pipe][PLANE_CURSOR][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
		temp |= PLANE_WM_EN;

3677 r->plane_trans[pipe][PLANE_CURSOR] = temp; 3678 3679 r->wm_linetime[pipe] = p_wm->linetime; 3680 } 3681 3682 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, 3683 i915_reg_t reg, 3684 const struct skl_ddb_entry *entry) 3685 { 3686 if (entry->end) 3687 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); 3688 else 3689 I915_WRITE(reg, 0); 3690 } 3691 3692 static void skl_write_wm_values(struct drm_i915_private *dev_priv, 3693 const struct skl_wm_values *new) 3694 { 3695 struct drm_device *dev = &dev_priv->drm; 3696 struct intel_crtc *crtc; 3697 3698 for_each_intel_crtc(dev, crtc) { 3699 int i, level, max_level = ilk_wm_max_level(dev); 3700 enum i915_pipe pipe = crtc->pipe; 3701 3702 if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0) 3703 continue; 3704 if (!crtc->active) 3705 continue; 3706 3707 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); 3708 3709 for (level = 0; level <= max_level; level++) { 3710 for (i = 0; i < intel_num_planes(crtc); i++) 3711 I915_WRITE(PLANE_WM(pipe, i, level), 3712 new->plane[pipe][i][level]); 3713 I915_WRITE(CUR_WM(pipe, level), 3714 new->plane[pipe][PLANE_CURSOR][level]); 3715 } 3716 for (i = 0; i < intel_num_planes(crtc); i++) 3717 I915_WRITE(PLANE_WM_TRANS(pipe, i), 3718 new->plane_trans[pipe][i]); 3719 I915_WRITE(CUR_WM_TRANS(pipe), 3720 new->plane_trans[pipe][PLANE_CURSOR]); 3721 3722 for (i = 0; i < intel_num_planes(crtc); i++) { 3723 skl_ddb_entry_write(dev_priv, 3724 PLANE_BUF_CFG(pipe, i), 3725 &new->ddb.plane[pipe][i]); 3726 skl_ddb_entry_write(dev_priv, 3727 PLANE_NV12_BUF_CFG(pipe, i), 3728 &new->ddb.y_plane[pipe][i]); 3729 } 3730 3731 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 3732 &new->ddb.plane[pipe][PLANE_CURSOR]); 3733 } 3734 } 3735 3736 /* 3737 * When setting up a new DDB allocation arrangement, we need to correctly 3738 * sequence the times at which the new allocations for the pipes are taken into 3739 * account or we'll have pipes fetching from space previously allocated to 3740 * another pipe. 3741 * 3742 * Roughly the sequence looks like: 3743 * 1. re-allocate the pipe(s) with the allocation being reduced and not 3744 * overlapping with a previously lit-up pipe (another way to put it is: 3745 * pipes with their new allocation strictly included within their old ones). 3746 * 2. re-allocate the other pipes that get their allocation reduced 3747 * 3. allocate the pipes having their allocation increased 3748 * 3749 * Steps 1. and 2. are here to take care of the following case: 3750 * - Initially DDB looks like this: 3751 * | B | C | 3752 * - enable pipe A. 3753 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C 3754 * allocation 3755 * | A | B | C | 3756 * 3757 * We need to sequence the re-allocation: C, B, A (and not B, C, A). 3758 */
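/*
 * Illustrative sketch only (never called by the driver, and the helper name
 * is hypothetical): how the three flush passes of skl_flush_wm_values()
 * below would classify a single pipe, given its old and new DDB span.
 */
static inline int skl_ddb_flush_pass_example(const struct skl_ddb_entry *old,
					     const struct skl_ddb_entry *new)
{
	if (skl_ddb_entry_size(new) != skl_ddb_entry_size(old) &&
	    new->start >= old->start && new->end <= old->end)
		return 1; /* strictly inside the old span: flush first */
	if (skl_ddb_entry_size(new) < skl_ddb_entry_size(old))
		return 2; /* shrinking but still overlapping: flush second */
	return 3; /* only growing: flush last, no vblank wait needed */
}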
3759 3760 static void 3761 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int pass) 3762 { 3763 int plane; 3764 3765 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); 3766 3767 for_each_plane(dev_priv, pipe, plane) { 3768 I915_WRITE(PLANE_SURF(pipe, plane), 3769 I915_READ(PLANE_SURF(pipe, plane))); 3770 } 3771 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); 3772 } 3773 3774 static bool 3775 skl_ddb_allocation_included(const struct skl_ddb_allocation *old, 3776 const struct skl_ddb_allocation *new, 3777 enum i915_pipe pipe) 3778 { 3779 uint16_t old_size, new_size; 3780 3781 old_size = skl_ddb_entry_size(&old->pipe[pipe]); 3782 new_size = skl_ddb_entry_size(&new->pipe[pipe]); 3783 3784 return old_size != new_size && 3785 new->pipe[pipe].start >= old->pipe[pipe].start && 3786 new->pipe[pipe].end <= old->pipe[pipe].end; 3787 } 3788 3789 static void skl_flush_wm_values(struct drm_i915_private *dev_priv, 3790 struct skl_wm_values *new_values) 3791 { 3792 struct drm_device *dev = &dev_priv->drm; 3793 struct skl_ddb_allocation *cur_ddb, *new_ddb; 3794 bool reallocated[I915_MAX_PIPES] = {}; 3795 struct intel_crtc *crtc; 3796 enum i915_pipe pipe; 3797 3798 new_ddb = &new_values->ddb; 3799 cur_ddb = &dev_priv->wm.skl_hw.ddb; 3800 3801 /* 3802 * First pass: flush the pipes with the new allocation contained within 3803 * the old space. 3804 * 3805 * We'll wait for the vblank on those pipes to ensure we can safely 3806 * re-allocate the freed space without this pipe fetching from it. 3807 */ 3808 for_each_intel_crtc(dev, crtc) { 3809 if (!crtc->active) 3810 continue; 3811 3812 pipe = crtc->pipe; 3813 3814 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe)) 3815 continue; 3816 3817 skl_wm_flush_pipe(dev_priv, pipe, 1); 3818 intel_wait_for_vblank(dev, pipe); 3819 3820 reallocated[pipe] = true; 3821 } 3822 3823 3824 /* 3825 * Second pass: flush the pipes that are having their allocation 3826 * reduced, but overlapping with a previous allocation. 3827 * 3828 * Here as well we need to wait for the vblank to make sure the freed 3829 * space is not used anymore. 3830 */ 3831 for_each_intel_crtc(dev, crtc) { 3832 if (!crtc->active) 3833 continue; 3834 3835 pipe = crtc->pipe; 3836 3837 if (reallocated[pipe]) 3838 continue; 3839 3840 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) < 3841 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) { 3842 skl_wm_flush_pipe(dev_priv, pipe, 2); 3843 intel_wait_for_vblank(dev, pipe); 3844 reallocated[pipe] = true; 3845 } 3846 } 3847 3848 /* 3849 * Third pass: flush the pipes that got more space allocated. 3850 * 3851 * We don't need to actively wait for the update here, next vblank 3852 * will just get more DDB space with the correct WM values. 3853 */ 3854 for_each_intel_crtc(dev, crtc) { 3855 if (!crtc->active) 3856 continue; 3857 3858 pipe = crtc->pipe; 3859 3860 /* 3861 * At this point, only the pipes getting more space than before are 3862 * left to re-allocate. 
3863 */ 3864 if (reallocated[pipe]) 3865 continue; 3866 3867 skl_wm_flush_pipe(dev_priv, pipe, 3); 3868 } 3869 } 3870 3871 static int skl_update_pipe_wm(struct drm_crtc_state *cstate, 3872 struct skl_ddb_allocation *ddb, /* out */ 3873 struct skl_pipe_wm *pipe_wm, /* out */ 3874 bool *changed /* out */) 3875 { 3876 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc); 3877 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); 3878 int ret; 3879 3880 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); 3881 if (ret) 3882 return ret; 3883 3884 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) 3885 *changed = false; 3886 else 3887 *changed = true; 3888 3889 return 0; 3890 } 3891 3892 static uint32_t 3893 pipes_modified(struct drm_atomic_state *state) 3894 { 3895 struct drm_crtc *crtc; 3896 struct drm_crtc_state *cstate; 3897 uint32_t i, ret = 0; 3898 3899 for_each_crtc_in_state(state, crtc, cstate, i) 3900 ret |= drm_crtc_mask(crtc); 3901 3902 return ret; 3903 } 3904 3905 static int 3906 skl_compute_ddb(struct drm_atomic_state *state) 3907 { 3908 struct drm_device *dev = state->dev; 3909 struct drm_i915_private *dev_priv = to_i915(dev); 3910 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3911 struct intel_crtc *intel_crtc; 3912 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; 3913 uint32_t realloc_pipes = pipes_modified(state); 3914 int ret; 3915 3916 /* 3917 * If this is our first atomic update following hardware readout, 3918 * we can't trust the DDB that the BIOS programmed for us. Let's 3919 * pretend that all pipes switched active status so that we'll 3920 * ensure a full DDB recompute. 3921 */ 3922 if (dev_priv->wm.distrust_bios_wm) 3923 intel_state->active_pipe_changes = ~0; 3924 3925 /* 3926 * If the modeset changes which CRTC's are active, we need to 3927 * recompute the DDB allocation for *all* active pipes, even 3928 * those that weren't otherwise being modified in any way by this 3929 * atomic commit. Due to the shrinking of the per-pipe allocations 3930 * when new active CRTC's are added, it's possible for a pipe that 3931 * we were already using and aren't changing at all here to suddenly 3932 * become invalid if its DDB needs exceeds its new allocation. 3933 * 3934 * Note that if we wind up doing a full DDB recompute, we can't let 3935 * any other display updates race with this transaction, so we need 3936 * to grab the lock on *all* CRTC's. 3937 */ 3938 if (intel_state->active_pipe_changes) { 3939 realloc_pipes = ~0; 3940 intel_state->wm_results.dirty_pipes = ~0; 3941 } 3942 3943 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { 3944 struct intel_crtc_state *cstate; 3945 3946 cstate = intel_atomic_get_crtc_state(state, intel_crtc); 3947 if (IS_ERR(cstate)) 3948 return PTR_ERR(cstate); 3949 3950 ret = skl_allocate_pipe_ddb(cstate, ddb); 3951 if (ret) 3952 return ret; 3953 } 3954 3955 return 0; 3956 } 3957 3958 static int 3959 skl_compute_wm(struct drm_atomic_state *state) 3960 { 3961 struct drm_crtc *crtc; 3962 struct drm_crtc_state *cstate; 3963 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3964 struct skl_wm_values *results = &intel_state->wm_results; 3965 struct skl_pipe_wm *pipe_wm; 3966 bool changed = false; 3967 int ret, i; 3968 3969 /* 3970 * If this transaction isn't actually touching any CRTC's, don't 3971 * bother with watermark calculation. 
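 * (The for_each_crtc_in_state() loop just below exists only to set
 * 'changed': iterating even once proves the transaction touches a CRTC.)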
Note that if we pass this 3972 * test, we're guaranteed to hold at least one CRTC state mutex, 3973 * which means we can safely use values like dev_priv->active_crtcs 3974 * since any racing commits that want to update them would need to 3975 * hold _all_ CRTC state mutexes. 3976 */ 3977 for_each_crtc_in_state(state, crtc, cstate, i) 3978 changed = true; 3979 if (!changed) 3980 return 0; 3981 3982 /* Clear all dirty flags */ 3983 results->dirty_pipes = 0; 3984 3985 ret = skl_compute_ddb(state); 3986 if (ret) 3987 return ret; 3988 3989 /* 3990 * Calculate WM's for all pipes that are part of this transaction. 3991 * Note that the DDB allocation above may have added more CRTC's that 3992 * weren't otherwise being modified (and set bits in dirty_pipes) if 3993 * pipe allocations had to change. 3994 * 3995 * FIXME: Now that we're doing this in the atomic check phase, we 3996 * should allow skl_update_pipe_wm() to return failure in cases where 3997 * no suitable watermark values can be found. 3998 */ 3999 for_each_crtc_in_state(state, crtc, cstate, i) { 4000 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4001 struct intel_crtc_state *intel_cstate = 4002 to_intel_crtc_state(cstate); 4003 4004 pipe_wm = &intel_cstate->wm.skl.optimal; 4005 ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm, 4006 &changed); 4007 if (ret) 4008 return ret; 4009 4010 if (changed) 4011 results->dirty_pipes |= drm_crtc_mask(crtc); 4012 4013 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4014 /* This pipe's WM's did not change */ 4015 continue; 4016 4017 intel_cstate->update_wm_pre = true; 4018 skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc); 4019 } 4020 4021 return 0; 4022 } 4023 4024 static void skl_update_wm(struct drm_crtc *crtc) 4025 { 4026 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4027 struct drm_device *dev = crtc->dev; 4028 struct drm_i915_private *dev_priv = to_i915(dev); 4029 struct skl_wm_values *results = &dev_priv->wm.skl_results; 4030 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4031 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; 4032 4033 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4034 return; 4035 4036 intel_crtc->wm.active.skl = *pipe_wm; 4037 4038 mutex_lock(&dev_priv->wm.wm_mutex); 4039 4040 skl_write_wm_values(dev_priv, results); 4041 skl_flush_wm_values(dev_priv, results); 4042 4043 /* store the new configuration */ 4044 dev_priv->wm.skl_hw = *results; 4045 4046 mutex_unlock(&dev_priv->wm.wm_mutex); 4047 } 4048 4049 static void ilk_compute_wm_config(struct drm_device *dev, 4050 struct intel_wm_config *config) 4051 { 4052 struct intel_crtc *crtc; 4053 4054 /* Compute the currently _active_ config */ 4055 for_each_intel_crtc(dev, crtc) { 4056 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; 4057 4058 if (!wm->pipe_enabled) 4059 continue; 4060 4061 config->sprites_enabled |= wm->sprites_enabled; 4062 config->sprites_scaled |= wm->sprites_scaled; 4063 config->num_pipes_active++; 4064 } 4065 } 4066 4067 static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 4068 { 4069 struct drm_device *dev = &dev_priv->drm; 4070 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 4071 struct ilk_wm_maximums max; 4072 struct intel_wm_config config = {}; 4073 struct ilk_wm_values results = {}; 4074 enum intel_ddb_partitioning partitioning; 4075 4076 ilk_compute_wm_config(dev, &config); 4077 4078 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 4079 ilk_wm_merge(dev, &config, &max, 
&lp_wm_1_2); 4080 4081 /* 5/6 split only in single pipe config on IVB+ */ 4082 if (INTEL_INFO(dev)->gen >= 7 && 4083 config.num_pipes_active == 1 && config.sprites_enabled) { 4084 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 4085 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 4086 4087 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 4088 } else { 4089 best_lp_wm = &lp_wm_1_2; 4090 } 4091 4092 partitioning = (best_lp_wm == &lp_wm_1_2) ? 4093 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 4094 4095 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 4096 4097 ilk_write_wm_values(dev_priv, &results); 4098 } 4099 4100 static void ilk_initial_watermarks(struct intel_crtc_state *cstate) 4101 { 4102 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); 4103 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4104 4105 mutex_lock(&dev_priv->wm.wm_mutex); 4106 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; 4107 ilk_program_watermarks(dev_priv); 4108 mutex_unlock(&dev_priv->wm.wm_mutex); 4109 } 4110 4111 static void ilk_optimize_watermarks(struct intel_crtc_state *cstate) 4112 { 4113 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); 4114 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4115 4116 mutex_lock(&dev_priv->wm.wm_mutex); 4117 if (cstate->wm.need_postvbl_update) { 4118 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; 4119 ilk_program_watermarks(dev_priv); 4120 } 4121 mutex_unlock(&dev_priv->wm.wm_mutex); 4122 } 4123 4124 static void skl_pipe_wm_active_state(uint32_t val, 4125 struct skl_pipe_wm *active, 4126 bool is_transwm, 4127 bool is_cursor, 4128 int i, 4129 int level) 4130 { 4131 bool is_enabled = (val & PLANE_WM_EN) != 0; 4132 4133 if (!is_transwm) { 4134 if (!is_cursor) { 4135 active->wm[level].plane_en[i] = is_enabled; 4136 active->wm[level].plane_res_b[i] = 4137 val & PLANE_WM_BLOCKS_MASK; 4138 active->wm[level].plane_res_l[i] = 4139 (val >> PLANE_WM_LINES_SHIFT) & 4140 PLANE_WM_LINES_MASK; 4141 } else { 4142 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled; 4143 active->wm[level].plane_res_b[PLANE_CURSOR] = 4144 val & PLANE_WM_BLOCKS_MASK; 4145 active->wm[level].plane_res_l[PLANE_CURSOR] = 4146 (val >> PLANE_WM_LINES_SHIFT) & 4147 PLANE_WM_LINES_MASK; 4148 } 4149 } else { 4150 if (!is_cursor) { 4151 active->trans_wm.plane_en[i] = is_enabled; 4152 active->trans_wm.plane_res_b[i] = 4153 val & PLANE_WM_BLOCKS_MASK; 4154 active->trans_wm.plane_res_l[i] = 4155 (val >> PLANE_WM_LINES_SHIFT) & 4156 PLANE_WM_LINES_MASK; 4157 } else { 4158 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled; 4159 active->trans_wm.plane_res_b[PLANE_CURSOR] = 4160 val & PLANE_WM_BLOCKS_MASK; 4161 active->trans_wm.plane_res_l[PLANE_CURSOR] = 4162 (val >> PLANE_WM_LINES_SHIFT) & 4163 PLANE_WM_LINES_MASK; 4164 } 4165 } 4166 } 4167 4168 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4169 { 4170 struct drm_device *dev = crtc->dev; 4171 struct drm_i915_private *dev_priv = to_i915(dev); 4172 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 4173 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4174 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4175 struct skl_pipe_wm *active = &cstate->wm.skl.optimal; 4176 enum i915_pipe pipe = intel_crtc->pipe; 4177 int level, i, max_level; 4178 uint32_t temp; 4179 4180 max_level = ilk_wm_max_level(dev); 4181 4182 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 4183 4184 for (level = 0; level <= max_level; 
level++) { 4185 for (i = 0; i < intel_num_planes(intel_crtc); i++) 4186 hw->plane[pipe][i][level] = 4187 I915_READ(PLANE_WM(pipe, i, level)); 4188 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level)); 4189 } 4190 4191 for (i = 0; i < intel_num_planes(intel_crtc); i++) 4192 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); 4193 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe)); 4194 4195 if (!intel_crtc->active) 4196 return; 4197 4198 hw->dirty_pipes |= drm_crtc_mask(crtc); 4199 4200 active->linetime = hw->wm_linetime[pipe]; 4201 4202 for (level = 0; level <= max_level; level++) { 4203 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 4204 temp = hw->plane[pipe][i][level]; 4205 skl_pipe_wm_active_state(temp, active, false, 4206 false, i, level); 4207 } 4208 temp = hw->plane[pipe][PLANE_CURSOR][level]; 4209 skl_pipe_wm_active_state(temp, active, false, true, i, level); 4210 } 4211 4212 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 4213 temp = hw->plane_trans[pipe][i]; 4214 skl_pipe_wm_active_state(temp, active, true, false, i, 0); 4215 } 4216 4217 temp = hw->plane_trans[pipe][PLANE_CURSOR]; 4218 skl_pipe_wm_active_state(temp, active, true, true, i, 0); 4219 4220 intel_crtc->wm.active.skl = *active; 4221 } 4222 4223 void skl_wm_get_hw_state(struct drm_device *dev) 4224 { 4225 struct drm_i915_private *dev_priv = to_i915(dev); 4226 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 4227 struct drm_crtc *crtc; 4228 4229 skl_ddb_get_hw_state(dev_priv, ddb); 4230 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 4231 skl_pipe_wm_get_hw_state(crtc); 4232 4233 if (dev_priv->active_crtcs) { 4234 /* Fully recompute DDB on first atomic commit */ 4235 dev_priv->wm.distrust_bios_wm = true; 4236 } else { 4237 /* Easy/common case; just sanitize DDB now if everything off */ 4238 memset(ddb, 0, sizeof(*ddb)); 4239 } 4240 } 4241 4242 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4243 { 4244 struct drm_device *dev = crtc->dev; 4245 struct drm_i915_private *dev_priv = to_i915(dev); 4246 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4247 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4248 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4249 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; 4250 enum i915_pipe pipe = intel_crtc->pipe; 4251 static const i915_reg_t wm0_pipe_reg[] = { 4252 [PIPE_A] = WM0_PIPEA_ILK, 4253 [PIPE_B] = WM0_PIPEB_ILK, 4254 [PIPE_C] = WM0_PIPEC_IVB, 4255 }; 4256 4257 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 4258 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4259 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 4260 4261 memset(active, 0, sizeof(*active)); 4262 4263 active->pipe_enabled = intel_crtc->active; 4264 4265 if (active->pipe_enabled) { 4266 u32 tmp = hw->wm_pipe[pipe]; 4267 4268 /* 4269 * For active pipes LP0 watermark is marked as 4270 * enabled, and LP1+ watermarks as disabled since 4271 * we can't really reverse compute them in case 4272 * multiple pipes are active. 
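 * (The LP1+ registers hold values merged across all active pipes by
 * ilk_wm_merge(), so a per-pipe contribution cannot be recovered from
 * them once more than one pipe is lit.)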
4273 */ 4274 active->wm[0].enable = true; 4275 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; 4276 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; 4277 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; 4278 active->linetime = hw->wm_linetime[pipe]; 4279 } else { 4280 int level, max_level = ilk_wm_max_level(dev); 4281 4282 /* 4283 * For inactive pipes, all watermark levels 4284 * should be marked as enabled but zeroed, 4285 * which is what we'd compute them to. 4286 */ 4287 for (level = 0; level <= max_level; level++) 4288 active->wm[level].enable = true; 4289 } 4290 4291 intel_crtc->wm.active.ilk = *active; 4292 } 4293 4294 #define _FW_WM(value, plane) \ 4295 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) 4296 #define _FW_WM_VLV(value, plane) \ 4297 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) 4298 4299 static void vlv_read_wm_values(struct drm_i915_private *dev_priv, 4300 struct vlv_wm_values *wm) 4301 { 4302 enum i915_pipe pipe; 4303 uint32_t tmp; 4304 4305 for_each_pipe(dev_priv, pipe) { 4306 tmp = I915_READ(VLV_DDL(pipe)); 4307 4308 wm->ddl[pipe].primary = 4309 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4310 wm->ddl[pipe].cursor = 4311 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4312 wm->ddl[pipe].sprite[0] = 4313 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4314 wm->ddl[pipe].sprite[1] = 4315 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 4316 } 4317 4318 tmp = I915_READ(DSPFW1); 4319 wm->sr.plane = _FW_WM(tmp, SR); 4320 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); 4321 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); 4322 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); 4323 4324 tmp = I915_READ(DSPFW2); 4325 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); 4326 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); 4327 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); 4328 4329 tmp = I915_READ(DSPFW3); 4330 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 4331 4332 if (IS_CHERRYVIEW(dev_priv)) { 4333 tmp = I915_READ(DSPFW7_CHV); 4334 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); 4335 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); 4336 4337 tmp = I915_READ(DSPFW8_CHV); 4338 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); 4339 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); 4340 4341 tmp = I915_READ(DSPFW9_CHV); 4342 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); 4343 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); 4344 4345 tmp = I915_READ(DSPHOWM); 4346 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 4347 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 4348 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; 4349 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; 4350 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; 4351 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 4352 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; 4353 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 4354 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 4355 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; 4356 } else { 4357 tmp = I915_READ(DSPFW7); 4358 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); 4359 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); 4360 4361 tmp = I915_READ(DSPHOWM); 4362 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 4363 
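		/*
		 * DSPHOWM supplies one extra high bit per watermark field:
		 * the plane and sprite fields in the DSPFW registers are 8
		 * bits wide (hence the << 8 below), while the SR plane field
		 * is 9 bits wide (hence the << 9 above).
		 */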
wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; 4364 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 4365 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; 4366 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 4367 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 4368 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; 4369 } 4370 } 4371 4372 #undef _FW_WM 4373 #undef _FW_WM_VLV 4374 4375 void vlv_wm_get_hw_state(struct drm_device *dev) 4376 { 4377 struct drm_i915_private *dev_priv = to_i915(dev); 4378 struct vlv_wm_values *wm = &dev_priv->wm.vlv; 4379 struct intel_plane *plane; 4380 enum i915_pipe pipe; 4381 u32 val; 4382 4383 vlv_read_wm_values(dev_priv, wm); 4384 4385 for_each_intel_plane(dev, plane) { 4386 switch (plane->base.type) { 4387 int sprite; 4388 case DRM_PLANE_TYPE_CURSOR: 4389 plane->wm.fifo_size = 63; 4390 break; 4391 case DRM_PLANE_TYPE_PRIMARY: 4392 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0); 4393 break; 4394 case DRM_PLANE_TYPE_OVERLAY: 4395 sprite = plane->plane; 4396 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1); 4397 break; 4398 } 4399 } 4400 4401 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 4402 wm->level = VLV_WM_LEVEL_PM2; 4403 4404 if (IS_CHERRYVIEW(dev_priv)) { 4405 mutex_lock(&dev_priv->rps.hw_lock); 4406 4407 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 4408 if (val & DSP_MAXFIFO_PM5_ENABLE) 4409 wm->level = VLV_WM_LEVEL_PM5; 4410 4411 /* 4412 * If DDR DVFS is disabled in the BIOS, Punit 4413 * will never ack the request. So if that happens 4414 * assume we don't have to enable/disable DDR DVFS 4415 * dynamically. To test that just set the REQ_ACK 4416 * bit to poke the Punit, but don't change the 4417 * HIGH/LOW bits so that we don't actually change 4418 * the current state. 
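 * If the ack bit never clears within the 3ms wait below, we conclude
 * DDR DVFS is disabled in the BIOS and cap dev_priv->wm.max_level at
 * PM5; otherwise the (unchanged) HIGH/LOW bits tell us whether the
 * DDR DVFS level is the one currently in use.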
4419 */ 4420 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 4421 val |= FORCE_DDR_FREQ_REQ_ACK; 4422 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 4423 4424 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 4425 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 4426 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " 4427 "assuming DDR DVFS is disabled\n"); 4428 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; 4429 } else { 4430 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 4431 if ((val & FORCE_DDR_HIGH_FREQ) == 0) 4432 wm->level = VLV_WM_LEVEL_DDR_DVFS; 4433 } 4434 4435 mutex_unlock(&dev_priv->rps.hw_lock); 4436 } 4437 4438 for_each_pipe(dev_priv, pipe) 4439 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", 4440 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, 4441 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); 4442 4443 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", 4444 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); 4445 } 4446 4447 void ilk_wm_get_hw_state(struct drm_device *dev) 4448 { 4449 struct drm_i915_private *dev_priv = to_i915(dev); 4450 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4451 struct drm_crtc *crtc; 4452 4453 for_each_crtc(dev, crtc) 4454 ilk_pipe_wm_get_hw_state(crtc); 4455 4456 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 4457 hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 4458 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 4459 4460 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 4461 if (INTEL_INFO(dev)->gen >= 7) { 4462 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 4463 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 4464 } 4465 4466 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4467 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 4468 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4469 else if (IS_IVYBRIDGE(dev)) 4470 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 4471 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4472 4473 hw->enable_fbc_wm = 4474 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 4475 } 4476 4477 /** 4478 * intel_update_watermarks - update FIFO watermark values based on current modes 4479 * 4480 * Calculate watermark values for the various WM regs based on current mode 4481 * and plane configuration. 4482 * 4483 * There are several cases to deal with here: 4484 * - normal (i.e. non-self-refresh) 4485 * - self-refresh (SR) mode 4486 * - lines are large relative to FIFO size (buffer can hold up to 2) 4487 * - lines are small relative to FIFO size (buffer can hold more than 2 4488 * lines), so need to account for TLB latency 4489 * 4490 * The normal calculation is: 4491 * watermark = dotclock * bytes per pixel * latency 4492 * where latency is platform & configuration dependent (we assume pessimal 4493 * values here). 4494 * 4495 * The SR calculation is: 4496 * watermark = (trunc(latency/line time)+1) * surface width * 4497 * bytes per pixel 4498 * where 4499 * line time = htotal / dotclock 4500 * surface width = hdisplay for normal plane and 64 for cursor 4501 * and latency is assumed to be high, as above. 4502 * 4503 * The final value programmed to the register should always be rounded up, 4504 * and include an extra 2 entries to account for clock crossings. 4505 * 4506 * We don't use the sprite, so we can ignore that. And on Crestline we have 4507 * to set the non-SR watermarks to 8. 
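 * As a worked example of the normal case (the numbers are illustrative,
 * not from any particular platform): a 135MHz dot clock at 4 bytes per
 * pixel with 200ns of latency drains 135000000 * 4 * 200 / 10^9 = 108
 * bytes during the stall, so the programmed watermark must cover that,
 * plus the extra 2 entries, rounded up.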
4508 */ 4509 void intel_update_watermarks(struct drm_crtc *crtc) 4510 { 4511 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 4512 4513 if (dev_priv->display.update_wm) 4514 dev_priv->display.update_wm(crtc); 4515 } 4516 4517 /* 4518 * Lock protecting IPS related data structures 4519 */ 4520 DEFINE_SPINLOCK(mchdev_lock); 4521 4522 /* Global for IPS driver to get at the current i915 device. Protected by 4523 * mchdev_lock. */ 4524 static struct drm_i915_private *i915_mch_dev; 4525 4526 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) 4527 { 4528 u16 rgvswctl; 4529 4530 assert_spin_locked(&mchdev_lock); 4531 4532 rgvswctl = I915_READ16(MEMSWCTL); 4533 if (rgvswctl & MEMCTL_CMD_STS) { 4534 DRM_DEBUG("gpu busy, RCS change rejected\n"); 4535 return false; /* still busy with another command */ 4536 } 4537 4538 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 4539 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 4540 I915_WRITE16(MEMSWCTL, rgvswctl); 4541 POSTING_READ16(MEMSWCTL); 4542 4543 rgvswctl |= MEMCTL_CMD_STS; 4544 I915_WRITE16(MEMSWCTL, rgvswctl); 4545 4546 return true; 4547 } 4548 4549 static void ironlake_enable_drps(struct drm_i915_private *dev_priv) 4550 { 4551 u32 rgvmodectl; 4552 u8 fmax, fmin, fstart, vstart; 4553 4554 spin_lock_irq(&mchdev_lock); 4555 4556 rgvmodectl = I915_READ(MEMMODECTL); 4557 4558 /* Enable temp reporting */ 4559 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 4560 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 4561 4562 /* 100ms RC evaluation intervals */ 4563 I915_WRITE(RCUPEI, 100000); 4564 I915_WRITE(RCDNEI, 100000); 4565 4566 /* Set max/min thresholds to 90ms and 80ms respectively */ 4567 I915_WRITE(RCBMAXAVG, 90000); 4568 I915_WRITE(RCBMINAVG, 80000); 4569 4570 I915_WRITE(MEMIHYST, 1); 4571 4572 /* Set up min, max, and cur for interrupt handling */ 4573 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 4574 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 4575 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 4576 MEMMODE_FSTART_SHIFT; 4577 4578 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >> 4579 PXVFREQ_PX_SHIFT; 4580 4581 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 4582 dev_priv->ips.fstart = fstart; 4583 4584 dev_priv->ips.max_delay = fstart; 4585 dev_priv->ips.min_delay = fmin; 4586 dev_priv->ips.cur_delay = fstart; 4587 4588 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 4589 fmax, fmin, fstart); 4590 4591 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 4592 4593 /* 4594 * Interrupts will be enabled in ironlake_irq_postinstall 4595 */ 4596 4597 I915_WRITE(VIDSTART, vstart); 4598 POSTING_READ(VIDSTART); 4599 4600 rgvmodectl |= MEMMODE_SWMODE_EN; 4601 I915_WRITE(MEMMODECTL, rgvmodectl); 4602 4603 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 4604 DRM_ERROR("stuck trying to change perf mode\n"); 4605 mdelay(1); 4606 4607 ironlake_set_drps(dev_priv, fstart); 4608 4609 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 4610 I915_READ(DDREC) + I915_READ(CSIEC); 4611 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 4612 dev_priv->ips.last_count2 = I915_READ(GFXEC); 4613 dev_priv->ips.last_time2 = ktime_get_raw_ns(); 4614 4615 spin_unlock_irq(&mchdev_lock); 4616 } 4617 4618 static void ironlake_disable_drps(struct drm_i915_private *dev_priv) 4619 { 4620 u16 rgvswctl; 4621 4622 spin_lock_irq(&mchdev_lock); 4623 4624 rgvswctl = I915_READ16(MEMSWCTL); 4625 4626 /* Ack interrupts, disable EFC interrupt */ 4627 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & 
~MEMINT_EVAL_CHG_EN); 4628 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 4629 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 4630 I915_WRITE(DEIIR, DE_PCU_EVENT); 4631 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 4632 4633 /* Go back to the starting frequency */ 4634 ironlake_set_drps(dev_priv, dev_priv->ips.fstart); 4635 mdelay(1); 4636 rgvswctl |= MEMCTL_CMD_STS; 4637 I915_WRITE(MEMSWCTL, rgvswctl); 4638 mdelay(1); 4639 4640 spin_unlock_irq(&mchdev_lock); 4641 } 4642 4643 /* There's a funny hw issue where the hw returns all 0 when reading from 4644 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value 4645 * ourselves, instead of doing a rmw cycle (which might result in us clearing 4646 * all limits and the gpu stuck at whatever frequency it is at atm). 4647 */ 4648 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) 4649 { 4650 u32 limits; 4651 4652 /* Only set the down limit when we've reached the lowest level to avoid 4653 * getting more interrupts, otherwise leave this clear. This prevents a 4654 * race in the hw when coming out of rc6: There's a tiny window where 4655 * the hw runs at the minimal clock before selecting the desired 4656 * frequency, if the down threshold expires in that window we will not 4657 * receive a down interrupt. */ 4658 if (IS_GEN9(dev_priv)) { 4659 limits = (dev_priv->rps.max_freq_softlimit) << 23; 4660 if (val <= dev_priv->rps.min_freq_softlimit) 4661 limits |= (dev_priv->rps.min_freq_softlimit) << 14; 4662 } else { 4663 limits = dev_priv->rps.max_freq_softlimit << 24; 4664 if (val <= dev_priv->rps.min_freq_softlimit) 4665 limits |= dev_priv->rps.min_freq_softlimit << 16; 4666 } 4667 4668 return limits; 4669 } 4670 4671 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) 4672 { 4673 int new_power; 4674 u32 threshold_up = 0, threshold_down = 0; /* in % */ 4675 u32 ei_up = 0, ei_down = 0; 4676 4677 new_power = dev_priv->rps.power; 4678 switch (dev_priv->rps.power) { 4679 case LOW_POWER: 4680 if (val > dev_priv->rps.efficient_freq + 1 && 4681 val > dev_priv->rps.cur_freq) 4682 new_power = BETWEEN; 4683 break; 4684 4685 case BETWEEN: 4686 if (val <= dev_priv->rps.efficient_freq && 4687 val < dev_priv->rps.cur_freq) 4688 new_power = LOW_POWER; 4689 else if (val >= dev_priv->rps.rp0_freq && 4690 val > dev_priv->rps.cur_freq) 4691 new_power = HIGH_POWER; 4692 break; 4693 4694 case HIGH_POWER: 4695 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && 4696 val < dev_priv->rps.cur_freq) 4697 new_power = BETWEEN; 4698 break; 4699 } 4700 /* Max/min bins are special */ 4701 if (val <= dev_priv->rps.min_freq_softlimit) 4702 new_power = LOW_POWER; 4703 if (val >= dev_priv->rps.max_freq_softlimit) 4704 new_power = HIGH_POWER; 4705 if (new_power == dev_priv->rps.power) 4706 return; 4707 4708 /* Note the units here are not exactly 1us, but 1280ns. 
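 * The thresholds are then programmed as absolute busy time per
 * evaluation interval, e.g. the LOW_POWER case below upclocks after
 * more than 16000 * 95 / 100 = 15200 "us" of busyness within a
 * 16000 "us" window.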
*/ 4709 switch (new_power) { 4710 case LOW_POWER: 4711 /* Upclock if more than 95% busy over 16ms */ 4712 ei_up = 16000; 4713 threshold_up = 95; 4714 4715 /* Downclock if less than 85% busy over 32ms */ 4716 ei_down = 32000; 4717 threshold_down = 85; 4718 break; 4719 4720 case BETWEEN: 4721 /* Upclock if more than 90% busy over 13ms */ 4722 ei_up = 13000; 4723 threshold_up = 90; 4724 4725 /* Downclock if less than 75% busy over 32ms */ 4726 ei_down = 32000; 4727 threshold_down = 75; 4728 break; 4729 4730 case HIGH_POWER: 4731 /* Upclock if more than 85% busy over 10ms */ 4732 ei_up = 10000; 4733 threshold_up = 85; 4734 4735 /* Downclock if less than 60% busy over 32ms */ 4736 ei_down = 32000; 4737 threshold_down = 60; 4738 break; 4739 } 4740 4741 I915_WRITE(GEN6_RP_UP_EI, 4742 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4743 I915_WRITE(GEN6_RP_UP_THRESHOLD, 4744 GT_INTERVAL_FROM_US(dev_priv, 4745 ei_up * threshold_up / 100)); 4746 4747 I915_WRITE(GEN6_RP_DOWN_EI, 4748 GT_INTERVAL_FROM_US(dev_priv, ei_down)); 4749 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 4750 GT_INTERVAL_FROM_US(dev_priv, 4751 ei_down * threshold_down / 100)); 4752 4753 I915_WRITE(GEN6_RP_CONTROL, 4754 GEN6_RP_MEDIA_TURBO | 4755 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4756 GEN6_RP_MEDIA_IS_GFX | 4757 GEN6_RP_ENABLE | 4758 GEN6_RP_UP_BUSY_AVG | 4759 GEN6_RP_DOWN_IDLE_AVG); 4760 4761 dev_priv->rps.power = new_power; 4762 dev_priv->rps.up_threshold = threshold_up; 4763 dev_priv->rps.down_threshold = threshold_down; 4764 dev_priv->rps.last_adj = 0; 4765 } 4766 4767 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) 4768 { 4769 u32 mask = 0; 4770 4771 if (val > dev_priv->rps.min_freq_softlimit) 4772 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4773 if (val < dev_priv->rps.max_freq_softlimit) 4774 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4775 4776 mask &= dev_priv->pm_rps_events; 4777 4778 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); 4779 } 4780 4781 /* gen6_set_rps is called to update the frequency request, but should also be 4782 * called when the range (min_delay and max_delay) is modified so that we can 4783 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4784 static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) 4785 { 4786 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4787 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 4788 return; 4789 4790 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4791 WARN_ON(val > dev_priv->rps.max_freq); 4792 WARN_ON(val < dev_priv->rps.min_freq); 4793 4794 /* min/max delay may still have been modified so be sure to 4795 * write the limits value. 4796 */ 4797 if (val != dev_priv->rps.cur_freq) { 4798 gen6_set_rps_thresholds(dev_priv, val); 4799 4800 if (IS_GEN9(dev_priv)) 4801 I915_WRITE(GEN6_RPNSWREQ, 4802 GEN9_FREQUENCY(val)); 4803 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 4804 I915_WRITE(GEN6_RPNSWREQ, 4805 HSW_FREQUENCY(val)); 4806 else 4807 I915_WRITE(GEN6_RPNSWREQ, 4808 GEN6_FREQUENCY(val) | 4809 GEN6_OFFSET(0) | 4810 GEN6_AGGRESSIVE_TURBO); 4811 } 4812 4813 /* Make sure we continue to get interrupts 4814 * until we hit the minimum or maximum frequencies. 
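 * gen6_rps_pm_mask() above encodes that: DOWN events stay unmasked
 * until val reaches min_freq_softlimit, and UP events until it
 * reaches max_freq_softlimit.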
4815 */ 4816 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); 4817 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4818 4819 POSTING_READ(GEN6_RPNSWREQ); 4820 4821 dev_priv->rps.cur_freq = val; 4822 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4823 } 4824 4825 static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) 4826 { 4827 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4828 WARN_ON(val > dev_priv->rps.max_freq); 4829 WARN_ON(val < dev_priv->rps.min_freq); 4830 4831 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), 4832 "Odd GPU freq value\n")) 4833 val &= ~1; 4834 4835 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4836 4837 if (val != dev_priv->rps.cur_freq) { 4838 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 4839 if (!IS_CHERRYVIEW(dev_priv)) 4840 gen6_set_rps_thresholds(dev_priv, val); 4841 } 4842 4843 dev_priv->rps.cur_freq = val; 4844 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4845 } 4846 4847 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down 4848 * 4849 * If Gfx is Idle, then 4850 * 1. Forcewake Media well. 4851 * 2. Request idle freq. 4852 * 3. Release Forcewake of Media well. 4853 */ 4854 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 4855 { 4856 u32 val = dev_priv->rps.idle_freq; 4857 4858 if (dev_priv->rps.cur_freq <= val) 4859 return; 4860 4861 /* Wake up the media well, as that takes a lot less 4862 * power than the Render well. */ 4863 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 4864 valleyview_set_rps(dev_priv, val); 4865 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 4866 } 4867 4868 void gen6_rps_busy(struct drm_i915_private *dev_priv) 4869 { 4870 mutex_lock(&dev_priv->rps.hw_lock); 4871 if (dev_priv->rps.enabled) { 4872 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) 4873 gen6_rps_reset_ei(dev_priv); 4874 I915_WRITE(GEN6_PMINTRMSK, 4875 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 4876 4877 gen6_enable_rps_interrupts(dev_priv); 4878 4879 /* Ensure we start at the user's desired frequency */ 4880 intel_set_rps(dev_priv, 4881 clamp(dev_priv->rps.cur_freq, 4882 dev_priv->rps.min_freq_softlimit, 4883 dev_priv->rps.max_freq_softlimit)); 4884 } 4885 mutex_unlock(&dev_priv->rps.hw_lock); 4886 } 4887 4888 void gen6_rps_idle(struct drm_i915_private *dev_priv) 4889 { 4890 /* Flush our bottom-half so that it does not race with us 4891 * setting the idle frequency and so that it is bounded by 4892 * our rpm wakeref. And then disable the interrupts to stop any 4893 * further RPS reclocking whilst we are asleep. 4894 */ 4895 gen6_disable_rps_interrupts(dev_priv); 4896 4897 mutex_lock(&dev_priv->rps.hw_lock); 4898 if (dev_priv->rps.enabled) { 4899 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4900 vlv_set_rps_idle(dev_priv); 4901 else 4902 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); 4903 dev_priv->rps.last_adj = 0; 4904 I915_WRITE(GEN6_PMINTRMSK, 4905 gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 4906 } 4907 mutex_unlock(&dev_priv->rps.hw_lock); 4908 4909 lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE); 4910 while (!list_empty(&dev_priv->rps.clients)) 4911 list_del_init(dev_priv->rps.clients.next); 4912 lockmgr(&dev_priv->rps.client_lock, LK_RELEASE); 4913 } 4914 4915 void gen6_rps_boost(struct drm_i915_private *dev_priv, 4916 struct intel_rps_client *rps, 4917 unsigned long submitted) 4918 { 4919 /* This is intentionally racy! We peek at the state here, then 4920 * validate inside the RPS worker. 4921 */
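	/*
	 * Taking rps.hw_lock here would serialise every request
	 * submission; the worst a stale peek can cause is a missed or
	 * redundant boost, which the worker's re-check tolerates.
	 */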
4922 if (!(dev_priv->gt.awake && 4923 dev_priv->rps.enabled && 4924 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq)) 4925 return; 4926 4927 /* Force an RPS boost (and don't count it against the client) if 4928 * the GPU is severely congested. 4929 */ 4930 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) 4931 rps = NULL; 4932 4933 lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE); 4934 if (rps == NULL || list_empty(&rps->link)) { 4935 spin_lock_irq(&dev_priv->irq_lock); 4936 if (dev_priv->rps.interrupts_enabled) { 4937 dev_priv->rps.client_boost = true; 4938 schedule_work(&dev_priv->rps.work); 4939 } 4940 spin_unlock_irq(&dev_priv->irq_lock); 4941 4942 if (rps != NULL) { 4943 list_add(&rps->link, &dev_priv->rps.clients); 4944 rps->boosts++; 4945 } else 4946 dev_priv->rps.boosts++; 4947 } 4948 lockmgr(&dev_priv->rps.client_lock, LK_RELEASE); 4949 } 4950 4951 void intel_set_rps(struct drm_i915_private *dev_priv, u8 val) 4952 { 4953 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4954 valleyview_set_rps(dev_priv, val); 4955 else 4956 gen6_set_rps(dev_priv, val); 4957 } 4958 4959 static void gen9_disable_rc6(struct drm_i915_private *dev_priv) 4960 { 4961 I915_WRITE(GEN6_RC_CONTROL, 0); 4962 I915_WRITE(GEN9_PG_ENABLE, 0); 4963 } 4964 4965 static void gen9_disable_rps(struct drm_i915_private *dev_priv) 4966 { 4967 I915_WRITE(GEN6_RP_CONTROL, 0); 4968 } 4969 4970 static void gen6_disable_rps(struct drm_i915_private *dev_priv) 4971 { 4972 I915_WRITE(GEN6_RC_CONTROL, 0); 4973 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 4974 I915_WRITE(GEN6_RP_CONTROL, 0); 4975 } 4976 4977 static void cherryview_disable_rps(struct drm_i915_private *dev_priv) 4978 { 4979 I915_WRITE(GEN6_RC_CONTROL, 0); 4980 } 4981 4982 static void valleyview_disable_rps(struct drm_i915_private *dev_priv) 4983 { 4984 /* We do forcewake before disabling RC6, 4985 * as this is what the BIOS expects when going into suspend */ 4986 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4987 4988 I915_WRITE(GEN6_RC_CONTROL, 0); 4989 4990 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4991 } 4992 4993 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) 4994 { 4995 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4996 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 4997 mode = GEN6_RC_CTL_RC6_ENABLE; 4998 else 4999 mode = 0; 5000 } 5001 if (HAS_RC6p(dev_priv)) 5002 DRM_DEBUG_DRIVER("Enabling RC6 states: " 5003 "RC6 %s RC6p %s RC6pp %s\n", 5004 onoff(mode & GEN6_RC_CTL_RC6_ENABLE), 5005 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), 5006 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); 5007 5008 else 5009 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n", 5010 onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); 5011 } 5012 5013 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) 5014 { 5015 struct i915_ggtt *ggtt = &dev_priv->ggtt; 5016 bool enable_rc6 = true; 5017 unsigned long rc6_ctx_base; 5018 u32 rc_ctl; 5019 int rc_sw_target; 5020 5021 rc_ctl = I915_READ(GEN6_RC_CONTROL); 5022 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> 5023 RC_SW_TARGET_STATE_SHIFT; 5024 DRM_DEBUG_DRIVER("BIOS enabled RC states: " 5025 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", 5026 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), 5027 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), 5028 rc_sw_target); 5029 5030 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { 5031 DRM_DEBUG_DRIVER("RC6 Base location not set 
properly.\n"); 5032 enable_rc6 = false; 5033 } 5034 5035 /* 5036 * The exact context size is not known for BXT, so assume a page size 5037 * for this check. 5038 */ 5039 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; 5040 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && 5041 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + 5042 ggtt->stolen_reserved_size))) { 5043 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); 5044 enable_rc6 = false; 5045 } 5046 5047 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && 5048 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && 5049 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && 5050 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { 5051 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); 5052 enable_rc6 = false; 5053 } 5054 5055 if (!I915_READ(GEN8_PUSHBUS_CONTROL) || 5056 !I915_READ(GEN8_PUSHBUS_ENABLE) || 5057 !I915_READ(GEN8_PUSHBUS_SHIFT)) { 5058 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); 5059 enable_rc6 = false; 5060 } 5061 5062 if (!I915_READ(GEN6_GFXPAUSE)) { 5063 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); 5064 enable_rc6 = false; 5065 } 5066 5067 if (!I915_READ(GEN8_MISC_CTRL0)) { 5068 DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); 5069 enable_rc6 = false; 5070 } 5071 5072 return enable_rc6; 5073 } 5074 5075 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) 5076 { 5077 /* No RC6 before Ironlake and code is gone for ilk. */ 5078 if (INTEL_INFO(dev_priv)->gen < 6) 5079 return 0; 5080 5081 if (!enable_rc6) 5082 return 0; 5083 5084 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { 5085 DRM_INFO("RC6 disabled by BIOS\n"); 5086 return 0; 5087 } 5088 5089 /* Respect the kernel parameter if it is set */ 5090 if (enable_rc6 >= 0) { 5091 int mask; 5092 5093 if (HAS_RC6p(dev_priv)) 5094 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 5095 INTEL_RC6pp_ENABLE; 5096 else 5097 mask = INTEL_RC6_ENABLE; 5098 5099 if ((enable_rc6 & mask) != enable_rc6) 5100 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d " 5101 "(requested %d, valid %d)\n", 5102 enable_rc6 & mask, enable_rc6, mask); 5103 5104 return enable_rc6 & mask; 5105 } 5106 5107 if (IS_IVYBRIDGE(dev_priv)) 5108 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 5109 5110 return INTEL_RC6_ENABLE; 5111 } 5112 5113 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) 5114 { 5115 /* All of these values are in units of 50MHz */ 5116 5117 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 5118 if (IS_BROXTON(dev_priv)) { 5119 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 5120 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 5121 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 5122 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; 5123 } else { 5124 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 5125 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 5126 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 5127 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 5128 } 5129 /* hw_max = RP0 until we check for overclocking */ 5130 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 5131 5132 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 5133 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || 5134 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 5135 u32 ddcc_status = 0; 5136 5137 if (sandybridge_pcode_read(dev_priv, 5138 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 5139 &ddcc_status) == 0) 5140 
dev_priv->rps.efficient_freq = 5141 clamp_t(u8, 5142 ((ddcc_status >> 8) & 0xff), 5143 dev_priv->rps.min_freq, 5144 dev_priv->rps.max_freq); 5145 } 5146 5147 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 5148 /* Store the frequency values in 16.66 MHz units, which is 5149 * the natural hardware unit for SKL 5150 */ 5151 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 5152 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; 5153 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; 5154 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; 5155 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; 5156 } 5157 } 5158 5159 static void reset_rps(struct drm_i915_private *dev_priv, 5160 void (*set)(struct drm_i915_private *, u8)) 5161 { 5162 u8 freq = dev_priv->rps.cur_freq; 5163 5164 /* force a reset */ 5165 dev_priv->rps.power = -1; 5166 dev_priv->rps.cur_freq = -1; 5167 5168 set(dev_priv, freq); 5169 } 5170 5171 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 5172 static void gen9_enable_rps(struct drm_i915_private *dev_priv) 5173 { 5174 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5175 5176 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 5177 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 5178 /* 5179 * BIOS could leave the HW Turbo enabled, so explicitly 5180 * clear out the Control register just to avoid an inconsistency 5181 * with the debugfs interface, which would otherwise still show 5182 * Turbo as enabled, something the User does not expect after 5183 * adding WaGsvDisableTurbo. Apart from this there is no problem even 5184 * if the Turbo is left enabled in the Control register, as the 5185 * Up/Down interrupts would remain masked. 5186 */ 5187 gen9_disable_rps(dev_priv); 5188 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5189 return; 5190 } 5191 5192 /* Program defaults and thresholds for RPS */ 5193 I915_WRITE(GEN6_RC_VIDEO_FREQ, 5194 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 5195 5196 /* 1 second timeout */ 5197 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 5198 GT_INTERVAL_FROM_US(dev_priv, 1000000)); 5199 5200 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); 5201 5202 /* Leaning on the below call to gen6_set_rps to program/setup the 5203 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 5204 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 5205 reset_rps(dev_priv, gen6_set_rps); 5206 5207 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5208 } 5209 5210 static void gen9_enable_rc6(struct drm_i915_private *dev_priv) 5211 { 5212 struct intel_engine_cs *engine; 5213 uint32_t rc6_mask = 0; 5214 5215 /* 1a: Software RC state - RC0 */ 5216 I915_WRITE(GEN6_RC_STATE, 0); 5217 5218 /* 1b: Get forcewake during program sequence. Although the driver 5219 * hasn't enabled a state yet where we need forcewake, BIOS may have. */ 5220 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5221 5222 /* 2a: Disable RC states. 
*/ 5223 I915_WRITE(GEN6_RC_CONTROL, 0); 5224 5225 /* 2b: Program RC6 thresholds.*/ 5226 5227 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 5228 if (IS_SKYLAKE(dev_priv)) 5229 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 5230 else 5231 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 5232 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 5233 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 5234 for_each_engine(engine, dev_priv) 5235 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5236 5237 if (HAS_GUC(dev_priv)) 5238 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 5239 5240 I915_WRITE(GEN6_RC_SLEEP, 0); 5241 5242 /* 2c: Program Coarse Power Gating Policies. */ 5243 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); 5244 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 5245 5246 /* 3a: Enable RC6 */ 5247 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 5248 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5249 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); 5250 /* WaRsUseTimeoutMode */ 5251 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) || 5252 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 5253 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 5254 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5255 GEN7_RC_CTL_TO_MODE | 5256 rc6_mask); 5257 } else { 5258 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 5259 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5260 GEN6_RC_CTL_EI_MODE(1) | 5261 rc6_mask); 5262 } 5263 5264 /* 5265 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 5266 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 5267 */ 5268 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) 5269 I915_WRITE(GEN9_PG_ENABLE, 0); 5270 else 5271 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 5272 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 5273 5274 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5275 } 5276 5277 static void gen8_enable_rps(struct drm_i915_private *dev_priv) 5278 { 5279 struct intel_engine_cs *engine; 5280 uint32_t rc6_mask = 0; 5281 5282 /* 1a: Software RC state - RC0 */ 5283 I915_WRITE(GEN6_RC_STATE, 0); 5284 5285 /* 1c & 1d: Get forcewake during program sequence. Although the driver 5286 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 5287 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5288 5289 /* 2a: Disable RC states. 
*/ 5290 I915_WRITE(GEN6_RC_CONTROL, 0); 5291 5292 /* 2b: Program RC6 thresholds. */ 5293 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5294 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 5295 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 5296 for_each_engine(engine, dev_priv) 5297 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5298 I915_WRITE(GEN6_RC_SLEEP, 0); 5299 if (IS_BROADWELL(dev_priv)) 5300 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 5301 else 5302 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 5303 5304 /* 3: Enable RC6 */ 5305 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 5306 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5307 intel_print_rc6_info(dev_priv, rc6_mask); 5308 if (IS_BROADWELL(dev_priv)) 5309 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5310 GEN7_RC_CTL_TO_MODE | 5311 rc6_mask); 5312 else 5313 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5314 GEN6_RC_CTL_EI_MODE(1) | 5315 rc6_mask); 5316 5317 /* 4: Program defaults and thresholds for RPS */ 5318 I915_WRITE(GEN6_RPNSWREQ, 5319 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 5320 I915_WRITE(GEN6_RC_VIDEO_FREQ, 5321 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 5322 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ 5323 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ 5324 5325 /* Docs recommend 900MHz, and 300 MHz respectively */ 5326 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 5327 dev_priv->rps.max_freq_softlimit << 24 | 5328 dev_priv->rps.min_freq_softlimit << 16); 5329 5330 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 5331 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */ 5332 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ 5333 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ 5334 5335 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5336 5337 /* 5: Enable RPS */ 5338 I915_WRITE(GEN6_RP_CONTROL, 5339 GEN6_RP_MEDIA_TURBO | 5340 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5341 GEN6_RP_MEDIA_IS_GFX | 5342 GEN6_RP_ENABLE | 5343 GEN6_RP_UP_BUSY_AVG | 5344 GEN6_RP_DOWN_IDLE_AVG); 5345 5346 /* 6: Ring frequency + overclocking (our driver does this later) */ 5347 5348 reset_rps(dev_priv, gen6_set_rps); 5349 5350 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5351 } 5352 5353 static void gen6_enable_rps(struct drm_i915_private *dev_priv) 5354 { 5355 struct intel_engine_cs *engine; 5356 u32 rc6vids, rc6_mask = 0; 5357 u32 gtfifodbg; 5358 int rc6_mode; 5359 int ret; 5360 5361 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5362 5363 /* Here begins a magic sequence of register writes to enable 5364 * auto-downclocking. 5365 * 5366 * Perhaps there might be some value in exposing these to 5367 * userspace... 
5368 */ 5369 I915_WRITE(GEN6_RC_STATE, 0); 5370 5371 /* Clear the DBG now so we don't confuse earlier errors */ 5372 gtfifodbg = I915_READ(GTFIFODBG); 5373 if (gtfifodbg) { 5374 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 5375 I915_WRITE(GTFIFODBG, gtfifodbg); 5376 } 5377 5378 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5379 5380 /* disable the counters and set deterministic thresholds */ 5381 I915_WRITE(GEN6_RC_CONTROL, 0); 5382 5383 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 5384 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 5385 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 5386 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 5387 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 5388 5389 for_each_engine(engine, dev_priv) 5390 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5391 5392 I915_WRITE(GEN6_RC_SLEEP, 0); 5393 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 5394 if (IS_IVYBRIDGE(dev_priv)) 5395 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 5396 else 5397 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 5398 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 5399 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 5400 5401 /* Check if we are enabling RC6 */ 5402 rc6_mode = intel_enable_rc6(); 5403 if (rc6_mode & INTEL_RC6_ENABLE) 5404 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 5405 5406 /* We don't use those on Haswell */ 5407 if (!IS_HASWELL(dev_priv)) { 5408 if (rc6_mode & INTEL_RC6p_ENABLE) 5409 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 5410 5411 if (rc6_mode & INTEL_RC6pp_ENABLE) 5412 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 5413 } 5414 5415 intel_print_rc6_info(dev_priv, rc6_mask); 5416 5417 I915_WRITE(GEN6_RC_CONTROL, 5418 rc6_mask | 5419 GEN6_RC_CTL_EI_MODE(1) | 5420 GEN6_RC_CTL_HW_ENABLE); 5421 5422 /* Power down if completely idle for over 50ms */ 5423 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); 5424 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5425 5426 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 5427 if (ret) 5428 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 5429 5430 reset_rps(dev_priv, gen6_set_rps); 5431 5432 rc6vids = 0; 5433 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 5434 if (IS_GEN6(dev_priv) && ret) { 5435 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 5436 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 5437 DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#if 0
	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}
#else
	max_ia_freq = tsc_frequency / 1000;
#endif

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev_priv)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
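			 *
			 * Illustrative arithmetic (numbers invented for the
			 * example, not taken from any spec): with
			 * max_ia_freq = 3200 MHz and scaling_factor = 180,
			 * a bin four steps below maximum gets
			 * ia_freq = 3200 - (4 * 180) / 2 = 2840, and
			 * DIV_ROUND_CLOSEST(2840, 100) = 28 is the IA ratio
			 * handed to the PCU below.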
 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

/* Check that the pctx buffer wasn't moved under us.
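 * The hardware saves the render power context to the physical address
 * programmed into VLV_PCBR when it enters RC6, so if the stolen-memory
 * object backing it were ever relocated, RC6 would scribble over
 * whatever now lives at the stale address.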
 */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}


/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}

static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	mutex_lock(&dev_priv->drm.struct_mutex);

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
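	 *
	 * PCBR wants a 4 KiB aligned physical address inside stolen memory.
	 * Unlike the cherryview path above, no explicit masking is applied
	 * below: stolen allocations are page aligned, so
	 * stolen_base + pctx->stolen->start already satisfies the
	 * alignment requirement.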
5704 */ 5705 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size); 5706 if (!pctx) { 5707 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5708 goto out; 5709 } 5710 5711 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 5712 I915_WRITE(VLV_PCBR, pctx_paddr); 5713 5714 out: 5715 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5716 dev_priv->vlv_pctx = pctx; 5717 mutex_unlock(&dev_priv->drm.struct_mutex); 5718 } 5719 5720 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) 5721 { 5722 if (WARN_ON(!dev_priv->vlv_pctx)) 5723 return; 5724 5725 i915_gem_object_put_unlocked(dev_priv->vlv_pctx); 5726 dev_priv->vlv_pctx = NULL; 5727 } 5728 5729 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) 5730 { 5731 dev_priv->rps.gpll_ref_freq = 5732 vlv_get_cck_clock(dev_priv, "GPLL ref", 5733 CCK_GPLL_CLOCK_CONTROL, 5734 dev_priv->czclk_freq); 5735 5736 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", 5737 dev_priv->rps.gpll_ref_freq); 5738 } 5739 5740 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) 5741 { 5742 u32 val; 5743 5744 valleyview_setup_pctx(dev_priv); 5745 5746 vlv_init_gpll_ref_freq(dev_priv); 5747 5748 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5749 switch ((val >> 6) & 3) { 5750 case 0: 5751 case 1: 5752 dev_priv->mem_freq = 800; 5753 break; 5754 case 2: 5755 dev_priv->mem_freq = 1066; 5756 break; 5757 case 3: 5758 dev_priv->mem_freq = 1333; 5759 break; 5760 } 5761 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 5762 5763 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 5764 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5765 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5766 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5767 dev_priv->rps.max_freq); 5768 5769 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 5770 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5771 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5772 dev_priv->rps.efficient_freq); 5773 5774 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 5775 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 5776 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5777 dev_priv->rps.rp1_freq); 5778 5779 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 5780 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5781 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5782 dev_priv->rps.min_freq); 5783 } 5784 5785 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) 5786 { 5787 u32 val; 5788 5789 cherryview_setup_pctx(dev_priv); 5790 5791 vlv_init_gpll_ref_freq(dev_priv); 5792 5793 mutex_lock(&dev_priv->sb_lock); 5794 val = vlv_cck_read(dev_priv, CCK_FUSE_REG); 5795 mutex_unlock(&dev_priv->sb_lock); 5796 5797 switch ((val >> 2) & 0x7) { 5798 case 3: 5799 dev_priv->mem_freq = 2000; 5800 break; 5801 default: 5802 dev_priv->mem_freq = 1600; 5803 break; 5804 } 5805 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 5806 5807 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 5808 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5809 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5810 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5811 dev_priv->rps.max_freq); 5812 5813 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 5814 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5815 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5816 dev_priv->rps.efficient_freq); 5817 5818 
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 5819 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 5820 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5821 dev_priv->rps.rp1_freq); 5822 5823 /* PUnit validated range is only [RPe, RP0] */ 5824 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq; 5825 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5826 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5827 dev_priv->rps.min_freq); 5828 5829 WARN_ONCE((dev_priv->rps.max_freq | 5830 dev_priv->rps.efficient_freq | 5831 dev_priv->rps.rp1_freq | 5832 dev_priv->rps.min_freq) & 1, 5833 "Odd GPU freq values\n"); 5834 } 5835 5836 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 5837 { 5838 valleyview_cleanup_pctx(dev_priv); 5839 } 5840 5841 static void cherryview_enable_rps(struct drm_i915_private *dev_priv) 5842 { 5843 struct intel_engine_cs *engine; 5844 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5845 5846 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5847 5848 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | 5849 GT_FIFO_FREE_ENTRIES_CHV); 5850 if (gtfifodbg) { 5851 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5852 gtfifodbg); 5853 I915_WRITE(GTFIFODBG, gtfifodbg); 5854 } 5855 5856 cherryview_check_pctx(dev_priv); 5857 5858 /* 1a & 1b: Get forcewake during program sequence. Although the driver 5859 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 5860 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5861 5862 /* Disable RC states. */ 5863 I915_WRITE(GEN6_RC_CONTROL, 0); 5864 5865 /* 2a: Program RC6 thresholds.*/ 5866 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5867 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 5868 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 5869 5870 for_each_engine(engine, dev_priv) 5871 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5872 I915_WRITE(GEN6_RC_SLEEP, 0); 5873 5874 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ 5875 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); 5876 5877 /* allows RC6 residency counter to work */ 5878 I915_WRITE(VLV_COUNTER_CONTROL, 5879 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 5880 VLV_MEDIA_RC6_COUNT_EN | 5881 VLV_RENDER_RC6_COUNT_EN)); 5882 5883 /* For now we assume BIOS is allocating and populating the PCBR */ 5884 pcbr = I915_READ(VLV_PCBR); 5885 5886 /* 3: Enable RC6 */ 5887 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && 5888 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5889 rc6_mode = GEN7_RC_CTL_TO_MODE; 5890 5891 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5892 5893 /* 4 Program defaults and thresholds for RPS*/ 5894 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 5895 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 5896 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 5897 I915_WRITE(GEN6_RP_UP_EI, 66000); 5898 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 5899 5900 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5901 5902 /* 5: Enable RPS */ 5903 I915_WRITE(GEN6_RP_CONTROL, 5904 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5905 GEN6_RP_MEDIA_IS_GFX | 5906 GEN6_RP_ENABLE | 5907 GEN6_RP_UP_BUSY_AVG | 5908 GEN6_RP_DOWN_IDLE_AVG); 5909 5910 /* Setting Fixed Bias */ 5911 val = VLV_OVERRIDE_EN | 5912 VLV_SOC_TDP_EN | 5913 CHV_BIAS_CPU_50_SOC_50; 5914 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 5915 5916 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5917 5918 /* RPS code assumes GPLL is used */ 5919 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5920 5921 DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); 5922 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5923 5924 reset_rps(dev_priv, valleyview_set_rps); 5925 5926 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5927 } 5928 5929 static void valleyview_enable_rps(struct drm_i915_private *dev_priv) 5930 { 5931 struct intel_engine_cs *engine; 5932 u32 gtfifodbg, val, rc6_mode = 0; 5933 5934 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5935 5936 valleyview_check_pctx(dev_priv); 5937 5938 gtfifodbg = I915_READ(GTFIFODBG); 5939 if (gtfifodbg) { 5940 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5941 gtfifodbg); 5942 I915_WRITE(GTFIFODBG, gtfifodbg); 5943 } 5944 5945 /* If VLV, Forcewake all wells, else re-direct to regular path */ 5946 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5947 5948 /* Disable RC states. */ 5949 I915_WRITE(GEN6_RC_CONTROL, 0); 5950 5951 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 5952 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 5953 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 5954 I915_WRITE(GEN6_RP_UP_EI, 66000); 5955 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 5956 5957 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5958 5959 I915_WRITE(GEN6_RP_CONTROL, 5960 GEN6_RP_MEDIA_TURBO | 5961 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5962 GEN6_RP_MEDIA_IS_GFX | 5963 GEN6_RP_ENABLE | 5964 GEN6_RP_UP_BUSY_AVG | 5965 GEN6_RP_DOWN_IDLE_CONT); 5966 5967 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 5968 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 5969 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 5970 5971 for_each_engine(engine, dev_priv) 5972 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5973 5974 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 5975 5976 /* allows RC6 residency counter to work */ 5977 I915_WRITE(VLV_COUNTER_CONTROL, 5978 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | 5979 VLV_RENDER_RC0_COUNT_EN | 5980 VLV_MEDIA_RC6_COUNT_EN | 5981 VLV_RENDER_RC6_COUNT_EN)); 5982 5983 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 5984 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 5985 5986 intel_print_rc6_info(dev_priv, rc6_mode); 5987 5988 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5989 5990 /* Setting Fixed Bias */ 5991 val = VLV_OVERRIDE_EN | 5992 VLV_SOC_TDP_EN | 5993 VLV_BIAS_CPU_125_SOC_875; 5994 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 5995 5996 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5997 5998 /* RPS code assumes GPLL is used */ 5999 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 6000 6001 DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); 6002 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 6003 6004 reset_rps(dev_priv, valleyview_set_rps); 6005 6006 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6007 } 6008 6009 static unsigned long intel_pxfreq(u32 vidfreq) 6010 { 6011 unsigned long freq; 6012 int div = (vidfreq & 0x3f0000) >> 16; 6013 int post = (vidfreq & 0x3000) >> 12; 6014 int pre = (vidfreq & 0x7); 6015 6016 if (!pre) 6017 return 0; 6018 6019 freq = ((div * 133333) / ((1<<post) * pre)); 6020 6021 return freq; 6022 } 6023 6024 static const struct cparams { 6025 u16 i; 6026 u16 t; 6027 u16 m; 6028 u16 c; 6029 } cparams[] = { 6030 { 1, 1333, 301, 28664 }, 6031 { 1, 1066, 294, 24460 }, 6032 { 1, 800, 294, 25192 }, 6033 { 0, 1333, 276, 27605 }, 6034 { 0, 1066, 276, 27605 }, 6035 { 0, 800, 231, 23784 }, 6036 }; 6037 6038 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 6039 { 6040 u64 total_count, diff, ret; 6041 u32 count1, count2, count3, m = 0, c = 0; 6042 unsigned long now = jiffies_to_msecs(jiffies), diff1; 6043 int i; 6044 6045 assert_spin_locked(&mchdev_lock); 6046 6047 diff1 = now - dev_priv->ips.last_time1; 6048 6049 /* Prevent division-by-zero if we are asking too fast. 6050 * Also, we don't get interesting results if we are polling 6051 * faster than once in 10ms, so just return the saved value 6052 * in such cases. 6053 */ 6054 if (diff1 <= 10) 6055 return dev_priv->ips.chipset_power; 6056 6057 count1 = I915_READ(DMIEC); 6058 count2 = I915_READ(DDREC); 6059 count3 = I915_READ(CSIEC); 6060 6061 total_count = count1 + count2 + count3; 6062 6063 /* FIXME: handle per-counter overflow */ 6064 if (total_count < dev_priv->ips.last_count1) { 6065 diff = ~0UL - dev_priv->ips.last_count1; 6066 diff += total_count; 6067 } else { 6068 diff = total_count - dev_priv->ips.last_count1; 6069 } 6070 6071 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 6072 if (cparams[i].i == dev_priv->ips.c_m && 6073 cparams[i].t == dev_priv->ips.r_t) { 6074 m = cparams[i].m; 6075 c = cparams[i].c; 6076 break; 6077 } 6078 } 6079 6080 diff = div_u64(diff, diff1); 6081 ret = ((m * diff) + c); 6082 ret = div_u64(ret, 10); 6083 6084 dev_priv->ips.last_count1 = total_count; 6085 dev_priv->ips.last_time1 = now; 6086 6087 dev_priv->ips.chipset_power = ret; 6088 6089 return ret; 6090 } 6091 6092 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 6093 { 6094 unsigned long val; 6095 6096 if (INTEL_INFO(dev_priv)->gen != 5) 6097 return 0; 6098 6099 spin_lock_irq(&mchdev_lock); 6100 6101 val = __i915_chipset_val(dev_priv); 6102 6103 spin_unlock_irq(&mchdev_lock); 6104 6105 return val; 6106 } 6107 6108 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 6109 { 6110 unsigned long m, x, b; 6111 u32 tsfs; 6112 6113 tsfs = I915_READ(TSFS); 6114 6115 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 6116 x = I915_READ8(TR1); 6117 6118 b = tsfs & TSFS_INTR_MASK; 6119 6120 return ((m * x) / 127) - b; 6121 } 6122 6123 static int _pxvid_to_vd(u8 pxvid) 6124 { 6125 if (pxvid == 0) 6126 return 0; 6127 6128 if (pxvid >= 8 && pxvid < 31) 6129 pxvid = 31; 6130 6131 return (pxvid + 2) * 125; 6132 } 6133 6134 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 6135 { 6136 const int vd = _pxvid_to_vd(pxvid); 6137 const int vm = vd - 1125; 6138 6139 if (INTEL_INFO(dev_priv)->is_mobile) 6140 return vm > 0 ? 
vm : 0; 6141 6142 return vd; 6143 } 6144 6145 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 6146 { 6147 u64 now, diff, diffms; 6148 u32 count; 6149 6150 assert_spin_locked(&mchdev_lock); 6151 6152 now = ktime_get_raw_ns(); 6153 diffms = now - dev_priv->ips.last_time2; 6154 do_div(diffms, NSEC_PER_MSEC); 6155 6156 /* Don't divide by 0 */ 6157 if (!diffms) 6158 return; 6159 6160 count = I915_READ(GFXEC); 6161 6162 if (count < dev_priv->ips.last_count2) { 6163 diff = ~0UL - dev_priv->ips.last_count2; 6164 diff += count; 6165 } else { 6166 diff = count - dev_priv->ips.last_count2; 6167 } 6168 6169 dev_priv->ips.last_count2 = count; 6170 dev_priv->ips.last_time2 = now; 6171 6172 /* More magic constants... */ 6173 diff = diff * 1181; 6174 diff = div_u64(diff, diffms * 10); 6175 dev_priv->ips.gfx_power = diff; 6176 } 6177 6178 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 6179 { 6180 if (INTEL_INFO(dev_priv)->gen != 5) 6181 return; 6182 6183 spin_lock_irq(&mchdev_lock); 6184 6185 __i915_update_gfx_val(dev_priv); 6186 6187 spin_unlock_irq(&mchdev_lock); 6188 } 6189 6190 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 6191 { 6192 unsigned long t, corr, state1, corr2, state2; 6193 u32 pxvid, ext_v; 6194 6195 assert_spin_locked(&mchdev_lock); 6196 6197 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq)); 6198 pxvid = (pxvid >> 24) & 0x7f; 6199 ext_v = pvid_to_extvid(dev_priv, pxvid); 6200 6201 state1 = ext_v; 6202 6203 t = i915_mch_val(dev_priv); 6204 6205 /* Revel in the empirically derived constants */ 6206 6207 /* Correction factor in 1/100000 units */ 6208 if (t > 80) 6209 corr = ((t * 2349) + 135940); 6210 else if (t >= 50) 6211 corr = ((t * 964) + 29317); 6212 else /* < 50 */ 6213 corr = ((t * 301) + 1004); 6214 6215 corr = corr * ((150142 * state1) / 10000 - 78642); 6216 corr /= 100000; 6217 corr2 = (corr * dev_priv->ips.corr); 6218 6219 state2 = (corr2 * state1) / 10000; 6220 state2 /= 100; /* convert to mW */ 6221 6222 __i915_update_gfx_val(dev_priv); 6223 6224 return dev_priv->ips.gfx_power + state2; 6225 } 6226 6227 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 6228 { 6229 unsigned long val; 6230 6231 if (INTEL_INFO(dev_priv)->gen != 5) 6232 return 0; 6233 6234 spin_lock_irq(&mchdev_lock); 6235 6236 val = __i915_gfx_val(dev_priv); 6237 6238 spin_unlock_irq(&mchdev_lock); 6239 6240 return val; 6241 } 6242 6243 /** 6244 * i915_read_mch_val - return value for IPS use 6245 * 6246 * Calculate and return a value for the IPS driver to use when deciding whether 6247 * we have thermal and power headroom to increase CPU or GPU power budget. 6248 */ 6249 unsigned long i915_read_mch_val(void) 6250 { 6251 struct drm_i915_private *dev_priv; 6252 unsigned long chipset_val, graphics_val, ret = 0; 6253 6254 spin_lock_irq(&mchdev_lock); 6255 if (!i915_mch_dev) 6256 goto out_unlock; 6257 dev_priv = i915_mch_dev; 6258 6259 chipset_val = __i915_chipset_val(dev_priv); 6260 graphics_val = __i915_gfx_val(dev_priv); 6261 6262 ret = chipset_val + graphics_val; 6263 6264 out_unlock: 6265 spin_unlock_irq(&mchdev_lock); 6266 6267 return ret; 6268 } 6269 EXPORT_SYMBOL_GPL(i915_read_mch_val); 6270 6271 /** 6272 * i915_gpu_raise - raise GPU frequency limit 6273 * 6274 * Raise the limit; IPS indicates we have thermal headroom. 
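 *
 * Note the inverted encoding on Ironlake: smaller "delay" values mean
 * higher frequencies, and ips.fmax is the smallest permitted delay, so
 * raising the limit below means decrementing max_delay towards fmax.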
6275 */ 6276 bool i915_gpu_raise(void) 6277 { 6278 struct drm_i915_private *dev_priv; 6279 bool ret = true; 6280 6281 spin_lock_irq(&mchdev_lock); 6282 if (!i915_mch_dev) { 6283 ret = false; 6284 goto out_unlock; 6285 } 6286 dev_priv = i915_mch_dev; 6287 6288 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 6289 dev_priv->ips.max_delay--; 6290 6291 out_unlock: 6292 spin_unlock_irq(&mchdev_lock); 6293 6294 return ret; 6295 } 6296 EXPORT_SYMBOL_GPL(i915_gpu_raise); 6297 6298 /** 6299 * i915_gpu_lower - lower GPU frequency limit 6300 * 6301 * IPS indicates we're close to a thermal limit, so throttle back the GPU 6302 * frequency maximum. 6303 */ 6304 bool i915_gpu_lower(void) 6305 { 6306 struct drm_i915_private *dev_priv; 6307 bool ret = true; 6308 6309 spin_lock_irq(&mchdev_lock); 6310 if (!i915_mch_dev) { 6311 ret = false; 6312 goto out_unlock; 6313 } 6314 dev_priv = i915_mch_dev; 6315 6316 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 6317 dev_priv->ips.max_delay++; 6318 6319 out_unlock: 6320 spin_unlock_irq(&mchdev_lock); 6321 6322 return ret; 6323 } 6324 EXPORT_SYMBOL_GPL(i915_gpu_lower); 6325 6326 /** 6327 * i915_gpu_busy - indicate GPU business to IPS 6328 * 6329 * Tell the IPS driver whether or not the GPU is busy. 6330 */ 6331 bool i915_gpu_busy(void) 6332 { 6333 bool ret = false; 6334 6335 spin_lock_irq(&mchdev_lock); 6336 if (i915_mch_dev) 6337 ret = i915_mch_dev->gt.awake; 6338 spin_unlock_irq(&mchdev_lock); 6339 6340 return ret; 6341 } 6342 EXPORT_SYMBOL_GPL(i915_gpu_busy); 6343 6344 /** 6345 * i915_gpu_turbo_disable - disable graphics turbo 6346 * 6347 * Disable graphics turbo by resetting the max frequency and setting the 6348 * current frequency to the default. 6349 */ 6350 bool i915_gpu_turbo_disable(void) 6351 { 6352 struct drm_i915_private *dev_priv; 6353 bool ret = true; 6354 6355 spin_lock_irq(&mchdev_lock); 6356 if (!i915_mch_dev) { 6357 ret = false; 6358 goto out_unlock; 6359 } 6360 dev_priv = i915_mch_dev; 6361 6362 dev_priv->ips.max_delay = dev_priv->ips.fstart; 6363 6364 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) 6365 ret = false; 6366 6367 out_unlock: 6368 spin_unlock_irq(&mchdev_lock); 6369 6370 return ret; 6371 } 6372 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 6373 6374 /** 6375 * Tells the intel_ips driver that the i915 driver is now loaded, if 6376 * IPS got loaded first. 6377 * 6378 * This awkward dance is so that neither module has to depend on the 6379 * other in order for IPS to do the appropriate communication of 6380 * GPU turbo limits to i915. 6381 */ 6382 static void 6383 ips_ping_for_i915_load(void) 6384 { 6385 #if 0 6386 void (*link)(void); 6387 6388 link = symbol_get(ips_link_to_i915_driver); 6389 if (link) { 6390 link(); 6391 symbol_put(ips_link_to_i915_driver); 6392 } 6393 #endif 6394 } 6395 6396 void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 6397 { 6398 /* We only register the i915 ips part with intel-ips once everything is 6399 * set up, to avoid intel-ips sneaking in and reading bogus values. 
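	 *
	 * i915_mch_dev is published under mchdev_lock, the same lock every
	 * exported i915_gpu_*() helper takes before dereferencing it, so
	 * readers see either NULL or a fully initialised device.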
 */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
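	 *
	 * If the user disabled RC6, hold a runtime PM reference for the
	 * lifetime of the driver so the device never runtime suspends into
	 * a state it cannot restore from.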
6489 */ 6490 if (!i915.enable_rc6) { 6491 DRM_INFO("RC6 disabled, disabling runtime PM support\n"); 6492 intel_runtime_pm_get(dev_priv); 6493 } 6494 6495 mutex_lock(&dev_priv->rps.hw_lock); 6496 6497 /* Initialize RPS limits (for userspace) */ 6498 if (IS_CHERRYVIEW(dev_priv)) 6499 cherryview_init_gt_powersave(dev_priv); 6500 else if (IS_VALLEYVIEW(dev_priv)) 6501 valleyview_init_gt_powersave(dev_priv); 6502 else if (INTEL_GEN(dev_priv) >= 6) 6503 gen6_init_rps_frequencies(dev_priv); 6504 6505 /* Derive initial user preferences/limits from the hardware limits */ 6506 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 6507 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq; 6508 6509 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 6510 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 6511 6512 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 6513 dev_priv->rps.min_freq_softlimit = 6514 max_t(int, 6515 dev_priv->rps.efficient_freq, 6516 intel_freq_opcode(dev_priv, 450)); 6517 6518 /* After setting max-softlimit, find the overclock max freq */ 6519 if (IS_GEN6(dev_priv) || 6520 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { 6521 u32 params = 0; 6522 6523 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, ¶ms); 6524 if (params & BIT(31)) { /* OC supported */ 6525 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", 6526 (dev_priv->rps.max_freq & 0xff) * 50, 6527 (params & 0xff) * 50); 6528 dev_priv->rps.max_freq = params & 0xff; 6529 } 6530 } 6531 6532 /* Finally allow us to boost to max by default */ 6533 dev_priv->rps.boost_freq = dev_priv->rps.max_freq; 6534 6535 mutex_unlock(&dev_priv->rps.hw_lock); 6536 6537 intel_autoenable_gt_powersave(dev_priv); 6538 } 6539 6540 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 6541 { 6542 if (IS_VALLEYVIEW(dev_priv)) 6543 valleyview_cleanup_gt_powersave(dev_priv); 6544 6545 if (!i915.enable_rc6) 6546 intel_runtime_pm_put(dev_priv); 6547 } 6548 6549 /** 6550 * intel_suspend_gt_powersave - suspend PM work and helper threads 6551 * @dev_priv: i915 device 6552 * 6553 * We don't want to disable RC6 or other features here, we just want 6554 * to make sure any work we've queued has finished and won't bother 6555 * us while we're suspended. 
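 *
 * Note that cancelling rps.autoenable_work must also drop the runtime
 * PM reference the pending work was holding, which is what the body
 * below does.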
6556 */ 6557 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) 6558 { 6559 if (INTEL_GEN(dev_priv) < 6) 6560 return; 6561 6562 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work)) 6563 intel_runtime_pm_put(dev_priv); 6564 6565 /* gen6_rps_idle() will be called later to disable interrupts */ 6566 } 6567 6568 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) 6569 { 6570 dev_priv->rps.enabled = true; /* force disabling */ 6571 intel_disable_gt_powersave(dev_priv); 6572 6573 gen6_reset_rps_interrupts(dev_priv); 6574 } 6575 6576 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) 6577 { 6578 if (!READ_ONCE(dev_priv->rps.enabled)) 6579 return; 6580 6581 mutex_lock(&dev_priv->rps.hw_lock); 6582 6583 if (INTEL_GEN(dev_priv) >= 9) { 6584 gen9_disable_rc6(dev_priv); 6585 gen9_disable_rps(dev_priv); 6586 } else if (IS_CHERRYVIEW(dev_priv)) { 6587 cherryview_disable_rps(dev_priv); 6588 } else if (IS_VALLEYVIEW(dev_priv)) { 6589 valleyview_disable_rps(dev_priv); 6590 } else if (INTEL_GEN(dev_priv) >= 6) { 6591 gen6_disable_rps(dev_priv); 6592 } else if (IS_IRONLAKE_M(dev_priv)) { 6593 ironlake_disable_drps(dev_priv); 6594 } 6595 6596 dev_priv->rps.enabled = false; 6597 mutex_unlock(&dev_priv->rps.hw_lock); 6598 } 6599 6600 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) 6601 { 6602 /* We shouldn't be disabling as we submit, so this should be less 6603 * racy than it appears! 6604 */ 6605 if (READ_ONCE(dev_priv->rps.enabled)) 6606 return; 6607 6608 /* Powersaving is controlled by the host when inside a VM */ 6609 if (intel_vgpu_active(dev_priv)) 6610 return; 6611 6612 mutex_lock(&dev_priv->rps.hw_lock); 6613 6614 if (IS_CHERRYVIEW(dev_priv)) { 6615 cherryview_enable_rps(dev_priv); 6616 } else if (IS_VALLEYVIEW(dev_priv)) { 6617 valleyview_enable_rps(dev_priv); 6618 } else if (INTEL_GEN(dev_priv) >= 9) { 6619 gen9_enable_rc6(dev_priv); 6620 gen9_enable_rps(dev_priv); 6621 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 6622 gen6_update_ring_freq(dev_priv); 6623 } else if (IS_BROADWELL(dev_priv)) { 6624 gen8_enable_rps(dev_priv); 6625 gen6_update_ring_freq(dev_priv); 6626 } else if (INTEL_GEN(dev_priv) >= 6) { 6627 gen6_enable_rps(dev_priv); 6628 gen6_update_ring_freq(dev_priv); 6629 } else if (IS_IRONLAKE_M(dev_priv)) { 6630 ironlake_enable_drps(dev_priv); 6631 intel_init_emon(dev_priv); 6632 } 6633 6634 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6635 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); 6636 6637 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); 6638 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); 6639 6640 dev_priv->rps.enabled = true; 6641 mutex_unlock(&dev_priv->rps.hw_lock); 6642 } 6643 6644 static void __intel_autoenable_gt_powersave(struct work_struct *work) 6645 { 6646 struct drm_i915_private *dev_priv = 6647 container_of(work, typeof(*dev_priv), rps.autoenable_work.work); 6648 struct intel_engine_cs *rcs; 6649 struct drm_i915_gem_request *req; 6650 6651 if (READ_ONCE(dev_priv->rps.enabled)) 6652 goto out; 6653 6654 rcs = &dev_priv->engine[RCS]; 6655 if (rcs->last_context) 6656 goto out; 6657 6658 if (!rcs->init_context) 6659 goto out; 6660 6661 mutex_lock(&dev_priv->drm.struct_mutex); 6662 6663 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context); 6664 if (IS_ERR(req)) 6665 goto unlock; 6666 6667 if (!i915.enable_execlists && i915_switch_context(req) == 0) 6668 rcs->init_context(req); 6669 6670 /* Mark the device busy, calling 
intel_enable_gt_powersave() */ 6671 i915_add_request_no_flush(req); 6672 6673 unlock: 6674 mutex_unlock(&dev_priv->drm.struct_mutex); 6675 out: 6676 intel_runtime_pm_put(dev_priv); 6677 } 6678 6679 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv) 6680 { 6681 if (READ_ONCE(dev_priv->rps.enabled)) 6682 return; 6683 6684 if (IS_IRONLAKE_M(dev_priv)) { 6685 ironlake_enable_drps(dev_priv); 6686 mutex_lock(&dev_priv->drm.struct_mutex); 6687 intel_init_emon(dev_priv); 6688 mutex_unlock(&dev_priv->drm.struct_mutex); 6689 } else if (INTEL_INFO(dev_priv)->gen >= 6) { 6690 /* 6691 * PCU communication is slow and this doesn't need to be 6692 * done at any specific time, so do this out of our fast path 6693 * to make resume and init faster. 6694 * 6695 * We depend on the HW RC6 power context save/restore 6696 * mechanism when entering D3 through runtime PM suspend. So 6697 * disable RPM until RPS/RC6 is properly setup. We can only 6698 * get here via the driver load/system resume/runtime resume 6699 * paths, so the _noresume version is enough (and in case of 6700 * runtime resume it's necessary). 6701 */ 6702 if (queue_delayed_work(dev_priv->wq, 6703 &dev_priv->rps.autoenable_work, 6704 round_jiffies_up_relative(HZ))) 6705 intel_runtime_pm_get_noresume(dev_priv); 6706 } 6707 } 6708 6709 static void ibx_init_clock_gating(struct drm_device *dev) 6710 { 6711 struct drm_i915_private *dev_priv = to_i915(dev); 6712 6713 /* 6714 * On Ibex Peak and Cougar Point, we need to disable clock 6715 * gating for the panel power sequencer or it will fail to 6716 * start up when no ports are active. 6717 */ 6718 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 6719 } 6720 6721 static void g4x_disable_trickle_feed(struct drm_device *dev) 6722 { 6723 struct drm_i915_private *dev_priv = to_i915(dev); 6724 enum i915_pipe pipe; 6725 6726 for_each_pipe(dev_priv, pipe) { 6727 I915_WRITE(DSPCNTR(pipe), 6728 I915_READ(DSPCNTR(pipe)) | 6729 DISPPLANE_TRICKLE_FEED_DISABLE); 6730 6731 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); 6732 POSTING_READ(DSPSURF(pipe)); 6733 } 6734 } 6735 6736 static void ilk_init_lp_watermarks(struct drm_device *dev) 6737 { 6738 struct drm_i915_private *dev_priv = to_i915(dev); 6739 6740 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 6741 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 6742 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 6743 6744 /* 6745 * Don't touch WM1S_LP_EN here. 6746 * Doing so could cause underruns. 
6747 */ 6748 } 6749 6750 static void ironlake_init_clock_gating(struct drm_device *dev) 6751 { 6752 struct drm_i915_private *dev_priv = to_i915(dev); 6753 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6754 6755 /* 6756 * Required for FBC 6757 * WaFbcDisableDpfcClockGating:ilk 6758 */ 6759 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 6760 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 6761 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 6762 6763 I915_WRITE(PCH_3DCGDIS0, 6764 MARIUNIT_CLOCK_GATE_DISABLE | 6765 SVSMUNIT_CLOCK_GATE_DISABLE); 6766 I915_WRITE(PCH_3DCGDIS1, 6767 VFMUNIT_CLOCK_GATE_DISABLE); 6768 6769 /* 6770 * According to the spec the following bits should be set in 6771 * order to enable memory self-refresh 6772 * The bit 22/21 of 0x42004 6773 * The bit 5 of 0x42020 6774 * The bit 15 of 0x45000 6775 */ 6776 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6777 (I915_READ(ILK_DISPLAY_CHICKEN2) | 6778 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 6779 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 6780 I915_WRITE(DISP_ARB_CTL, 6781 (I915_READ(DISP_ARB_CTL) | 6782 DISP_FBC_WM_DIS)); 6783 6784 ilk_init_lp_watermarks(dev); 6785 6786 /* 6787 * Based on the document from hardware guys the following bits 6788 * should be set unconditionally in order to enable FBC. 6789 * The bit 22 of 0x42000 6790 * The bit 22 of 0x42004 6791 * The bit 7,8,9 of 0x42020. 6792 */ 6793 if (IS_IRONLAKE_M(dev)) { 6794 /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 6795 I915_WRITE(ILK_DISPLAY_CHICKEN1, 6796 I915_READ(ILK_DISPLAY_CHICKEN1) | 6797 ILK_FBCQ_DIS); 6798 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6799 I915_READ(ILK_DISPLAY_CHICKEN2) | 6800 ILK_DPARB_GATE); 6801 } 6802 6803 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6804 6805 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6806 I915_READ(ILK_DISPLAY_CHICKEN2) | 6807 ILK_ELPIN_409_SELECT); 6808 I915_WRITE(_3D_CHICKEN2, 6809 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 6810 _3D_CHICKEN2_WM_READ_PIPELINED); 6811 6812 /* WaDisableRenderCachePipelinedFlush:ilk */ 6813 I915_WRITE(CACHE_MODE_0, 6814 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 6815 6816 /* WaDisable_RenderCache_OperationalFlush:ilk */ 6817 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6818 6819 g4x_disable_trickle_feed(dev); 6820 6821 ibx_init_clock_gating(dev); 6822 } 6823 6824 static void cpt_init_clock_gating(struct drm_device *dev) 6825 { 6826 struct drm_i915_private *dev_priv = to_i915(dev); 6827 int pipe; 6828 uint32_t val; 6829 6830 /* 6831 * On Ibex Peak and Cougar Point, we need to disable clock 6832 * gating for the panel power sequencer or it will fail to 6833 * start up when no ports are active. 6834 */ 6835 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 6836 PCH_DPLUNIT_CLOCK_GATE_DISABLE | 6837 PCH_CPUNIT_CLOCK_GATE_DISABLE); 6838 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 6839 DPLS_EDP_PPS_FIX_DIS); 6840 /* The below fixes the weird display corruption, a few pixels shifted 6841 * downward, on (only) LVDS of some HP laptops with IVY. 
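 * Setting TRANS_CHICKEN2_TIMING_OVERRIDE (and re-applying the VBT's
 * FDI RX polarity) on every transcoder, as done below, is what
 * cures it.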
6842 */ 6843 for_each_pipe(dev_priv, pipe) { 6844 val = I915_READ(TRANS_CHICKEN2(pipe)); 6845 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 6846 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6847 if (dev_priv->vbt.fdi_rx_polarity_inverted) 6848 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6849 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 6850 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 6851 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 6852 I915_WRITE(TRANS_CHICKEN2(pipe), val); 6853 } 6854 /* WADP0ClockGatingDisable */ 6855 for_each_pipe(dev_priv, pipe) { 6856 I915_WRITE(TRANS_CHICKEN1(pipe), 6857 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6858 } 6859 } 6860 6861 static void gen6_check_mch_setup(struct drm_device *dev) 6862 { 6863 struct drm_i915_private *dev_priv = to_i915(dev); 6864 uint32_t tmp; 6865 6866 tmp = I915_READ(MCH_SSKPD); 6867 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) 6868 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", 6869 tmp); 6870 } 6871 6872 static void gen6_init_clock_gating(struct drm_device *dev) 6873 { 6874 struct drm_i915_private *dev_priv = to_i915(dev); 6875 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6876 6877 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6878 6879 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6880 I915_READ(ILK_DISPLAY_CHICKEN2) | 6881 ILK_ELPIN_409_SELECT); 6882 6883 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ 6884 I915_WRITE(_3D_CHICKEN, 6885 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 6886 6887 /* WaDisable_RenderCache_OperationalFlush:snb */ 6888 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6889 6890 /* 6891 * BSpec recoomends 8x4 when MSAA is used, 6892 * however in practice 16x4 seems fastest. 6893 * 6894 * Note that PS/WM thread counts depend on the WIZ hashing 6895 * disable bit, which we don't touch here, but it's good 6896 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6897 */ 6898 I915_WRITE(GEN6_GT_MODE, 6899 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6900 6901 ilk_init_lp_watermarks(dev); 6902 6903 I915_WRITE(CACHE_MODE_0, 6904 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 6905 6906 I915_WRITE(GEN6_UCGCTL1, 6907 I915_READ(GEN6_UCGCTL1) | 6908 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 6909 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 6910 6911 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 6912 * gating disable must be set. Failure to set it results in 6913 * flickering pixels due to Z write ordering failures after 6914 * some amount of runtime in the Mesa "fire" demo, and Unigine 6915 * Sanctuary and Tropics, and apparently anything else with 6916 * alpha test or pixel discard. 6917 * 6918 * According to the spec, bit 11 (RCCUNIT) must also be set, 6919 * but we didn't debug actual testcases to find it out. 6920 * 6921 * WaDisableRCCUnitClockGating:snb 6922 * WaDisableRCPBUnitClockGating:snb 6923 */ 6924 I915_WRITE(GEN6_UCGCTL2, 6925 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 6926 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 6927 6928 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 6929 I915_WRITE(_3D_CHICKEN3, 6930 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); 6931 6932 /* 6933 * Bspec says: 6934 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 6935 * 3DSTATE_SF number of SF output attributes is more than 16." 
6936 */ 6937 I915_WRITE(_3D_CHICKEN3, 6938 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 6939 6940 /* 6941 * According to the spec the following bits should be 6942 * set in order to enable memory self-refresh and fbc: 6943 * The bit21 and bit22 of 0x42000 6944 * The bit21 and bit22 of 0x42004 6945 * The bit5 and bit7 of 0x42020 6946 * The bit14 of 0x70180 6947 * The bit14 of 0x71180 6948 * 6949 * WaFbcAsynchFlipDisableFbcQueue:snb 6950 */ 6951 I915_WRITE(ILK_DISPLAY_CHICKEN1, 6952 I915_READ(ILK_DISPLAY_CHICKEN1) | 6953 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 6954 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6955 I915_READ(ILK_DISPLAY_CHICKEN2) | 6956 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 6957 I915_WRITE(ILK_DSPCLK_GATE_D, 6958 I915_READ(ILK_DSPCLK_GATE_D) | 6959 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 6960 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 6961 6962 g4x_disable_trickle_feed(dev); 6963 6964 cpt_init_clock_gating(dev); 6965 6966 gen6_check_mch_setup(dev); 6967 } 6968 6969 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 6970 { 6971 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 6972 6973 /* 6974 * WaVSThreadDispatchOverride:ivb,vlv 6975 * 6976 * This actually overrides the dispatch 6977 * mode for all thread types. 6978 */ 6979 reg &= ~GEN7_FF_SCHED_MASK; 6980 reg |= GEN7_FF_TS_SCHED_HW; 6981 reg |= GEN7_FF_VS_SCHED_HW; 6982 reg |= GEN7_FF_DS_SCHED_HW; 6983 6984 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 6985 } 6986 6987 static void lpt_init_clock_gating(struct drm_device *dev) 6988 { 6989 struct drm_i915_private *dev_priv = to_i915(dev); 6990 6991 /* 6992 * TODO: this bit should only be enabled when really needed, then 6993 * disabled when not needed anymore in order to save power. 6994 */ 6995 if (HAS_PCH_LPT_LP(dev)) 6996 I915_WRITE(SOUTH_DSPCLK_GATE_D, 6997 I915_READ(SOUTH_DSPCLK_GATE_D) | 6998 PCH_LP_PARTITION_LEVEL_DISABLE); 6999 7000 /* WADPOClockGatingDisable:hsw */ 7001 I915_WRITE(TRANS_CHICKEN1(PIPE_A), 7002 I915_READ(TRANS_CHICKEN1(PIPE_A)) | 7003 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 7004 } 7005 7006 static void lpt_suspend_hw(struct drm_device *dev) 7007 { 7008 struct drm_i915_private *dev_priv = to_i915(dev); 7009 7010 if (HAS_PCH_LPT_LP(dev)) { 7011 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 7012 7013 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 7014 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 7015 } 7016 } 7017 7018 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, 7019 int general_prio_credits, 7020 int high_prio_credits) 7021 { 7022 u32 misccpctl; 7023 7024 /* WaTempDisableDOPClkGating:bdw */ 7025 misccpctl = I915_READ(GEN7_MISCCPCTL); 7026 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 7027 7028 I915_WRITE(GEN8_L3SQCREG1, 7029 L3_GENERAL_PRIO_CREDITS(general_prio_credits) | 7030 L3_HIGH_PRIO_CREDITS(high_prio_credits)); 7031 7032 /* 7033 * Wait at least 100 clocks before re-enabling clock gating. 7034 * See the definition of L3SQCREG1 in BSpec. 
7035 */ 7036 POSTING_READ(GEN8_L3SQCREG1); 7037 udelay(1); 7038 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 7039 } 7040 7041 static void kabylake_init_clock_gating(struct drm_device *dev) 7042 { 7043 struct drm_i915_private *dev_priv = to_i915(dev); 7044 7045 gen9_init_clock_gating(dev); 7046 7047 /* WaDisableSDEUnitClockGating:kbl */ 7048 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 7049 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7050 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7051 7052 /* WaDisableGamClockGating:kbl */ 7053 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 7054 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 7055 GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 7056 7057 /* WaFbcNukeOnHostModify:kbl */ 7058 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7059 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7060 } 7061 7062 static void skylake_init_clock_gating(struct drm_device *dev) 7063 { 7064 struct drm_i915_private *dev_priv = to_i915(dev); 7065 7066 gen9_init_clock_gating(dev); 7067 7068 /* WAC6entrylatency:skl */ 7069 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | 7070 FBC_LLC_FULLY_OPEN); 7071 7072 /* WaFbcNukeOnHostModify:skl */ 7073 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7074 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7075 } 7076 7077 static void broadwell_init_clock_gating(struct drm_device *dev) 7078 { 7079 struct drm_i915_private *dev_priv = to_i915(dev); 7080 enum i915_pipe pipe; 7081 7082 ilk_init_lp_watermarks(dev); 7083 7084 /* WaSwitchSolVfFArbitrationPriority:bdw */ 7085 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7086 7087 /* WaPsrDPAMaskVBlankInSRD:bdw */ 7088 I915_WRITE(CHICKEN_PAR1_1, 7089 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 7090 7091 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 7092 for_each_pipe(dev_priv, pipe) { 7093 I915_WRITE(CHICKEN_PIPESL_1(pipe), 7094 I915_READ(CHICKEN_PIPESL_1(pipe)) | 7095 BDW_DPRS_MASK_VBLANK_SRD); 7096 } 7097 7098 /* WaVSRefCountFullforceMissDisable:bdw */ 7099 /* WaDSRefCountFullforceMissDisable:bdw */ 7100 I915_WRITE(GEN7_FF_THREAD_MODE, 7101 I915_READ(GEN7_FF_THREAD_MODE) & 7102 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 7103 7104 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 7105 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 7106 7107 /* WaDisableSDEUnitClockGating:bdw */ 7108 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7109 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7110 7111 /* WaProgramL3SqcReg1Default:bdw */ 7112 gen8_set_l3sqc_credits(dev_priv, 30, 2); 7113 7114 /* 7115 * WaGttCachingOffByDefault:bdw 7116 * GTT cache may not work with big pages, so if those 7117 * are ever enabled GTT cache may need to be disabled. 7118 */ 7119 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 7120 7121 /* WaKVMNotificationOnConfigChange:bdw */ 7122 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) 7123 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); 7124 7125 lpt_init_clock_gating(dev); 7126 } 7127 7128 static void haswell_init_clock_gating(struct drm_device *dev) 7129 { 7130 struct drm_i915_private *dev_priv = to_i915(dev); 7131 7132 ilk_init_lp_watermarks(dev); 7133 7134 /* L3 caching of data atomics doesn't work -- disable it. 
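	 * The disable is applied in two places below: the HSW_SCRATCH1
	 * write and the matching HSW_ROW_CHICKEN3 chicken bit.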
*/ 7135 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 7136 I915_WRITE(HSW_ROW_CHICKEN3, 7137 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); 7138 7139 /* This is required by WaCatErrorRejectionIssue:hsw */ 7140 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7141 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7142 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7143 7144 /* WaVSRefCountFullforceMissDisable:hsw */ 7145 I915_WRITE(GEN7_FF_THREAD_MODE, 7146 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 7147 7148 /* WaDisable_RenderCache_OperationalFlush:hsw */ 7149 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 7150 7151 /* enable HiZ Raw Stall Optimization */ 7152 I915_WRITE(CACHE_MODE_0_GEN7, 7153 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 7154 7155 /* WaDisable4x2SubspanOptimization:hsw */ 7156 I915_WRITE(CACHE_MODE_1, 7157 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 7158 7159 /* 7160 * BSpec recommends 8x4 when MSAA is used, 7161 * however in practice 16x4 seems fastest. 7162 * 7163 * Note that PS/WM thread counts depend on the WIZ hashing 7164 * disable bit, which we don't touch here, but it's good 7165 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 7166 */ 7167 I915_WRITE(GEN7_GT_MODE, 7168 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 7169 7170 /* WaSampleCChickenBitEnable:hsw */ 7171 I915_WRITE(HALF_SLICE_CHICKEN3, 7172 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); 7173 7174 /* WaSwitchSolVfFArbitrationPriority:hsw */ 7175 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7176 7177 /* WaRsPkgCStateDisplayPMReq:hsw */ 7178 I915_WRITE(CHICKEN_PAR1_1, 7179 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 7180 7181 lpt_init_clock_gating(dev); 7182 } 7183 7184 static void ivybridge_init_clock_gating(struct drm_device *dev) 7185 { 7186 struct drm_i915_private *dev_priv = to_i915(dev); 7187 uint32_t snpcr; 7188 7189 ilk_init_lp_watermarks(dev); 7190 7191 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 7192 7193 /* WaDisableEarlyCull:ivb */ 7194 I915_WRITE(_3D_CHICKEN3, 7195 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 7196 7197 /* WaDisableBackToBackFlipFix:ivb */ 7198 I915_WRITE(IVB_CHICKEN3, 7199 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 7200 CHICKEN3_DGMG_DONE_FIX_DISABLE); 7201 7202 /* WaDisablePSDDualDispatchEnable:ivb */ 7203 if (IS_IVB_GT1(dev)) 7204 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 7205 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 7206 7207 /* WaDisable_RenderCache_OperationalFlush:ivb */ 7208 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 7209 7210 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. 
*/ 7211 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 7212 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 7213 7214 /* WaApplyL3ControlAndL3ChickenMode:ivb */ 7215 I915_WRITE(GEN7_L3CNTLREG1, 7216 GEN7_WA_FOR_GEN7_L3_CONTROL); 7217 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 7218 GEN7_WA_L3_CHICKEN_MODE); 7219 if (IS_IVB_GT1(dev)) 7220 I915_WRITE(GEN7_ROW_CHICKEN2, 7221 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7222 else { 7223 /* must write both registers */ 7224 I915_WRITE(GEN7_ROW_CHICKEN2, 7225 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7226 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 7227 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7228 } 7229 7230 /* WaForceL3Serialization:ivb */ 7231 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 7232 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 7233 7234 /* 7235 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 7236 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 7237 */ 7238 I915_WRITE(GEN6_UCGCTL2, 7239 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 7240 7241 /* This is required by WaCatErrorRejectionIssue:ivb */ 7242 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7243 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7244 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7245 7246 g4x_disable_trickle_feed(dev); 7247 7248 gen7_setup_fixed_func_scheduler(dev_priv); 7249 7250 if (0) { /* causes HiZ corruption on ivb:gt1 */ 7251 /* enable HiZ Raw Stall Optimization */ 7252 I915_WRITE(CACHE_MODE_0_GEN7, 7253 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 7254 } 7255 7256 /* WaDisable4x2SubspanOptimization:ivb */ 7257 I915_WRITE(CACHE_MODE_1, 7258 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 7259 7260 /* 7261 * BSpec recommends 8x4 when MSAA is used, 7262 * however in practice 16x4 seems fastest. 7263 * 7264 * Note that PS/WM thread counts depend on the WIZ hashing 7265 * disable bit, which we don't touch here, but it's good 7266 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
7267 */ 7268 I915_WRITE(GEN7_GT_MODE, 7269 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 7270 7271 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 7272 snpcr &= ~GEN6_MBC_SNPCR_MASK; 7273 snpcr |= GEN6_MBC_SNPCR_MED; 7274 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 7275 7276 if (!HAS_PCH_NOP(dev)) 7277 cpt_init_clock_gating(dev); 7278 7279 gen6_check_mch_setup(dev); 7280 } 7281 7282 static void valleyview_init_clock_gating(struct drm_device *dev) 7283 { 7284 struct drm_i915_private *dev_priv = to_i915(dev); 7285 7286 /* WaDisableEarlyCull:vlv */ 7287 I915_WRITE(_3D_CHICKEN3, 7288 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 7289 7290 /* WaDisableBackToBackFlipFix:vlv */ 7291 I915_WRITE(IVB_CHICKEN3, 7292 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 7293 CHICKEN3_DGMG_DONE_FIX_DISABLE); 7294 7295 /* WaPsdDispatchEnable:vlv */ 7296 /* WaDisablePSDDualDispatchEnable:vlv */ 7297 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 7298 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 7299 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 7300 7301 /* WaDisable_RenderCache_OperationalFlush:vlv */ 7302 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 7303 7304 /* WaForceL3Serialization:vlv */ 7305 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 7306 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 7307 7308 /* WaDisableDopClockGating:vlv */ 7309 I915_WRITE(GEN7_ROW_CHICKEN2, 7310 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7311 7312 /* This is required by WaCatErrorRejectionIssue:vlv */ 7313 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7314 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7315 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7316 7317 gen7_setup_fixed_func_scheduler(dev_priv); 7318 7319 /* 7320 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 7321 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 7322 */ 7323 I915_WRITE(GEN6_UCGCTL2, 7324 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 7325 7326 /* WaDisableL3Bank2xClockGate:vlv 7327 * Disabling L3 clock gating- MMIO 940c[25] = 1 7328 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 7329 I915_WRITE(GEN7_UCGCTL4, 7330 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 7331 7332 /* 7333 * BSpec says this must be set, even though 7334 * WaDisable4x2SubspanOptimization isn't listed for VLV. 7335 */ 7336 I915_WRITE(CACHE_MODE_1, 7337 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 7338 7339 /* 7340 * BSpec recommends 8x4 when MSAA is used, 7341 * however in practice 16x4 seems fastest. 7342 * 7343 * Note that PS/WM thread counts depend on the WIZ hashing 7344 * disable bit, which we don't touch here, but it's good 7345 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 7346 */ 7347 I915_WRITE(GEN7_GT_MODE, 7348 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 7349 7350 /* 7351 * WaIncreaseL3CreditsForVLVB0:vlv 7352 * This is the hardware default actually. 7353 */ 7354 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); 7355 7356 /* 7357 * WaDisableVLVClockGating_VBIIssue:vlv 7358 * Disable clock gating on th GCFG unit to prevent a delay 7359 * in the reporting of vblank events. 
static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

static void nop_init_clock_gating(struct drm_device *dev)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
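/*
 * The MI_STATE write in i85x_init_clock_gating() above relies on each
 * masked half carrying its own write-enable bit, so an enable and a
 * disable can be OR'ed into a single MMIO write. A sketch of the combined
 * encoding, building on the illustrative example_masked_bit_* helpers
 * earlier in this file:
 */
static inline u32 example_masked_set_and_clear(u32 set, u32 clear)
{
	/* both halves contribute a mask bit; only @set contributes a value */
	return example_masked_bit_enable(set) |
		example_masked_bit_disable(clear);
}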
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
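/*
 * Note for anyone extending the ladder above: the platform checks must run
 * from most to least specific, because several platforms also match a
 * generic IS_GENx() test; e.g. IS_I85X()/IS_I865G() is tested before
 * IS_GEN2(), since those parts are themselves gen2 and would otherwise be
 * routed to the i830 hook. The nop fallback guarantees that
 * dev_priv->display.init_clock_gating is always safe to call.
 */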
" 7611 "Disable CxSR\n"); 7612 } 7613 } else if (IS_CHERRYVIEW(dev)) { 7614 vlv_setup_wm_latency(dev); 7615 dev_priv->display.update_wm = vlv_update_wm; 7616 } else if (IS_VALLEYVIEW(dev)) { 7617 vlv_setup_wm_latency(dev); 7618 dev_priv->display.update_wm = vlv_update_wm; 7619 } else if (IS_PINEVIEW(dev)) { 7620 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 7621 dev_priv->is_ddr3, 7622 dev_priv->fsb_freq, 7623 dev_priv->mem_freq)) { 7624 DRM_INFO("failed to find known CxSR latency " 7625 "(found ddr%s fsb freq %d, mem freq %d), " 7626 "disabling CxSR\n", 7627 (dev_priv->is_ddr3 == 1) ? "3" : "2", 7628 dev_priv->fsb_freq, dev_priv->mem_freq); 7629 /* Disable CxSR and never update its watermark again */ 7630 intel_set_memory_cxsr(dev_priv, false); 7631 dev_priv->display.update_wm = NULL; 7632 } else 7633 dev_priv->display.update_wm = pineview_update_wm; 7634 } else if (IS_G4X(dev)) { 7635 dev_priv->display.update_wm = g4x_update_wm; 7636 } else if (IS_GEN4(dev)) { 7637 dev_priv->display.update_wm = i965_update_wm; 7638 } else if (IS_GEN3(dev)) { 7639 dev_priv->display.update_wm = i9xx_update_wm; 7640 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 7641 } else if (IS_GEN2(dev)) { 7642 if (INTEL_INFO(dev)->num_pipes == 1) { 7643 dev_priv->display.update_wm = i845_update_wm; 7644 dev_priv->display.get_fifo_size = i845_get_fifo_size; 7645 } else { 7646 dev_priv->display.update_wm = i9xx_update_wm; 7647 dev_priv->display.get_fifo_size = i830_get_fifo_size; 7648 } 7649 } else { 7650 DRM_ERROR("unexpected fall-through in intel_init_pm\n"); 7651 } 7652 } 7653 7654 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 7655 { 7656 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7657 7658 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7659 * use te fw I915_READ variants to reduce the amount of work 7660 * required when reading/writing. 7661 */ 7662 7663 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7664 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); 7665 return -EAGAIN; 7666 } 7667 7668 I915_WRITE_FW(GEN6_PCODE_DATA, *val); 7669 I915_WRITE_FW(GEN6_PCODE_DATA1, 0); 7670 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7671 7672 if (intel_wait_for_register_fw(dev_priv, 7673 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, 7674 500)) { 7675 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); 7676 return -ETIMEDOUT; 7677 } 7678 7679 *val = I915_READ_FW(GEN6_PCODE_DATA); 7680 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7681 7682 return 0; 7683 } 7684 7685 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, 7686 u32 mbox, u32 val) 7687 { 7688 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7689 7690 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7691 * use te fw I915_READ variants to reduce the amount of work 7692 * required when reading/writing. 
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
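/*
 * A worked example of the opcode <-> MHz conversions above, using a purely
 * illustrative GPLL reference of 20000 kHz (the real value lives in
 * dev_priv->rps.gpll_ref_freq):
 *
 *	VLV, opcode 0xc0: N = 0xc0 - 0xb7 = 9, so
 *	byt_gpu_freq() = DIV_ROUND_CLOSEST(20000 * 9, 1000) = 180 MHz,
 *	and byt_freq_opcode(180) maps back to 0xc0.
 *
 * On CHV the opcode is 2*N and the slow clock is half the 2x clock, hence
 * the 2 * 2 * 1000 divisor in chv_gpu_freq() and the rounding to an even
 * opcode in chv_freq_opcode().
 */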
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), M_DRM, GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
	lockinit(&dev_priv->rps.client_lock, "i915rcl", 0, 0);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}
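/*
 * Design note on the request boosting above: the work item owns its own
 * reference on the request (i915_gem_request_get() before queuing,
 * i915_gem_request_put() in the worker), so the boost may safely run after
 * the caller has dropped its reference, and the kmalloc'ed request_boost
 * is freed only once the worker is done with it. The GFP_ATOMIC allocation
 * suggests callers may be in contexts that cannot sleep, which is also why
 * the frequency bump itself is deferred to a workqueue.
 */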