/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/module.h>
#include <machine/clock.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differ in the
 * latency required to enter and leave RC6 and in the voltage consumed by the
 * GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
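/*
 * Illustrative sketch (not part of the driver): the flags above are meant
 * to be OR'd together into a single mask describing which RC6 states a
 * platform is allowed to enter, e.g. a hypothetical policy that permits
 * plain and deep RC6 but not the deepest state:
 *
 *	unsigned int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 *	if (rc6_mask & INTEL_RC6pp_ENABLE)
 *		; /\* deepest RC6 requested, only honored if supported *\/
 *
 * Whether a given state is actually usable still depends on the GPU, BIOS,
 * chipset and platform, as described above.
 */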
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
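/*
 * Example (illustrative, assuming the SR field sits at bits 31:23 so that
 * DSPFW_SR_SHIFT == 23): FW_WM(0x25, SR) evaluates to
 * (0x25 << 23) & DSPFW_SR_MASK, i.e. the watermark value shifted into its
 * register field with anything that doesn't fit masked off. Callers clear
 * the field with ~DSPFW_SR_MASK before OR-ing in a new FW_WM() value, as
 * pineview_update_wm() below does.
 */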
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}


/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static int vlv_get_fifo_size(struct drm_device *dev,
			     enum i915_pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
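/*
 * Worked example (illustrative, assuming DSPARB_CSTART_SHIFT == 7): with
 * DSPARB = (96 << 7) | 64, plane A owns FIFO entries 0..63
 * (size = dsparb & 0x7f = 64) and plane B owns entries 64..95
 * (size = 96 - 64 = 32). The register stores split points rather than
 * sizes, which is why plane B's size is the difference of two fields.
 */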
"B" : "A", size); 407 408 return size; 409 } 410 411 static int i830_get_fifo_size(struct drm_device *dev, int plane) 412 { 413 struct drm_i915_private *dev_priv = dev->dev_private; 414 uint32_t dsparb = I915_READ(DSPARB); 415 int size; 416 417 size = dsparb & 0x1ff; 418 if (plane) 419 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; 420 size >>= 1; /* Convert to cachelines */ 421 422 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 423 plane ? "B" : "A", size); 424 425 return size; 426 } 427 428 static int i845_get_fifo_size(struct drm_device *dev, int plane) 429 { 430 struct drm_i915_private *dev_priv = dev->dev_private; 431 uint32_t dsparb = I915_READ(DSPARB); 432 int size; 433 434 size = dsparb & 0x7f; 435 size >>= 2; /* Convert to cachelines */ 436 437 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 438 plane ? "B" : "A", 439 size); 440 441 return size; 442 } 443 444 /* Pineview has different values for various configs */ 445 static const struct intel_watermark_params pineview_display_wm = { 446 .fifo_size = PINEVIEW_DISPLAY_FIFO, 447 .max_wm = PINEVIEW_MAX_WM, 448 .default_wm = PINEVIEW_DFT_WM, 449 .guard_size = PINEVIEW_GUARD_WM, 450 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 451 }; 452 static const struct intel_watermark_params pineview_display_hplloff_wm = { 453 .fifo_size = PINEVIEW_DISPLAY_FIFO, 454 .max_wm = PINEVIEW_MAX_WM, 455 .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 456 .guard_size = PINEVIEW_GUARD_WM, 457 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 458 }; 459 static const struct intel_watermark_params pineview_cursor_wm = { 460 .fifo_size = PINEVIEW_CURSOR_FIFO, 461 .max_wm = PINEVIEW_CURSOR_MAX_WM, 462 .default_wm = PINEVIEW_CURSOR_DFT_WM, 463 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 464 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 465 }; 466 static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 467 .fifo_size = PINEVIEW_CURSOR_FIFO, 468 .max_wm = PINEVIEW_CURSOR_MAX_WM, 469 .default_wm = PINEVIEW_CURSOR_DFT_WM, 470 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 471 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 472 }; 473 static const struct intel_watermark_params g4x_wm_info = { 474 .fifo_size = G4X_FIFO_SIZE, 475 .max_wm = G4X_MAX_WM, 476 .default_wm = G4X_MAX_WM, 477 .guard_size = 2, 478 .cacheline_size = G4X_FIFO_LINE_SIZE, 479 }; 480 static const struct intel_watermark_params g4x_cursor_wm_info = { 481 .fifo_size = I965_CURSOR_FIFO, 482 .max_wm = I965_CURSOR_MAX_WM, 483 .default_wm = I965_CURSOR_DFT_WM, 484 .guard_size = 2, 485 .cacheline_size = G4X_FIFO_LINE_SIZE, 486 }; 487 static const struct intel_watermark_params valleyview_wm_info = { 488 .fifo_size = VALLEYVIEW_FIFO_SIZE, 489 .max_wm = VALLEYVIEW_MAX_WM, 490 .default_wm = VALLEYVIEW_MAX_WM, 491 .guard_size = 2, 492 .cacheline_size = G4X_FIFO_LINE_SIZE, 493 }; 494 static const struct intel_watermark_params valleyview_cursor_wm_info = { 495 .fifo_size = I965_CURSOR_FIFO, 496 .max_wm = VALLEYVIEW_CURSOR_MAX_WM, 497 .default_wm = I965_CURSOR_DFT_WM, 498 .guard_size = 2, 499 .cacheline_size = G4X_FIFO_LINE_SIZE, 500 }; 501 static const struct intel_watermark_params i965_cursor_wm_info = { 502 .fifo_size = I965_CURSOR_FIFO, 503 .max_wm = I965_CURSOR_MAX_WM, 504 .default_wm = I965_CURSOR_DFT_WM, 505 .guard_size = 2, 506 .cacheline_size = I915_FIFO_LINE_SIZE, 507 }; 508 static const struct intel_watermark_params i945_wm_info = { 509 .fifo_size = I945_FIFO_SIZE, 510 .max_wm = I915_MAX_WM, 511 .default_wm = 1, 512 .guard_size = 2, 513 .cacheline_size = I915_FIFO_LINE_SIZE, 514 }; 515 static const 
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
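/*
 * Worked example (illustrative numbers): for a 100 MHz pixel clock
 * (clock_in_khz = 100000), 4 bytes per pixel and the 5000 ns
 * pessimal_latency_ns defined above:
 *
 *	entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *
 * which with a 64 byte cacheline rounds up to 32 FIFO entries. On a part
 * with a 96 entry FIFO and a guard size of 2, the watermark would be
 * 96 - (32 + 2) = 62: the plane starts refilling once the FIFO drains to
 * 62 entries, leaving enough buffered data to ride out the memory latency.
 */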
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
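/*
 * Worked example for the "large buffer" method above (illustrative
 * numbers): a mode with htotal = 2200 and a 148500 kHz clock gives
 * line_time_us = max(2200 * 1000 / 148500, 1) = 14. With the 5000 ns
 * pessimal latency, line_count = (5000 / 14 + 1000) / 1000 = 1, i.e. the
 * cursor watermark must cover at least one full line's worth of fetches
 * (the + 1000 biases the division so the count never rounds to zero).
 */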
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int bytes_per_pixel,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
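/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, 1920 horizontal pixels, 4 bytes per pixel and a
 * latency of 30 (i.e. 3 us, the PM2 latency below, in 0.1us units):
 *
 *	ret = (30 * 148500) / (2200 * 10000) = 0	(full lines of latency)
 *	ret = (0 + 1) * 1920 * 4 = 7680			(bytes)
 *	ret = DIV_ROUND_UP(7680, 64) = 120		(FIFO entries)
 *
 * so even a sub-line latency costs one full line of FIFO, which is why
 * the cursor path in vlv_compute_wm_level() below falls back to a
 * hardcoded watermark.
 */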
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, pixel_size, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
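/*
 * Worked example for vlv_compute_fifo() above (illustrative): with a
 * visible 4 byte-per-pixel primary and a visible 2 byte-per-pixel sprite,
 * total_rate = 6 and the 511-entry FIFO splits as 511 * 4 / 6 = 340
 * entries for the primary and 511 * 2 / 6 = 170 for the sprite. The one
 * leftover entry is then handed out by the "spread the remainder evenly"
 * loop, while the cursor always keeps its fixed 63 entries.
 */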
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum i915_pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
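/*
 * Illustrative merge example: if pipe A only supports levels up to
 * VLV_WM_LEVEL_PM5 while pipe B supports VLV_WM_LEVEL_DDR_DVFS, the
 * merged wm->level above is PM5 (the minimum across active pipes); and
 * with more than one active crtc the level collapses to VLV_WM_LEVEL_PM2
 * and cxsr is forced off, since maxfifo/self-refresh is only usable with
 * a single active crtc.
 */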
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);
	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;
	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
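/*
 * Worked example (illustrative): downscaling a 1920x1200 source through
 * the panel fitter to 1920x1080 (pfit_size = (1920 << 16) | 1080) leaves
 * pipe_w/pipe_h at 1920x1200, so
 *
 *	pixel_rate = pixel_rate * (1920 * 1200) / (1920 * 1080)
 *
 * i.e. the effective rate grows by the downscale factor (about 1.11x
 * here). Upscaling never lowers the rate, because pipe_w/pipe_h are
 * clamped to at least the pfit window size before the division.
 */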
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 bpp,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 bpp,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}
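/*
 * Worked example (illustrative numbers): with pixel_rate = 148500 kHz,
 * 4 bytes per pixel and mem_value = 15 (1.5 us):
 *
 *	method1 = DIV_ROUND_UP(148500 * 4 * 15, 64 * 10000) + 2 = 16
 *
 * and with htotal = 2200 and 1920 visible pixels:
 *
 *	method2 = ((15 * 148500) / (2200 * 10000) + 1) * 1920 * 4 / 64 + 2
 *		= 122
 *
 * so the LP watermark uses min(method1, method2) = 16. Method 1 models a
 * FIFO drained continuously at the pixel rate, method 2 a whole-line
 * fetch; the smaller of the two is sufficient to hide the latency.
 */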
*/ 1802 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, 1803 const struct intel_plane_state *pstate, 1804 uint32_t pri_val) 1805 { 1806 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; 1807 1808 if (!cstate->base.active || !pstate->visible) 1809 return 0; 1810 1811 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp); 1812 } 1813 1814 static unsigned int ilk_display_fifo_size(const struct drm_device *dev) 1815 { 1816 if (INTEL_INFO(dev)->gen >= 8) 1817 return 3072; 1818 else if (INTEL_INFO(dev)->gen >= 7) 1819 return 768; 1820 else 1821 return 512; 1822 } 1823 1824 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev, 1825 int level, bool is_sprite) 1826 { 1827 if (INTEL_INFO(dev)->gen >= 8) 1828 /* BDW primary/sprite plane watermarks */ 1829 return level == 0 ? 255 : 2047; 1830 else if (INTEL_INFO(dev)->gen >= 7) 1831 /* IVB/HSW primary/sprite plane watermarks */ 1832 return level == 0 ? 127 : 1023; 1833 else if (!is_sprite) 1834 /* ILK/SNB primary plane watermarks */ 1835 return level == 0 ? 127 : 511; 1836 else 1837 /* ILK/SNB sprite plane watermarks */ 1838 return level == 0 ? 63 : 255; 1839 } 1840 1841 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev, 1842 int level) 1843 { 1844 if (INTEL_INFO(dev)->gen >= 7) 1845 return level == 0 ? 63 : 255; 1846 else 1847 return level == 0 ? 31 : 63; 1848 } 1849 1850 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev) 1851 { 1852 if (INTEL_INFO(dev)->gen >= 8) 1853 return 31; 1854 else 1855 return 15; 1856 } 1857 1858 /* Calculate the maximum primary/sprite plane watermark */ 1859 static unsigned int ilk_plane_wm_max(const struct drm_device *dev, 1860 int level, 1861 const struct intel_wm_config *config, 1862 enum intel_ddb_partitioning ddb_partitioning, 1863 bool is_sprite) 1864 { 1865 unsigned int fifo_size = ilk_display_fifo_size(dev); 1866 1867 /* if sprites aren't enabled, sprites get nothing */ 1868 if (is_sprite && !config->sprites_enabled) 1869 return 0; 1870 1871 /* HSW allows LP1+ watermarks even with multiple pipes */ 1872 if (level == 0 || config->num_pipes_active > 1) { 1873 fifo_size /= INTEL_INFO(dev)->num_pipes; 1874 1875 /* 1876 * For some reason the non self refresh 1877 * FIFO size is only half of the self 1878 * refresh FIFO size on ILK/SNB. 
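 *
 * E.g. with a made-up two-pipe ILK configuration at level 0:
 * 512 / 2 = 256, halved again to 128 cachelines per pipe, before any
 * sprite split below is applied.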
1879 */ 1880 if (INTEL_INFO(dev)->gen <= 6) 1881 fifo_size /= 2; 1882 } 1883 1884 if (config->sprites_enabled) { 1885 /* level 0 is always calculated with 1:1 split */ 1886 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 1887 if (is_sprite) 1888 fifo_size *= 5; 1889 fifo_size /= 6; 1890 } else { 1891 fifo_size /= 2; 1892 } 1893 } 1894 1895 /* clamp to max that the registers can hold */ 1896 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite)); 1897 } 1898 1899 /* Calculate the maximum cursor plane watermark */ 1900 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, 1901 int level, 1902 const struct intel_wm_config *config) 1903 { 1904 /* HSW LP1+ watermarks w/ multiple pipes */ 1905 if (level > 0 && config->num_pipes_active > 1) 1906 return 64; 1907 1908 /* otherwise just report max that registers can hold */ 1909 return ilk_cursor_wm_reg_max(dev, level); 1910 } 1911 1912 static void ilk_compute_wm_maximums(const struct drm_device *dev, 1913 int level, 1914 const struct intel_wm_config *config, 1915 enum intel_ddb_partitioning ddb_partitioning, 1916 struct ilk_wm_maximums *max) 1917 { 1918 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1919 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1920 max->cur = ilk_cursor_wm_max(dev, level, config); 1921 max->fbc = ilk_fbc_wm_reg_max(dev); 1922 } 1923 1924 static void ilk_compute_wm_reg_maximums(struct drm_device *dev, 1925 int level, 1926 struct ilk_wm_maximums *max) 1927 { 1928 max->pri = ilk_plane_wm_reg_max(dev, level, false); 1929 max->spr = ilk_plane_wm_reg_max(dev, level, true); 1930 max->cur = ilk_cursor_wm_reg_max(dev, level); 1931 max->fbc = ilk_fbc_wm_reg_max(dev); 1932 } 1933 1934 static bool ilk_validate_wm_level(int level, 1935 const struct ilk_wm_maximums *max, 1936 struct intel_wm_level *result) 1937 { 1938 bool ret; 1939 1940 /* already determined to be invalid? */ 1941 if (!result->enable) 1942 return false; 1943 1944 result->enable = result->pri_val <= max->pri && 1945 result->spr_val <= max->spr && 1946 result->cur_val <= max->cur; 1947 1948 ret = result->enable; 1949 1950 /* 1951 * HACK until we can pre-compute everything, 1952 * and thus fail gracefully if LP0 watermarks 1953 * are exceeded... 
 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(cstate, pristate,
					     pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
	result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc->active)
		return 0;

	/* The watermark is computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
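	 *
	 * E.g. with made-up timings, htotal = 2200 at a 148500 kHz clock:
	 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. a ~14.8
	 * usec line time stored in 1/8 usec units.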
2016 * */ 2017 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2018 adjusted_mode->crtc_clock); 2019 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2020 dev_priv->cdclk_freq); 2021 2022 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2023 PIPE_WM_LINETIME_TIME(linetime); 2024 } 2025 2026 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) 2027 { 2028 struct drm_i915_private *dev_priv = dev->dev_private; 2029 2030 if (IS_GEN9(dev)) { 2031 uint32_t val; 2032 int ret, i; 2033 int level, max_level = ilk_wm_max_level(dev); 2034 2035 /* read the first set of memory latencies[0:3] */ 2036 val = 0; /* data0 to be programmed to 0 for first set */ 2037 mutex_lock(&dev_priv->rps.hw_lock); 2038 ret = sandybridge_pcode_read(dev_priv, 2039 GEN9_PCODE_READ_MEM_LATENCY, 2040 &val); 2041 mutex_unlock(&dev_priv->rps.hw_lock); 2042 2043 if (ret) { 2044 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 2045 return; 2046 } 2047 2048 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 2049 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 2050 GEN9_MEM_LATENCY_LEVEL_MASK; 2051 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 2052 GEN9_MEM_LATENCY_LEVEL_MASK; 2053 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 2054 GEN9_MEM_LATENCY_LEVEL_MASK; 2055 2056 /* read the second set of memory latencies[4:7] */ 2057 val = 1; /* data0 to be programmed to 1 for second set */ 2058 mutex_lock(&dev_priv->rps.hw_lock); 2059 ret = sandybridge_pcode_read(dev_priv, 2060 GEN9_PCODE_READ_MEM_LATENCY, 2061 &val); 2062 mutex_unlock(&dev_priv->rps.hw_lock); 2063 if (ret) { 2064 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 2065 return; 2066 } 2067 2068 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 2069 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 2070 GEN9_MEM_LATENCY_LEVEL_MASK; 2071 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 2072 GEN9_MEM_LATENCY_LEVEL_MASK; 2073 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 2074 GEN9_MEM_LATENCY_LEVEL_MASK; 2075 2076 /* 2077 * WaWmMemoryReadLatency:skl 2078 * 2079 * punit doesn't take into account the read latency so we need 2080 * to add 2us to the various latency levels we retrieve from 2081 * the punit. 2082 * - W0 is a bit special in that it's the only level that 2083 * can't be disabled if we want to have display working, so 2084 * we always add 2us there. 2085 * - For levels >=1, punit returns 0us latency when they are 2086 * disabled, so we respect that and don't add 2us then 2087 * 2088 * Additionally, if a level n (n > 1) has a 0us latency, all 2089 * levels m (m >= n) need to be disabled. We make sure to 2090 * sanitize the values out of the punit to satisfy this 2091 * requirement. 
2092 */ 2093 wm[0] += 2; 2094 for (level = 1; level <= max_level; level++) 2095 if (wm[level] != 0) 2096 wm[level] += 2; 2097 else { 2098 for (i = level + 1; i <= max_level; i++) 2099 wm[i] = 0; 2100 2101 break; 2102 } 2103 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2104 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2105 2106 wm[0] = (sskpd >> 56) & 0xFF; 2107 if (wm[0] == 0) 2108 wm[0] = sskpd & 0xF; 2109 wm[1] = (sskpd >> 4) & 0xFF; 2110 wm[2] = (sskpd >> 12) & 0xFF; 2111 wm[3] = (sskpd >> 20) & 0x1FF; 2112 wm[4] = (sskpd >> 32) & 0x1FF; 2113 } else if (INTEL_INFO(dev)->gen >= 6) { 2114 uint32_t sskpd = I915_READ(MCH_SSKPD); 2115 2116 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; 2117 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; 2118 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; 2119 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; 2120 } else if (INTEL_INFO(dev)->gen >= 5) { 2121 uint32_t mltr = I915_READ(MLTR_ILK); 2122 2123 /* ILK primary LP0 latency is 700 ns */ 2124 wm[0] = 7; 2125 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; 2126 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; 2127 } 2128 } 2129 2130 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2131 { 2132 /* ILK sprite LP0 latency is 1300 ns */ 2133 if (INTEL_INFO(dev)->gen == 5) 2134 wm[0] = 13; 2135 } 2136 2137 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2138 { 2139 /* ILK cursor LP0 latency is 1300 ns */ 2140 if (INTEL_INFO(dev)->gen == 5) 2141 wm[0] = 13; 2142 2143 /* WaDoubleCursorLP3Latency:ivb */ 2144 if (IS_IVYBRIDGE(dev)) 2145 wm[3] *= 2; 2146 } 2147 2148 int ilk_wm_max_level(const struct drm_device *dev) 2149 { 2150 /* how many WM levels are we expecting */ 2151 if (INTEL_INFO(dev)->gen >= 9) 2152 return 7; 2153 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2154 return 4; 2155 else if (INTEL_INFO(dev)->gen >= 6) 2156 return 3; 2157 else 2158 return 2; 2159 } 2160 2161 static void intel_print_wm_latency(struct drm_device *dev, 2162 const char *name, 2163 const uint16_t wm[8]) 2164 { 2165 int level, max_level = ilk_wm_max_level(dev); 2166 2167 for (level = 0; level <= max_level; level++) { 2168 unsigned int latency = wm[level]; 2169 2170 if (latency == 0) { 2171 DRM_ERROR("%s WM%d latency not provided\n", 2172 name, level); 2173 continue; 2174 } 2175 2176 /* 2177 * - latencies are in us on gen9. 2178 * - before then, WM1+ latency values are in 0.5us units 2179 */ 2180 if (IS_GEN9(dev)) 2181 latency *= 10; 2182 else if (level > 0) 2183 latency *= 5; 2184 2185 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", 2186 name, level, wm[level], 2187 latency / 10, latency % 10); 2188 } 2189 } 2190 2191 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2192 uint16_t wm[5], uint16_t min) 2193 { 2194 int level, max_level = ilk_wm_max_level(dev_priv->dev); 2195 2196 if (wm[0] >= min) 2197 return false; 2198 2199 wm[0] = max(wm[0], min); 2200 for (level = 1; level <= max_level; level++) 2201 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); 2202 2203 return true; 2204 } 2205 2206 static void snb_wm_latency_quirk(struct drm_device *dev) 2207 { 2208 struct drm_i915_private *dev_priv = dev->dev_private; 2209 bool changed; 2210 2211 /* 2212 * The BIOS provided WM memory latency values are often 2213 * inadequate for high resolution displays. Adjust them. 
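	 *
	 * The floor applied below is 12, i.e. 1.2 usec for WM0 (stored in
	 * 0.1us units) and DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 usec, for the
	 * WM1+ levels (stored in 0.5us units).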
2214 */ 2215 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 2216 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 2217 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 2218 2219 if (!changed) 2220 return; 2221 2222 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); 2223 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2224 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2225 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2226 } 2227 2228 static void ilk_setup_wm_latency(struct drm_device *dev) 2229 { 2230 struct drm_i915_private *dev_priv = dev->dev_private; 2231 2232 intel_read_wm_latency(dev, dev_priv->wm.pri_latency); 2233 2234 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, 2235 sizeof(dev_priv->wm.pri_latency)); 2236 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, 2237 sizeof(dev_priv->wm.pri_latency)); 2238 2239 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency); 2240 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency); 2241 2242 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2243 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2244 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2245 2246 if (IS_GEN6(dev)) 2247 snb_wm_latency_quirk(dev); 2248 } 2249 2250 static void skl_setup_wm_latency(struct drm_device *dev) 2251 { 2252 struct drm_i915_private *dev_priv = dev->dev_private; 2253 2254 intel_read_wm_latency(dev, dev_priv->wm.skl_latency); 2255 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); 2256 } 2257 2258 /* Compute new watermarks for the pipe */ 2259 static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, 2260 struct drm_atomic_state *state) 2261 { 2262 struct intel_pipe_wm *pipe_wm; 2263 struct drm_device *dev = intel_crtc->base.dev; 2264 const struct drm_i915_private *dev_priv = dev->dev_private; 2265 struct intel_crtc_state *cstate = NULL; 2266 struct intel_plane *intel_plane; 2267 struct drm_plane_state *ps; 2268 struct intel_plane_state *pristate = NULL; 2269 struct intel_plane_state *sprstate = NULL; 2270 struct intel_plane_state *curstate = NULL; 2271 int level, max_level = ilk_wm_max_level(dev); 2272 /* LP0 watermark maximums depend on this pipe alone */ 2273 struct intel_wm_config config = { 2274 .num_pipes_active = 1, 2275 }; 2276 struct ilk_wm_maximums max; 2277 2278 cstate = intel_atomic_get_crtc_state(state, intel_crtc); 2279 if (IS_ERR(cstate)) 2280 return PTR_ERR(cstate); 2281 2282 pipe_wm = &cstate->wm.optimal.ilk; 2283 2284 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2285 ps = drm_atomic_get_plane_state(state, 2286 &intel_plane->base); 2287 if (IS_ERR(ps)) 2288 return PTR_ERR(ps); 2289 2290 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY) 2291 pristate = to_intel_plane_state(ps); 2292 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) 2293 sprstate = to_intel_plane_state(ps); 2294 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 2295 curstate = to_intel_plane_state(ps); 2296 } 2297 2298 config.sprites_enabled = sprstate->visible; 2299 config.sprites_scaled = sprstate->visible && 2300 (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 || 2301 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); 2302 2303 pipe_wm->pipe_enabled = cstate->base.active; 2304 pipe_wm->sprites_enabled = config.sprites_enabled; 2305 pipe_wm->sprites_scaled = 
config.sprites_scaled; 2306 2307 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2308 if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible) 2309 max_level = 1; 2310 2311 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 2312 if (config.sprites_scaled) 2313 max_level = 0; 2314 2315 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, 2316 pristate, sprstate, curstate, &pipe_wm->wm[0]); 2317 2318 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2319 pipe_wm->linetime = hsw_compute_linetime_wm(dev, 2320 &intel_crtc->base); 2321 2322 /* LP0 watermarks always use 1/2 DDB partitioning */ 2323 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2324 2325 /* At least LP0 must be valid */ 2326 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) 2327 return -EINVAL; 2328 2329 ilk_compute_wm_reg_maximums(dev, 1, &max); 2330 2331 for (level = 1; level <= max_level; level++) { 2332 struct intel_wm_level wm = {}; 2333 2334 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, 2335 pristate, sprstate, curstate, &wm); 2336 2337 /* 2338 * Disable any watermark level that exceeds the 2339 * register maximums since such watermarks are 2340 * always invalid. 2341 */ 2342 if (!ilk_validate_wm_level(level, &max, &wm)) 2343 break; 2344 2345 pipe_wm->wm[level] = wm; 2346 } 2347 2348 return 0; 2349 } 2350 2351 /* 2352 * Merge the watermarks from all active pipes for a specific level. 2353 */ 2354 static void ilk_merge_wm_level(struct drm_device *dev, 2355 int level, 2356 struct intel_wm_level *ret_wm) 2357 { 2358 struct intel_crtc *intel_crtc; 2359 2360 ret_wm->enable = true; 2361 2362 for_each_intel_crtc(dev, intel_crtc) { 2363 const struct intel_crtc_state *cstate = 2364 to_intel_crtc_state(intel_crtc->base.state); 2365 const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; 2366 const struct intel_wm_level *wm = &active->wm[level]; 2367 2368 if (!active->pipe_enabled) 2369 continue; 2370 2371 /* 2372 * The watermark values may have been used in the past, 2373 * so we must maintain them in the registers for some 2374 * time even if the level is now disabled. 2375 */ 2376 if (!wm->enable) 2377 ret_wm->enable = false; 2378 2379 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 2380 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 2381 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 2382 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 2383 } 2384 } 2385 2386 /* 2387 * Merge all low power watermarks for all active pipes. 
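 *
 * There is only one set of LP1+ registers for all pipes, so each merged
 * level takes the maximum of every active pipe's value for that level,
 * and a level stays enabled only if it is valid on all active pipes.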
2388 */ 2389 static void ilk_wm_merge(struct drm_device *dev, 2390 const struct intel_wm_config *config, 2391 const struct ilk_wm_maximums *max, 2392 struct intel_pipe_wm *merged) 2393 { 2394 struct drm_i915_private *dev_priv = dev->dev_private; 2395 int level, max_level = ilk_wm_max_level(dev); 2396 int last_enabled_level = max_level; 2397 2398 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 2399 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && 2400 config->num_pipes_active > 1) 2401 return; 2402 2403 /* ILK: FBC WM must be disabled always */ 2404 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; 2405 2406 /* merge each WM1+ level */ 2407 for (level = 1; level <= max_level; level++) { 2408 struct intel_wm_level *wm = &merged->wm[level]; 2409 2410 ilk_merge_wm_level(dev, level, wm); 2411 2412 if (level > last_enabled_level) 2413 wm->enable = false; 2414 else if (!ilk_validate_wm_level(level, max, wm)) 2415 /* make sure all following levels get disabled */ 2416 last_enabled_level = level - 1; 2417 2418 /* 2419 * The spec says it is preferred to disable 2420 * FBC WMs instead of disabling a WM level. 2421 */ 2422 if (wm->fbc_val > max->fbc) { 2423 if (wm->enable) 2424 merged->fbc_wm_enabled = false; 2425 wm->fbc_val = 0; 2426 } 2427 } 2428 2429 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 2430 /* 2431 * FIXME this is racy. FBC might get enabled later. 2432 * What we should check here is whether FBC can be 2433 * enabled sometime later. 2434 */ 2435 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && 2436 intel_fbc_is_active(dev_priv)) { 2437 for (level = 2; level <= max_level; level++) { 2438 struct intel_wm_level *wm = &merged->wm[level]; 2439 2440 wm->enable = false; 2441 } 2442 } 2443 } 2444 2445 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 2446 { 2447 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ 2448 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 2449 } 2450 2451 /* The value we need to program into the WM_LPx latency field */ 2452 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 2453 { 2454 struct drm_i915_private *dev_priv = dev->dev_private; 2455 2456 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2457 return 2 * level; 2458 else 2459 return dev_priv->wm.pri_latency[level]; 2460 } 2461 2462 static void ilk_compute_wm_results(struct drm_device *dev, 2463 const struct intel_pipe_wm *merged, 2464 enum intel_ddb_partitioning partitioning, 2465 struct ilk_wm_values *results) 2466 { 2467 struct intel_crtc *intel_crtc; 2468 int level, wm_lp; 2469 2470 results->enable_fbc_wm = merged->fbc_wm_enabled; 2471 results->partitioning = partitioning; 2472 2473 /* LP1+ register values */ 2474 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2475 const struct intel_wm_level *r; 2476 2477 level = ilk_wm_lp_to_level(wm_lp, merged); 2478 2479 r = &merged->wm[level]; 2480 2481 /* 2482 * Maintain the watermark values even if the level is 2483 * disabled. Doing otherwise could cause underruns. 2484 */ 2485 results->wm_lp[wm_lp - 1] = 2486 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | 2487 (r->pri_val << WM1_LP_SR_SHIFT) | 2488 r->cur_val; 2489 2490 if (r->enable) 2491 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; 2492 2493 if (INTEL_INFO(dev)->gen >= 8) 2494 results->wm_lp[wm_lp - 1] |= 2495 r->fbc_val << WM1_LP_FBC_SHIFT_BDW; 2496 else 2497 results->wm_lp[wm_lp - 1] |= 2498 r->fbc_val << WM1_LP_FBC_SHIFT; 2499 2500 /* 2501 * Always set WM1S_LP_EN when spr_val != 0, even if the 2502 * level is disabled. 
Doing otherwise could cause underruns. 2503 */ 2504 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) { 2505 WARN_ON(wm_lp != 1); 2506 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; 2507 } else 2508 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2509 } 2510 2511 /* LP0 register values */ 2512 for_each_intel_crtc(dev, intel_crtc) { 2513 const struct intel_crtc_state *cstate = 2514 to_intel_crtc_state(intel_crtc->base.state); 2515 enum i915_pipe pipe = intel_crtc->pipe; 2516 const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0]; 2517 2518 if (WARN_ON(!r->enable)) 2519 continue; 2520 2521 results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime; 2522 2523 results->wm_pipe[pipe] = 2524 (r->pri_val << WM0_PIPE_PLANE_SHIFT) | 2525 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | 2526 r->cur_val; 2527 } 2528 } 2529 2530 /* Find the result with the highest level enabled. Check for enable_fbc_wm in 2531 * case both are at the same level. Prefer r1 in case they're the same. */ 2532 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, 2533 struct intel_pipe_wm *r1, 2534 struct intel_pipe_wm *r2) 2535 { 2536 int level, max_level = ilk_wm_max_level(dev); 2537 int level1 = 0, level2 = 0; 2538 2539 for (level = 1; level <= max_level; level++) { 2540 if (r1->wm[level].enable) 2541 level1 = level; 2542 if (r2->wm[level].enable) 2543 level2 = level; 2544 } 2545 2546 if (level1 == level2) { 2547 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) 2548 return r2; 2549 else 2550 return r1; 2551 } else if (level1 > level2) { 2552 return r1; 2553 } else { 2554 return r2; 2555 } 2556 } 2557 2558 /* dirty bits used to track which watermarks need changes */ 2559 #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) 2560 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) 2561 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) 2562 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) 2563 #define WM_DIRTY_FBC (1 << 24) 2564 #define WM_DIRTY_DDB (1 << 25) 2565 2566 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, 2567 const struct ilk_wm_values *old, 2568 const struct ilk_wm_values *new) 2569 { 2570 unsigned int dirty = 0; 2571 enum i915_pipe pipe; 2572 int wm_lp; 2573 2574 for_each_pipe(dev_priv, pipe) { 2575 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 2576 dirty |= WM_DIRTY_LINETIME(pipe); 2577 /* Must disable LP1+ watermarks too */ 2578 dirty |= WM_DIRTY_LP_ALL; 2579 } 2580 2581 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { 2582 dirty |= WM_DIRTY_PIPE(pipe); 2583 /* Must disable LP1+ watermarks too */ 2584 dirty |= WM_DIRTY_LP_ALL; 2585 } 2586 } 2587 2588 if (old->enable_fbc_wm != new->enable_fbc_wm) { 2589 dirty |= WM_DIRTY_FBC; 2590 /* Must disable LP1+ watermarks too */ 2591 dirty |= WM_DIRTY_LP_ALL; 2592 } 2593 2594 if (old->partitioning != new->partitioning) { 2595 dirty |= WM_DIRTY_DDB; 2596 /* Must disable LP1+ watermarks too */ 2597 dirty |= WM_DIRTY_LP_ALL; 2598 } 2599 2600 /* LP1+ watermarks already deemed dirty, no need to continue */ 2601 if (dirty & WM_DIRTY_LP_ALL) 2602 return dirty; 2603 2604 /* Find the lowest numbered LP1+ watermark in need of an update... 
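 * (e.g. if only WM2_LP changed, both WM2 and WM3 get marked dirty: the
 * registers are rewritten from the lowest changed one upwards)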
*/ 2605 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2606 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || 2607 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) 2608 break; 2609 } 2610 2611 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ 2612 for (; wm_lp <= 3; wm_lp++) 2613 dirty |= WM_DIRTY_LP(wm_lp); 2614 2615 return dirty; 2616 } 2617 2618 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, 2619 unsigned int dirty) 2620 { 2621 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2622 bool changed = false; 2623 2624 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { 2625 previous->wm_lp[2] &= ~WM1_LP_SR_EN; 2626 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); 2627 changed = true; 2628 } 2629 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { 2630 previous->wm_lp[1] &= ~WM1_LP_SR_EN; 2631 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); 2632 changed = true; 2633 } 2634 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { 2635 previous->wm_lp[0] &= ~WM1_LP_SR_EN; 2636 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); 2637 changed = true; 2638 } 2639 2640 /* 2641 * Don't touch WM1S_LP_EN here. 2642 * Doing so could cause underruns. 2643 */ 2644 2645 return changed; 2646 } 2647 2648 /* 2649 * The spec says we shouldn't write when we don't need, because every write 2650 * causes WMs to be re-evaluated, expending some power. 2651 */ 2652 static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 2653 struct ilk_wm_values *results) 2654 { 2655 struct drm_device *dev = dev_priv->dev; 2656 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2657 unsigned int dirty; 2658 uint32_t val; 2659 2660 dirty = ilk_compute_wm_dirty(dev_priv, previous, results); 2661 if (!dirty) 2662 return; 2663 2664 _ilk_disable_lp_wm(dev_priv, dirty); 2665 2666 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 2667 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2668 if (dirty & WM_DIRTY_PIPE(PIPE_B)) 2669 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 2670 if (dirty & WM_DIRTY_PIPE(PIPE_C)) 2671 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 2672 2673 if (dirty & WM_DIRTY_LINETIME(PIPE_A)) 2674 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 2675 if (dirty & WM_DIRTY_LINETIME(PIPE_B)) 2676 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 2677 if (dirty & WM_DIRTY_LINETIME(PIPE_C)) 2678 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2679 2680 if (dirty & WM_DIRTY_DDB) { 2681 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2682 val = I915_READ(WM_MISC); 2683 if (results->partitioning == INTEL_DDB_PART_1_2) 2684 val &= ~WM_MISC_DATA_PARTITION_5_6; 2685 else 2686 val |= WM_MISC_DATA_PARTITION_5_6; 2687 I915_WRITE(WM_MISC, val); 2688 } else { 2689 val = I915_READ(DISP_ARB_CTL2); 2690 if (results->partitioning == INTEL_DDB_PART_1_2) 2691 val &= ~DISP_DATA_PARTITION_5_6; 2692 else 2693 val |= DISP_DATA_PARTITION_5_6; 2694 I915_WRITE(DISP_ARB_CTL2, val); 2695 } 2696 } 2697 2698 if (dirty & WM_DIRTY_FBC) { 2699 val = I915_READ(DISP_ARB_CTL); 2700 if (results->enable_fbc_wm) 2701 val &= ~DISP_FBC_WM_DIS; 2702 else 2703 val |= DISP_FBC_WM_DIS; 2704 I915_WRITE(DISP_ARB_CTL, val); 2705 } 2706 2707 if (dirty & WM_DIRTY_LP(1) && 2708 previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 2709 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2710 2711 if (INTEL_INFO(dev)->gen >= 7) { 2712 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 2713 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 2714 if (dirty & 
WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 2715 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 2716 } 2717 2718 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 2719 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2720 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 2721 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2722 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 2723 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2724 2725 dev_priv->wm.hw = *results; 2726 } 2727 2728 static bool ilk_disable_lp_wm(struct drm_device *dev) 2729 { 2730 struct drm_i915_private *dev_priv = dev->dev_private; 2731 2732 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 2733 } 2734 2735 /* 2736 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the 2737 * different active planes. 2738 */ 2739 2740 #define SKL_DDB_SIZE 896 /* in blocks */ 2741 #define BXT_DDB_SIZE 512 2742 2743 /* 2744 * Return the index of a plane in the SKL DDB and wm result arrays. Primary 2745 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and 2746 * other universal planes are in indices 1..n. Note that this may leave unused 2747 * indices between the top "sprite" plane and the cursor. 2748 */ 2749 static int 2750 skl_wm_plane_id(const struct intel_plane *plane) 2751 { 2752 switch (plane->base.type) { 2753 case DRM_PLANE_TYPE_PRIMARY: 2754 return 0; 2755 case DRM_PLANE_TYPE_CURSOR: 2756 return PLANE_CURSOR; 2757 case DRM_PLANE_TYPE_OVERLAY: 2758 return plane->plane + 1; 2759 default: 2760 MISSING_CASE(plane->base.type); 2761 return plane->plane; 2762 } 2763 } 2764 2765 static void 2766 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2767 const struct intel_crtc_state *cstate, 2768 const struct intel_wm_config *config, 2769 struct skl_ddb_entry *alloc /* out */) 2770 { 2771 struct drm_crtc *for_crtc = cstate->base.crtc; 2772 struct drm_crtc *crtc; 2773 unsigned int pipe_size, ddb_size; 2774 int nth_active_pipe; 2775 2776 if (!cstate->base.active) { 2777 alloc->start = 0; 2778 alloc->end = 0; 2779 return; 2780 } 2781 2782 if (IS_BROXTON(dev)) 2783 ddb_size = BXT_DDB_SIZE; 2784 else 2785 ddb_size = SKL_DDB_SIZE; 2786 2787 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 2788 2789 nth_active_pipe = 0; 2790 for_each_crtc(dev, crtc) { 2791 if (!to_intel_crtc(crtc)->active) 2792 continue; 2793 2794 if (crtc == for_crtc) 2795 break; 2796 2797 nth_active_pipe++; 2798 } 2799 2800 pipe_size = ddb_size / config->num_pipes_active; 2801 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; 2802 alloc->end = alloc->start + pipe_size; 2803 } 2804 2805 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) 2806 { 2807 if (config->num_pipes_active == 1) 2808 return 32; 2809 2810 return 8; 2811 } 2812 2813 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) 2814 { 2815 entry->start = reg & 0x3ff; 2816 entry->end = (reg >> 16) & 0x3ff; 2817 if (entry->end) 2818 entry->end += 1; 2819 } 2820 2821 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 2822 struct skl_ddb_allocation *ddb /* out */) 2823 { 2824 enum i915_pipe pipe; 2825 int plane; 2826 u32 val; 2827 2828 memset(ddb, 0, sizeof(*ddb)); 2829 2830 for_each_pipe(dev_priv, pipe) { 2831 enum intel_display_power_domain power_domain; 2832 2833 power_domain = POWER_DOMAIN_PIPE(pipe); 2834 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 2835 continue; 2836 2837 
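		/* read back each plane's DDB window while the pipe power well is held */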
for_each_plane(dev_priv, pipe, plane) { 2838 val = I915_READ(PLANE_BUF_CFG(pipe, plane)); 2839 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], 2840 val); 2841 } 2842 2843 val = I915_READ(CUR_BUF_CFG(pipe)); 2844 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 2845 val); 2846 2847 intel_display_power_put(dev_priv, power_domain); 2848 } 2849 } 2850 2851 static unsigned int 2852 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 2853 const struct drm_plane_state *pstate, 2854 int y) 2855 { 2856 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2857 struct drm_framebuffer *fb = pstate->fb; 2858 2859 /* for planar format */ 2860 if (fb->pixel_format == DRM_FORMAT_NV12) { 2861 if (y) /* y-plane data rate */ 2862 return intel_crtc->config->pipe_src_w * 2863 intel_crtc->config->pipe_src_h * 2864 drm_format_plane_cpp(fb->pixel_format, 0); 2865 else /* uv-plane data rate */ 2866 return (intel_crtc->config->pipe_src_w/2) * 2867 (intel_crtc->config->pipe_src_h/2) * 2868 drm_format_plane_cpp(fb->pixel_format, 1); 2869 } 2870 2871 /* for packed formats */ 2872 return intel_crtc->config->pipe_src_w * 2873 intel_crtc->config->pipe_src_h * 2874 drm_format_plane_cpp(fb->pixel_format, 0); 2875 } 2876 2877 /* 2878 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching 2879 * a 8192x4096@32bpp framebuffer: 2880 * 3 * 4096 * 8192 * 4 < 2^32 2881 */ 2882 static unsigned int 2883 skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) 2884 { 2885 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2886 struct drm_device *dev = intel_crtc->base.dev; 2887 struct intel_plane *intel_plane; 2888 unsigned int total_data_rate = 0; 2889 2890 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2891 const struct drm_plane_state *pstate = intel_plane->base.state; 2892 2893 if (pstate->fb == NULL) 2894 continue; 2895 2896 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 2897 continue; 2898 2899 /* packed/uv */ 2900 total_data_rate += skl_plane_relative_data_rate(cstate, 2901 pstate, 2902 0); 2903 2904 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) 2905 /* y-plane */ 2906 total_data_rate += skl_plane_relative_data_rate(cstate, 2907 pstate, 2908 1); 2909 } 2910 2911 return total_data_rate; 2912 } 2913 2914 static void 2915 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 2916 struct skl_ddb_allocation *ddb /* out */) 2917 { 2918 struct drm_crtc *crtc = cstate->base.crtc; 2919 struct drm_device *dev = crtc->dev; 2920 struct drm_i915_private *dev_priv = to_i915(dev); 2921 struct intel_wm_config *config = &dev_priv->wm.config; 2922 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2923 struct intel_plane *intel_plane; 2924 enum i915_pipe pipe = intel_crtc->pipe; 2925 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 2926 uint16_t alloc_size, start, cursor_blocks; 2927 uint16_t minimum[I915_MAX_PLANES]; 2928 uint16_t y_minimum[I915_MAX_PLANES]; 2929 unsigned int total_data_rate; 2930 2931 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); 2932 alloc_size = skl_ddb_entry_size(alloc); 2933 if (alloc_size == 0) { 2934 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 2935 memset(&ddb->plane[pipe][PLANE_CURSOR], 0, 2936 sizeof(ddb->plane[pipe][PLANE_CURSOR])); 2937 return; 2938 } 2939 2940 cursor_blocks = skl_cursor_allocation(config); 2941 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; 2942 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 2943 2944 alloc_size -= cursor_blocks; 
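	/*
	 * Illustrative arithmetic for a made-up two-pipe SKL config: each
	 * pipe gets (896 - 4) / 2 = 446 blocks, and carving out the 8
	 * cursor blocks above leaves 438 blocks for the remaining planes.
	 */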
	alloc->end -= cursor_blocks;

	/* 1. Allocate the minimum required blocks for each active plane */
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane *plane = &intel_plane->base;
		struct drm_framebuffer *fb = plane->state->fb;
		int id = skl_wm_plane_id(intel_plane);

		if (fb == NULL)
			continue;
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		minimum[id] = 8;
		alloc_size -= minimum[id];
		y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
		alloc_size -= y_minimum[id];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate);

	start = alloc->start;
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane *plane = &intel_plane->base;
		struct drm_plane_state *pstate = intel_plane->base.state;
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;
		int id = skl_wm_plane_id(intel_plane);

		if (pstate->fb == NULL)
			continue;
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);

		/*
		 * Allocation for packed formats (or the uv-plane part of a
		 * planar format): promote the expression to 64 bits to avoid
		 * overflowing; the result is guaranteed to be < alloc_size
		 * since data_rate / total_data_rate < 1.
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		ddb->plane[pipe][id].start = start;
		ddb->plane[pipe][id].end = start + plane_blocks;

		start += plane_blocks;

		/* allocation for the y_plane part of a planar format */
		if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
			y_data_rate = skl_plane_relative_data_rate(cstate,
								   pstate,
								   1);
			y_plane_blocks = y_minimum[id];
			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
						  total_data_rate);

			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;

			start += y_plane_blocks;
		}
	}
}

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and bytes_per_pixel should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
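 *
 * Worst case for the intermediate u32 product below:
 * 257 * 1350000 * 8 = 2,775,600,000, which still fits in 32 bits.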
3033 */ 3034 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, 3035 uint32_t latency) 3036 { 3037 uint32_t wm_intermediate_val, ret; 3038 3039 if (latency == 0) 3040 return UINT_MAX; 3041 3042 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512; 3043 ret = DIV_ROUND_UP(wm_intermediate_val, 1000); 3044 3045 return ret; 3046 } 3047 3048 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 3049 uint32_t horiz_pixels, uint8_t bytes_per_pixel, 3050 uint64_t tiling, uint32_t latency) 3051 { 3052 uint32_t ret; 3053 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3054 uint32_t wm_intermediate_val; 3055 3056 if (latency == 0) 3057 return UINT_MAX; 3058 3059 plane_bytes_per_line = horiz_pixels * bytes_per_pixel; 3060 3061 if (tiling == I915_FORMAT_MOD_Y_TILED || 3062 tiling == I915_FORMAT_MOD_Yf_TILED) { 3063 plane_bytes_per_line *= 4; 3064 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3065 plane_blocks_per_line /= 4; 3066 } else { 3067 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3068 } 3069 3070 wm_intermediate_val = latency * pixel_rate; 3071 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * 3072 plane_blocks_per_line; 3073 3074 return ret; 3075 } 3076 3077 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, 3078 const struct intel_crtc *intel_crtc) 3079 { 3080 struct drm_device *dev = intel_crtc->base.dev; 3081 struct drm_i915_private *dev_priv = dev->dev_private; 3082 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 3083 3084 /* 3085 * If ddb allocation of pipes changed, it may require recalculation of 3086 * watermarks 3087 */ 3088 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) 3089 return true; 3090 3091 return false; 3092 } 3093 3094 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3095 struct intel_crtc_state *cstate, 3096 struct intel_plane *intel_plane, 3097 uint16_t ddb_allocation, 3098 int level, 3099 uint16_t *out_blocks, /* out */ 3100 uint8_t *out_lines /* out */) 3101 { 3102 struct drm_plane *plane = &intel_plane->base; 3103 struct drm_framebuffer *fb = plane->state->fb; 3104 uint32_t latency = dev_priv->wm.skl_latency[level]; 3105 uint32_t method1, method2; 3106 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3107 uint32_t res_blocks, res_lines; 3108 uint32_t selected_result; 3109 uint8_t bytes_per_pixel; 3110 3111 if (latency == 0 || !cstate->base.active || !fb) 3112 return false; 3113 3114 bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0); 3115 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3116 bytes_per_pixel, 3117 latency); 3118 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3119 cstate->base.adjusted_mode.crtc_htotal, 3120 cstate->pipe_src_w, 3121 bytes_per_pixel, 3122 fb->modifier[0], 3123 latency); 3124 3125 plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel; 3126 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3127 3128 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3129 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3130 uint32_t min_scanlines = 4; 3131 uint32_t y_tile_minimum; 3132 if (intel_rotation_90_or_270(plane->state->rotation)) { 3133 int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 
3134 drm_format_plane_cpp(fb->pixel_format, 1) : 3135 drm_format_plane_cpp(fb->pixel_format, 0); 3136 3137 switch (bpp) { 3138 case 1: 3139 min_scanlines = 16; 3140 break; 3141 case 2: 3142 min_scanlines = 8; 3143 break; 3144 case 8: 3145 WARN(1, "Unsupported pixel depth for rotation"); 3146 } 3147 } 3148 y_tile_minimum = plane_blocks_per_line * min_scanlines; 3149 selected_result = max(method2, y_tile_minimum); 3150 } else { 3151 if ((ddb_allocation / plane_blocks_per_line) >= 1) 3152 selected_result = min(method1, method2); 3153 else 3154 selected_result = method1; 3155 } 3156 3157 res_blocks = selected_result + 1; 3158 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); 3159 3160 if (level >= 1 && level <= 7) { 3161 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3162 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) 3163 res_lines += 4; 3164 else 3165 res_blocks++; 3166 } 3167 3168 if (res_blocks >= ddb_allocation || res_lines > 31) 3169 return false; 3170 3171 *out_blocks = res_blocks; 3172 *out_lines = res_lines; 3173 3174 return true; 3175 } 3176 3177 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3178 struct skl_ddb_allocation *ddb, 3179 struct intel_crtc_state *cstate, 3180 int level, 3181 struct skl_wm_level *result) 3182 { 3183 struct drm_device *dev = dev_priv->dev; 3184 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3185 struct intel_plane *intel_plane; 3186 uint16_t ddb_blocks; 3187 enum i915_pipe pipe = intel_crtc->pipe; 3188 3189 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3190 int i = skl_wm_plane_id(intel_plane); 3191 3192 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3193 3194 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3195 cstate, 3196 intel_plane, 3197 ddb_blocks, 3198 level, 3199 &result->plane_res_b[i], 3200 &result->plane_res_l[i]); 3201 } 3202 } 3203 3204 static uint32_t 3205 skl_compute_linetime_wm(struct intel_crtc_state *cstate) 3206 { 3207 if (!cstate->base.active) 3208 return 0; 3209 3210 if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) 3211 return 0; 3212 3213 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, 3214 skl_pipe_pixel_rate(cstate)); 3215 } 3216 3217 static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 3218 struct skl_wm_level *trans_wm /* out */) 3219 { 3220 struct drm_crtc *crtc = cstate->base.crtc; 3221 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3222 struct intel_plane *intel_plane; 3223 3224 if (!cstate->base.active) 3225 return; 3226 3227 /* Until we know more, just disable transition WMs */ 3228 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) { 3229 int i = skl_wm_plane_id(intel_plane); 3230 3231 trans_wm->plane_en[i] = false; 3232 } 3233 } 3234 3235 static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, 3236 struct skl_ddb_allocation *ddb, 3237 struct skl_pipe_wm *pipe_wm) 3238 { 3239 struct drm_device *dev = cstate->base.crtc->dev; 3240 const struct drm_i915_private *dev_priv = dev->dev_private; 3241 int level, max_level = ilk_wm_max_level(dev); 3242 3243 for (level = 0; level <= max_level; level++) { 3244 skl_compute_wm_level(dev_priv, ddb, cstate, 3245 level, &pipe_wm->wm[level]); 3246 } 3247 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 3248 3249 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); 3250 } 3251 3252 static void skl_compute_wm_results(struct drm_device *dev, 3253 struct skl_pipe_wm *p_wm, 3254 struct skl_wm_values *r, 3255 struct intel_crtc 
*intel_crtc) 3256 { 3257 int level, max_level = ilk_wm_max_level(dev); 3258 enum i915_pipe pipe = intel_crtc->pipe; 3259 uint32_t temp; 3260 int i; 3261 3262 for (level = 0; level <= max_level; level++) { 3263 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3264 temp = 0; 3265 3266 temp |= p_wm->wm[level].plane_res_l[i] << 3267 PLANE_WM_LINES_SHIFT; 3268 temp |= p_wm->wm[level].plane_res_b[i]; 3269 if (p_wm->wm[level].plane_en[i]) 3270 temp |= PLANE_WM_EN; 3271 3272 r->plane[pipe][i][level] = temp; 3273 } 3274 3275 temp = 0; 3276 3277 temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT; 3278 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR]; 3279 3280 if (p_wm->wm[level].plane_en[PLANE_CURSOR]) 3281 temp |= PLANE_WM_EN; 3282 3283 r->plane[pipe][PLANE_CURSOR][level] = temp; 3284 3285 } 3286 3287 /* transition WMs */ 3288 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3289 temp = 0; 3290 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT; 3291 temp |= p_wm->trans_wm.plane_res_b[i]; 3292 if (p_wm->trans_wm.plane_en[i]) 3293 temp |= PLANE_WM_EN; 3294 3295 r->plane_trans[pipe][i] = temp; 3296 } 3297 3298 temp = 0; 3299 temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT; 3300 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR]; 3301 if (p_wm->trans_wm.plane_en[PLANE_CURSOR]) 3302 temp |= PLANE_WM_EN; 3303 3304 r->plane_trans[pipe][PLANE_CURSOR] = temp; 3305 3306 r->wm_linetime[pipe] = p_wm->linetime; 3307 } 3308 3309 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, 3310 i915_reg_t reg, 3311 const struct skl_ddb_entry *entry) 3312 { 3313 if (entry->end) 3314 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); 3315 else 3316 I915_WRITE(reg, 0); 3317 } 3318 3319 static void skl_write_wm_values(struct drm_i915_private *dev_priv, 3320 const struct skl_wm_values *new) 3321 { 3322 struct drm_device *dev = dev_priv->dev; 3323 struct intel_crtc *crtc; 3324 3325 for_each_intel_crtc(dev, crtc) { 3326 int i, level, max_level = ilk_wm_max_level(dev); 3327 enum i915_pipe pipe = crtc->pipe; 3328 3329 if (!new->dirty[pipe]) 3330 continue; 3331 3332 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); 3333 3334 for (level = 0; level <= max_level; level++) { 3335 for (i = 0; i < intel_num_planes(crtc); i++) 3336 I915_WRITE(PLANE_WM(pipe, i, level), 3337 new->plane[pipe][i][level]); 3338 I915_WRITE(CUR_WM(pipe, level), 3339 new->plane[pipe][PLANE_CURSOR][level]); 3340 } 3341 for (i = 0; i < intel_num_planes(crtc); i++) 3342 I915_WRITE(PLANE_WM_TRANS(pipe, i), 3343 new->plane_trans[pipe][i]); 3344 I915_WRITE(CUR_WM_TRANS(pipe), 3345 new->plane_trans[pipe][PLANE_CURSOR]); 3346 3347 for (i = 0; i < intel_num_planes(crtc); i++) { 3348 skl_ddb_entry_write(dev_priv, 3349 PLANE_BUF_CFG(pipe, i), 3350 &new->ddb.plane[pipe][i]); 3351 skl_ddb_entry_write(dev_priv, 3352 PLANE_NV12_BUF_CFG(pipe, i), 3353 &new->ddb.y_plane[pipe][i]); 3354 } 3355 3356 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 3357 &new->ddb.plane[pipe][PLANE_CURSOR]); 3358 } 3359 } 3360 3361 /* 3362 * When setting up a new DDB allocation arrangement, we need to correctly 3363 * sequence the times at which the new allocations for the pipes are taken into 3364 * account or we'll have pipes fetching from space previously allocated to 3365 * another pipe. 3366 * 3367 * Roughly the sequence looks like: 3368 * 1. 
 *    re-allocate the pipe(s) with the allocation being reduced and not
 *    overlapping with a previously lit-up pipe (another way to put it:
 *    pipes whose new allocation is strictly included in their old one)
 * 2. re-allocate the other pipes that get their allocation reduced
 * 3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum i915_pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}

static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = dev_priv->dev;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum i915_pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
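	 *
	 * In the example above: C shrank into a subset of its old space
	 * (pass 1), B shrank but still overlaps C's old space (pass 2), and
	 * A is the pipe whose allocation grew from nothing (pass 3).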
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes that got more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}

static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);

	skl_allocate_pipe_ddb(cstate, ddb);
	skl_compute_pipe_wm(cstate, ddb, pipe_wm);

	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.active.skl = *pipe_wm;

	return true;
}

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
3551 */ 3552 WARN_ON(!wm_changed); 3553 3554 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); 3555 r->dirty[intel_crtc->pipe] = true; 3556 } 3557 } 3558 3559 static void skl_clear_wm(struct skl_wm_values *watermarks, enum i915_pipe pipe) 3560 { 3561 watermarks->wm_linetime[pipe] = 0; 3562 memset(watermarks->plane[pipe], 0, 3563 sizeof(uint32_t) * 8 * I915_MAX_PLANES); 3564 memset(watermarks->plane_trans[pipe], 3565 0, sizeof(uint32_t) * I915_MAX_PLANES); 3566 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; 3567 3568 /* Clear ddb entries for pipe */ 3569 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); 3570 memset(&watermarks->ddb.plane[pipe], 0, 3571 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3572 memset(&watermarks->ddb.y_plane[pipe], 0, 3573 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3574 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, 3575 sizeof(struct skl_ddb_entry)); 3576 3577 } 3578 3579 static void skl_update_wm(struct drm_crtc *crtc) 3580 { 3581 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3582 struct drm_device *dev = crtc->dev; 3583 struct drm_i915_private *dev_priv = dev->dev_private; 3584 struct skl_wm_values *results = &dev_priv->wm.skl_results; 3585 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3586 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; 3587 3588 3589 /* Clear all dirty flags */ 3590 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES); 3591 3592 skl_clear_wm(results, intel_crtc->pipe); 3593 3594 if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) 3595 return; 3596 3597 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); 3598 results->dirty[intel_crtc->pipe] = true; 3599 3600 skl_update_other_pipe_wm(dev, crtc, results); 3601 skl_write_wm_values(dev_priv, results); 3602 skl_flush_wm_values(dev_priv, results); 3603 3604 /* store the new configuration */ 3605 dev_priv->wm.skl_hw = *results; 3606 } 3607 3608 static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 3609 { 3610 struct drm_device *dev = dev_priv->dev; 3611 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 3612 struct ilk_wm_maximums max; 3613 struct intel_wm_config *config = &dev_priv->wm.config; 3614 struct ilk_wm_values results = {}; 3615 enum intel_ddb_partitioning partitioning; 3616 3617 ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max); 3618 ilk_wm_merge(dev, config, &max, &lp_wm_1_2); 3619 3620 /* 5/6 split only in single pipe config on IVB+ */ 3621 if (INTEL_INFO(dev)->gen >= 7 && 3622 config->num_pipes_active == 1 && config->sprites_enabled) { 3623 ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max); 3624 ilk_wm_merge(dev, config, &max, &lp_wm_5_6); 3625 3626 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 3627 } else { 3628 best_lp_wm = &lp_wm_1_2; 3629 } 3630 3631 partitioning = (best_lp_wm == &lp_wm_1_2) ? 3632 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 3633 3634 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 3635 3636 ilk_write_wm_values(dev_priv, &results); 3637 } 3638 3639 static void ilk_update_wm(struct drm_crtc *crtc) 3640 { 3641 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3642 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3643 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3644 3645 WARN_ON(cstate->base.active != intel_crtc->active); 3646 3647 /* 3648 * IVB workaround: must disable low power watermarks for at least 3649 * one frame before enabling scaling. 
LP watermarks can be re-enabled 3650 * when scaling is disabled. 3651 * 3652 * WaCxSRDisabledForSpriteScaling:ivb 3653 */ 3654 if (cstate->disable_lp_wm) { 3655 ilk_disable_lp_wm(crtc->dev); 3656 intel_wait_for_vblank(crtc->dev, intel_crtc->pipe); 3657 } 3658 3659 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; 3660 3661 ilk_program_watermarks(dev_priv); 3662 } 3663 3664 static void skl_pipe_wm_active_state(uint32_t val, 3665 struct skl_pipe_wm *active, 3666 bool is_transwm, 3667 bool is_cursor, 3668 int i, 3669 int level) 3670 { 3671 bool is_enabled = (val & PLANE_WM_EN) != 0; 3672 3673 if (!is_transwm) { 3674 if (!is_cursor) { 3675 active->wm[level].plane_en[i] = is_enabled; 3676 active->wm[level].plane_res_b[i] = 3677 val & PLANE_WM_BLOCKS_MASK; 3678 active->wm[level].plane_res_l[i] = 3679 (val >> PLANE_WM_LINES_SHIFT) & 3680 PLANE_WM_LINES_MASK; 3681 } else { 3682 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled; 3683 active->wm[level].plane_res_b[PLANE_CURSOR] = 3684 val & PLANE_WM_BLOCKS_MASK; 3685 active->wm[level].plane_res_l[PLANE_CURSOR] = 3686 (val >> PLANE_WM_LINES_SHIFT) & 3687 PLANE_WM_LINES_MASK; 3688 } 3689 } else { 3690 if (!is_cursor) { 3691 active->trans_wm.plane_en[i] = is_enabled; 3692 active->trans_wm.plane_res_b[i] = 3693 val & PLANE_WM_BLOCKS_MASK; 3694 active->trans_wm.plane_res_l[i] = 3695 (val >> PLANE_WM_LINES_SHIFT) & 3696 PLANE_WM_LINES_MASK; 3697 } else { 3698 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled; 3699 active->trans_wm.plane_res_b[PLANE_CURSOR] = 3700 val & PLANE_WM_BLOCKS_MASK; 3701 active->trans_wm.plane_res_l[PLANE_CURSOR] = 3702 (val >> PLANE_WM_LINES_SHIFT) & 3703 PLANE_WM_LINES_MASK; 3704 } 3705 } 3706 } 3707 3708 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) 3709 { 3710 struct drm_device *dev = crtc->dev; 3711 struct drm_i915_private *dev_priv = dev->dev_private; 3712 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 3713 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3714 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3715 struct skl_pipe_wm *active = &cstate->wm.optimal.skl; 3716 enum i915_pipe pipe = intel_crtc->pipe; 3717 int level, i, max_level; 3718 uint32_t temp; 3719 3720 max_level = ilk_wm_max_level(dev); 3721 3722 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 3723 3724 for (level = 0; level <= max_level; level++) { 3725 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3726 hw->plane[pipe][i][level] = 3727 I915_READ(PLANE_WM(pipe, i, level)); 3728 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level)); 3729 } 3730 3731 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3732 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); 3733 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe)); 3734 3735 if (!intel_crtc->active) 3736 return; 3737 3738 hw->dirty[pipe] = true; 3739 3740 active->linetime = hw->wm_linetime[pipe]; 3741 3742 for (level = 0; level <= max_level; level++) { 3743 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3744 temp = hw->plane[pipe][i][level]; 3745 skl_pipe_wm_active_state(temp, active, false, 3746 false, i, level); 3747 } 3748 temp = hw->plane[pipe][PLANE_CURSOR][level]; 3749 skl_pipe_wm_active_state(temp, active, false, true, i, level); 3750 } 3751 3752 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3753 temp = hw->plane_trans[pipe][i]; 3754 skl_pipe_wm_active_state(temp, active, true, false, i, 0); 3755 } 3756 3757 temp = hw->plane_trans[pipe][PLANE_CURSOR]; 3758 skl_pipe_wm_active_state(temp, 
				    active, true, true, i, 0);
3759 
3760 	intel_crtc->wm.active.skl = *active;
3761 }
3762 
3763 void skl_wm_get_hw_state(struct drm_device *dev)
3764 {
3765 	struct drm_i915_private *dev_priv = dev->dev_private;
3766 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3767 	struct drm_crtc *crtc;
3768 
3769 	skl_ddb_get_hw_state(dev_priv, ddb);
3770 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3771 		skl_pipe_wm_get_hw_state(crtc);
3772 }
3773 
3774 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3775 {
3776 	struct drm_device *dev = crtc->dev;
3777 	struct drm_i915_private *dev_priv = dev->dev_private;
3778 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
3779 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3780 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3781 	struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
3782 	enum i915_pipe pipe = intel_crtc->pipe;
3783 	static const i915_reg_t wm0_pipe_reg[] = {
3784 		[PIPE_A] = WM0_PIPEA_ILK,
3785 		[PIPE_B] = WM0_PIPEB_ILK,
3786 		[PIPE_C] = WM0_PIPEC_IVB,
3787 	};
3788 
3789 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3790 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3791 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3792 
3793 	active->pipe_enabled = intel_crtc->active;
3794 
3795 	if (active->pipe_enabled) {
3796 		u32 tmp = hw->wm_pipe[pipe];
3797 
3798 		/*
3799 		 * For active pipes LP0 watermark is marked as
3800 		 * enabled, and LP1+ watermarks as disabled since
3801 		 * we can't really reverse compute them in case
3802 		 * multiple pipes are active.
3803 		 */
3804 		active->wm[0].enable = true;
3805 		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3806 		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3807 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3808 		active->linetime = hw->wm_linetime[pipe];
3809 	} else {
3810 		int level, max_level = ilk_wm_max_level(dev);
3811 
3812 		/*
3813 		 * For inactive pipes, all watermark levels
3814 		 * should be marked as enabled but zeroed,
3815 		 * which is what we'd compute them to.
3816 */ 3817 for (level = 0; level <= max_level; level++) 3818 active->wm[level].enable = true; 3819 } 3820 3821 intel_crtc->wm.active.ilk = *active; 3822 } 3823 3824 #define _FW_WM(value, plane) \ 3825 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) 3826 #define _FW_WM_VLV(value, plane) \ 3827 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) 3828 3829 static void vlv_read_wm_values(struct drm_i915_private *dev_priv, 3830 struct vlv_wm_values *wm) 3831 { 3832 enum i915_pipe pipe; 3833 uint32_t tmp; 3834 3835 for_each_pipe(dev_priv, pipe) { 3836 tmp = I915_READ(VLV_DDL(pipe)); 3837 3838 wm->ddl[pipe].primary = 3839 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3840 wm->ddl[pipe].cursor = 3841 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3842 wm->ddl[pipe].sprite[0] = 3843 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3844 wm->ddl[pipe].sprite[1] = 3845 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3846 } 3847 3848 tmp = I915_READ(DSPFW1); 3849 wm->sr.plane = _FW_WM(tmp, SR); 3850 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); 3851 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); 3852 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); 3853 3854 tmp = I915_READ(DSPFW2); 3855 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); 3856 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); 3857 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); 3858 3859 tmp = I915_READ(DSPFW3); 3860 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 3861 3862 if (IS_CHERRYVIEW(dev_priv)) { 3863 tmp = I915_READ(DSPFW7_CHV); 3864 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); 3865 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); 3866 3867 tmp = I915_READ(DSPFW8_CHV); 3868 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); 3869 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); 3870 3871 tmp = I915_READ(DSPFW9_CHV); 3872 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); 3873 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); 3874 3875 tmp = I915_READ(DSPHOWM); 3876 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 3877 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 3878 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; 3879 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; 3880 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; 3881 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 3882 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; 3883 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 3884 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 3885 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; 3886 } else { 3887 tmp = I915_READ(DSPFW7); 3888 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); 3889 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); 3890 3891 tmp = I915_READ(DSPHOWM); 3892 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 3893 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; 3894 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 3895 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; 3896 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 3897 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 3898 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; 3899 } 3900 } 3901 3902 #undef _FW_WM 3903 #undef _FW_WM_VLV 3904 3905 void vlv_wm_get_hw_state(struct drm_device *dev) 3906 { 3907 struct drm_i915_private *dev_priv = 
to_i915(dev); 3908 struct vlv_wm_values *wm = &dev_priv->wm.vlv; 3909 struct intel_plane *plane; 3910 enum i915_pipe pipe; 3911 u32 val; 3912 3913 vlv_read_wm_values(dev_priv, wm); 3914 3915 for_each_intel_plane(dev, plane) { 3916 switch (plane->base.type) { 3917 int sprite; 3918 case DRM_PLANE_TYPE_CURSOR: 3919 plane->wm.fifo_size = 63; 3920 break; 3921 case DRM_PLANE_TYPE_PRIMARY: 3922 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0); 3923 break; 3924 case DRM_PLANE_TYPE_OVERLAY: 3925 sprite = plane->plane; 3926 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1); 3927 break; 3928 } 3929 } 3930 3931 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 3932 wm->level = VLV_WM_LEVEL_PM2; 3933 3934 if (IS_CHERRYVIEW(dev_priv)) { 3935 mutex_lock(&dev_priv->rps.hw_lock); 3936 3937 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 3938 if (val & DSP_MAXFIFO_PM5_ENABLE) 3939 wm->level = VLV_WM_LEVEL_PM5; 3940 3941 /* 3942 * If DDR DVFS is disabled in the BIOS, Punit 3943 * will never ack the request. So if that happens 3944 * assume we don't have to enable/disable DDR DVFS 3945 * dynamically. To test that just set the REQ_ACK 3946 * bit to poke the Punit, but don't change the 3947 * HIGH/LOW bits so that we don't actually change 3948 * the current state. 3949 */ 3950 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 3951 val |= FORCE_DDR_FREQ_REQ_ACK; 3952 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 3953 3954 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 3955 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 3956 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " 3957 "assuming DDR DVFS is disabled\n"); 3958 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; 3959 } else { 3960 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 3961 if ((val & FORCE_DDR_HIGH_FREQ) == 0) 3962 wm->level = VLV_WM_LEVEL_DDR_DVFS; 3963 } 3964 3965 mutex_unlock(&dev_priv->rps.hw_lock); 3966 } 3967 3968 for_each_pipe(dev_priv, pipe) 3969 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", 3970 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, 3971 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); 3972 3973 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", 3974 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); 3975 } 3976 3977 void ilk_wm_get_hw_state(struct drm_device *dev) 3978 { 3979 struct drm_i915_private *dev_priv = dev->dev_private; 3980 struct ilk_wm_values *hw = &dev_priv->wm.hw; 3981 struct drm_crtc *crtc; 3982 3983 for_each_crtc(dev, crtc) 3984 ilk_pipe_wm_get_hw_state(crtc); 3985 3986 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 3987 hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 3988 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 3989 3990 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 3991 if (INTEL_INFO(dev)->gen >= 7) { 3992 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 3993 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 3994 } 3995 3996 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3997 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 3998 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 3999 else if (IS_IVYBRIDGE(dev)) 4000 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 
4001 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4002 4003 hw->enable_fbc_wm = 4004 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 4005 } 4006 4007 /** 4008 * intel_update_watermarks - update FIFO watermark values based on current modes 4009 * 4010 * Calculate watermark values for the various WM regs based on current mode 4011 * and plane configuration. 4012 * 4013 * There are several cases to deal with here: 4014 * - normal (i.e. non-self-refresh) 4015 * - self-refresh (SR) mode 4016 * - lines are large relative to FIFO size (buffer can hold up to 2) 4017 * - lines are small relative to FIFO size (buffer can hold more than 2 4018 * lines), so need to account for TLB latency 4019 * 4020 * The normal calculation is: 4021 * watermark = dotclock * bytes per pixel * latency 4022 * where latency is platform & configuration dependent (we assume pessimal 4023 * values here). 4024 * 4025 * The SR calculation is: 4026 * watermark = (trunc(latency/line time)+1) * surface width * 4027 * bytes per pixel 4028 * where 4029 * line time = htotal / dotclock 4030 * surface width = hdisplay for normal plane and 64 for cursor 4031 * and latency is assumed to be high, as above. 4032 * 4033 * The final value programmed to the register should always be rounded up, 4034 * and include an extra 2 entries to account for clock crossings. 4035 * 4036 * We don't use the sprite, so we can ignore that. And on Crestline we have 4037 * to set the non-SR watermarks to 8. 4038 */ 4039 void intel_update_watermarks(struct drm_crtc *crtc) 4040 { 4041 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 4042 4043 if (dev_priv->display.update_wm) 4044 dev_priv->display.update_wm(crtc); 4045 } 4046 4047 /** 4048 * Lock protecting IPS related data structures 4049 */ 4050 struct lock mchdev_lock; 4051 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE); 4052 4053 /* Global for IPS driver to get at the current i915 device. Protected by 4054 * mchdev_lock. 
*/ 4055 static struct drm_i915_private *i915_mch_dev; 4056 4057 bool ironlake_set_drps(struct drm_device *dev, u8 val) 4058 { 4059 struct drm_i915_private *dev_priv = dev->dev_private; 4060 u16 rgvswctl; 4061 4062 assert_spin_locked(&mchdev_lock); 4063 4064 rgvswctl = I915_READ16(MEMSWCTL); 4065 if (rgvswctl & MEMCTL_CMD_STS) { 4066 DRM_DEBUG("gpu busy, RCS change rejected\n"); 4067 return false; /* still busy with another command */ 4068 } 4069 4070 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 4071 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 4072 I915_WRITE16(MEMSWCTL, rgvswctl); 4073 POSTING_READ16(MEMSWCTL); 4074 4075 rgvswctl |= MEMCTL_CMD_STS; 4076 I915_WRITE16(MEMSWCTL, rgvswctl); 4077 4078 return true; 4079 } 4080 4081 static void ironlake_enable_drps(struct drm_device *dev) 4082 { 4083 struct drm_i915_private *dev_priv = dev->dev_private; 4084 u32 rgvmodectl = I915_READ(MEMMODECTL); 4085 u8 fmax, fmin, fstart, vstart; 4086 4087 spin_lock_irq(&mchdev_lock); 4088 4089 /* Enable temp reporting */ 4090 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 4091 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 4092 4093 /* 100ms RC evaluation intervals */ 4094 I915_WRITE(RCUPEI, 100000); 4095 I915_WRITE(RCDNEI, 100000); 4096 4097 /* Set max/min thresholds to 90ms and 80ms respectively */ 4098 I915_WRITE(RCBMAXAVG, 90000); 4099 I915_WRITE(RCBMINAVG, 80000); 4100 4101 I915_WRITE(MEMIHYST, 1); 4102 4103 /* Set up min, max, and cur for interrupt handling */ 4104 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 4105 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 4106 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 4107 MEMMODE_FSTART_SHIFT; 4108 4109 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >> 4110 PXVFREQ_PX_SHIFT; 4111 4112 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 4113 dev_priv->ips.fstart = fstart; 4114 4115 dev_priv->ips.max_delay = fstart; 4116 dev_priv->ips.min_delay = fmin; 4117 dev_priv->ips.cur_delay = fstart; 4118 4119 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 4120 fmax, fmin, fstart); 4121 4122 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 4123 4124 /* 4125 * Interrupts will be enabled in ironlake_irq_postinstall 4126 */ 4127 4128 I915_WRITE(VIDSTART, vstart); 4129 POSTING_READ(VIDSTART); 4130 4131 rgvmodectl |= MEMMODE_SWMODE_EN; 4132 I915_WRITE(MEMMODECTL, rgvmodectl); 4133 4134 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 4135 DRM_ERROR("stuck trying to change perf mode\n"); 4136 mdelay(1); 4137 4138 ironlake_set_drps(dev, fstart); 4139 4140 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 4141 I915_READ(DDREC) + I915_READ(CSIEC); 4142 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 4143 dev_priv->ips.last_count2 = I915_READ(GFXEC); 4144 dev_priv->ips.last_time2 = ktime_get_raw_ns(); 4145 4146 spin_unlock_irq(&mchdev_lock); 4147 } 4148 4149 static void ironlake_disable_drps(struct drm_device *dev) 4150 { 4151 struct drm_i915_private *dev_priv = dev->dev_private; 4152 u16 rgvswctl; 4153 4154 spin_lock_irq(&mchdev_lock); 4155 4156 rgvswctl = I915_READ16(MEMSWCTL); 4157 4158 /* Ack interrupts, disable EFC interrupt */ 4159 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 4160 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 4161 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 4162 I915_WRITE(DEIIR, DE_PCU_EVENT); 4163 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 4164 4165 /* Go back to the starting frequency */ 4166 ironlake_set_drps(dev, 
dev_priv->ips.fstart); 4167 mdelay(1); 4168 rgvswctl |= MEMCTL_CMD_STS; 4169 I915_WRITE(MEMSWCTL, rgvswctl); 4170 mdelay(1); 4171 4172 spin_unlock_irq(&mchdev_lock); 4173 } 4174 4175 /* There's a funny hw issue where the hw returns all 0 when reading from 4176 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value 4177 * ourselves, instead of doing a rmw cycle (which might result in us clearing 4178 * all limits and the gpu stuck at whatever frequency it is at atm). 4179 */ 4180 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) 4181 { 4182 u32 limits; 4183 4184 /* Only set the down limit when we've reached the lowest level to avoid 4185 * getting more interrupts, otherwise leave this clear. This prevents a 4186 * race in the hw when coming out of rc6: There's a tiny window where 4187 * the hw runs at the minimal clock before selecting the desired 4188 * frequency, if the down threshold expires in that window we will not 4189 * receive a down interrupt. */ 4190 if (IS_GEN9(dev_priv->dev)) { 4191 limits = (dev_priv->rps.max_freq_softlimit) << 23; 4192 if (val <= dev_priv->rps.min_freq_softlimit) 4193 limits |= (dev_priv->rps.min_freq_softlimit) << 14; 4194 } else { 4195 limits = dev_priv->rps.max_freq_softlimit << 24; 4196 if (val <= dev_priv->rps.min_freq_softlimit) 4197 limits |= dev_priv->rps.min_freq_softlimit << 16; 4198 } 4199 4200 return limits; 4201 } 4202 4203 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) 4204 { 4205 int new_power; 4206 u32 threshold_up = 0, threshold_down = 0; /* in % */ 4207 u32 ei_up = 0, ei_down = 0; 4208 4209 new_power = dev_priv->rps.power; 4210 switch (dev_priv->rps.power) { 4211 case LOW_POWER: 4212 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) 4213 new_power = BETWEEN; 4214 break; 4215 4216 case BETWEEN: 4217 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) 4218 new_power = LOW_POWER; 4219 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) 4220 new_power = HIGH_POWER; 4221 break; 4222 4223 case HIGH_POWER: 4224 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) 4225 new_power = BETWEEN; 4226 break; 4227 } 4228 /* Max/min bins are special */ 4229 if (val <= dev_priv->rps.min_freq_softlimit) 4230 new_power = LOW_POWER; 4231 if (val >= dev_priv->rps.max_freq_softlimit) 4232 new_power = HIGH_POWER; 4233 if (new_power == dev_priv->rps.power) 4234 return; 4235 4236 /* Note the units here are not exactly 1us, but 1280ns. 
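	 * As a worked example: the 16000us LOW_POWER "up" interval below
	 * corresponds to about 16000 / 1.28 = 12500 of these 1280ns units.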
*/ 4237 switch (new_power) { 4238 case LOW_POWER: 4239 /* Upclock if more than 95% busy over 16ms */ 4240 ei_up = 16000; 4241 threshold_up = 95; 4242 4243 /* Downclock if less than 85% busy over 32ms */ 4244 ei_down = 32000; 4245 threshold_down = 85; 4246 break; 4247 4248 case BETWEEN: 4249 /* Upclock if more than 90% busy over 13ms */ 4250 ei_up = 13000; 4251 threshold_up = 90; 4252 4253 /* Downclock if less than 75% busy over 32ms */ 4254 ei_down = 32000; 4255 threshold_down = 75; 4256 break; 4257 4258 case HIGH_POWER: 4259 /* Upclock if more than 85% busy over 10ms */ 4260 ei_up = 10000; 4261 threshold_up = 85; 4262 4263 /* Downclock if less than 60% busy over 32ms */ 4264 ei_down = 32000; 4265 threshold_down = 60; 4266 break; 4267 } 4268 4269 I915_WRITE(GEN6_RP_UP_EI, 4270 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4271 I915_WRITE(GEN6_RP_UP_THRESHOLD, 4272 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); 4273 4274 I915_WRITE(GEN6_RP_DOWN_EI, 4275 GT_INTERVAL_FROM_US(dev_priv, ei_down)); 4276 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 4277 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); 4278 4279 I915_WRITE(GEN6_RP_CONTROL, 4280 GEN6_RP_MEDIA_TURBO | 4281 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4282 GEN6_RP_MEDIA_IS_GFX | 4283 GEN6_RP_ENABLE | 4284 GEN6_RP_UP_BUSY_AVG | 4285 GEN6_RP_DOWN_IDLE_AVG); 4286 4287 dev_priv->rps.power = new_power; 4288 dev_priv->rps.up_threshold = threshold_up; 4289 dev_priv->rps.down_threshold = threshold_down; 4290 dev_priv->rps.last_adj = 0; 4291 } 4292 4293 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) 4294 { 4295 u32 mask = 0; 4296 4297 if (val > dev_priv->rps.min_freq_softlimit) 4298 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4299 if (val < dev_priv->rps.max_freq_softlimit) 4300 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4301 4302 mask &= dev_priv->pm_rps_events; 4303 4304 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); 4305 } 4306 4307 /* gen6_set_rps is called to update the frequency request, but should also be 4308 * called when the range (min_delay and max_delay) is modified so that we can 4309 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4310 static void gen6_set_rps(struct drm_device *dev, u8 val) 4311 { 4312 struct drm_i915_private *dev_priv = dev->dev_private; 4313 4314 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4315 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 4316 return; 4317 4318 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4319 WARN_ON(val > dev_priv->rps.max_freq); 4320 WARN_ON(val < dev_priv->rps.min_freq); 4321 4322 /* min/max delay may still have been modified so be sure to 4323 * write the limits value. 4324 */ 4325 if (val != dev_priv->rps.cur_freq) { 4326 gen6_set_rps_thresholds(dev_priv, val); 4327 4328 if (IS_GEN9(dev)) 4329 I915_WRITE(GEN6_RPNSWREQ, 4330 GEN9_FREQUENCY(val)); 4331 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4332 I915_WRITE(GEN6_RPNSWREQ, 4333 HSW_FREQUENCY(val)); 4334 else 4335 I915_WRITE(GEN6_RPNSWREQ, 4336 GEN6_FREQUENCY(val) | 4337 GEN6_OFFSET(0) | 4338 GEN6_AGGRESSIVE_TURBO); 4339 } 4340 4341 /* Make sure we continue to get interrupts 4342 * until we hit the minimum or maximum frequencies. 
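	 * gen6_rps_pm_mask() above encodes this: once val reaches a soft
	 * limit, the interrupt sources for the direction we can no longer
	 * move in are simply left masked.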
4343 */ 4344 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); 4345 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4346 4347 POSTING_READ(GEN6_RPNSWREQ); 4348 4349 dev_priv->rps.cur_freq = val; 4350 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4351 } 4352 4353 static void valleyview_set_rps(struct drm_device *dev, u8 val) 4354 { 4355 struct drm_i915_private *dev_priv = dev->dev_private; 4356 4357 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4358 WARN_ON(val > dev_priv->rps.max_freq); 4359 WARN_ON(val < dev_priv->rps.min_freq); 4360 4361 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), 4362 "Odd GPU freq value\n")) 4363 val &= ~1; 4364 4365 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4366 4367 if (val != dev_priv->rps.cur_freq) { 4368 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 4369 if (!IS_CHERRYVIEW(dev_priv)) 4370 gen6_set_rps_thresholds(dev_priv, val); 4371 } 4372 4373 dev_priv->rps.cur_freq = val; 4374 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4375 } 4376 4377 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down 4378 * 4379 * * If Gfx is Idle, then 4380 * 1. Forcewake Media well. 4381 * 2. Request idle freq. 4382 * 3. Release Forcewake of Media well. 4383 */ 4384 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 4385 { 4386 u32 val = dev_priv->rps.idle_freq; 4387 4388 if (dev_priv->rps.cur_freq <= val) 4389 return; 4390 4391 /* Wake up the media well, as that takes a lot less 4392 * power than the Render well. */ 4393 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 4394 valleyview_set_rps(dev_priv->dev, val); 4395 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 4396 } 4397 4398 void gen6_rps_busy(struct drm_i915_private *dev_priv) 4399 { 4400 mutex_lock(&dev_priv->rps.hw_lock); 4401 if (dev_priv->rps.enabled) { 4402 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) 4403 gen6_rps_reset_ei(dev_priv); 4404 I915_WRITE(GEN6_PMINTRMSK, 4405 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 4406 } 4407 mutex_unlock(&dev_priv->rps.hw_lock); 4408 } 4409 4410 void gen6_rps_idle(struct drm_i915_private *dev_priv) 4411 { 4412 struct drm_device *dev = dev_priv->dev; 4413 4414 mutex_lock(&dev_priv->rps.hw_lock); 4415 if (dev_priv->rps.enabled) { 4416 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4417 vlv_set_rps_idle(dev_priv); 4418 else 4419 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4420 dev_priv->rps.last_adj = 0; 4421 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 4422 } 4423 mutex_unlock(&dev_priv->rps.hw_lock); 4424 4425 lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE); 4426 while (!list_empty(&dev_priv->rps.clients)) 4427 list_del_init(dev_priv->rps.clients.next); 4428 lockmgr(&dev_priv->rps.client_lock, LK_RELEASE); 4429 } 4430 4431 void gen6_rps_boost(struct drm_i915_private *dev_priv, 4432 struct intel_rps_client *rps, 4433 unsigned long submitted) 4434 { 4435 /* This is intentionally racy! We peek at the state here, then 4436 * validate inside the RPS worker. 4437 */ 4438 if (!(dev_priv->mm.busy && 4439 dev_priv->rps.enabled && 4440 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) 4441 return; 4442 4443 /* Force a RPS boost (and don't count it against the client) if 4444 * the GPU is severely congested. 
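	 * ("Severely congested" here means the request was submitted more
	 * than DRM_I915_THROTTLE_JIFFIES ago and still isn't done; in that
	 * case the boost below is charged to the device rather than to the
	 * client.)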
4445  */
4446 	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
4447 		rps = NULL;
4448 
4449 	lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE);
4450 	if (rps == NULL || list_empty(&rps->link)) {
4451 		spin_lock_irq(&dev_priv->irq_lock);
4452 		if (dev_priv->rps.interrupts_enabled) {
4453 			dev_priv->rps.client_boost = true;
4454 			queue_work(dev_priv->wq, &dev_priv->rps.work);
4455 		}
4456 		spin_unlock_irq(&dev_priv->irq_lock);
4457 
4458 		if (rps != NULL) {
4459 			list_add(&rps->link, &dev_priv->rps.clients);
4460 			rps->boosts++;
4461 		} else
4462 			dev_priv->rps.boosts++;
4463 	}
4464 	lockmgr(&dev_priv->rps.client_lock, LK_RELEASE);
4465 }
4466 
4467 void intel_set_rps(struct drm_device *dev, u8 val)
4468 {
4469 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4470 		valleyview_set_rps(dev, val);
4471 	else
4472 		gen6_set_rps(dev, val);
4473 }
4474 
4475 static void gen9_disable_rps(struct drm_device *dev)
4476 {
4477 	struct drm_i915_private *dev_priv = dev->dev_private;
4478 
4479 	I915_WRITE(GEN6_RC_CONTROL, 0);
4480 	I915_WRITE(GEN9_PG_ENABLE, 0);
4481 }
4482 
4483 static void gen6_disable_rps(struct drm_device *dev)
4484 {
4485 	struct drm_i915_private *dev_priv = dev->dev_private;
4486 
4487 	I915_WRITE(GEN6_RC_CONTROL, 0);
4488 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4489 }
4490 
4491 static void cherryview_disable_rps(struct drm_device *dev)
4492 {
4493 	struct drm_i915_private *dev_priv = dev->dev_private;
4494 
4495 	I915_WRITE(GEN6_RC_CONTROL, 0);
4496 }
4497 
4498 static void valleyview_disable_rps(struct drm_device *dev)
4499 {
4500 	struct drm_i915_private *dev_priv = dev->dev_private;
4501 
4502 	/* We're doing forcewake before disabling RC6;
4503 	 * this is what the BIOS expects when going into suspend */
4504 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4505 
4506 	I915_WRITE(GEN6_RC_CONTROL, 0);
4507 
4508 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4509 }
4510 
4511 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4512 {
4513 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
4514 		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4515 			mode = GEN6_RC_CTL_RC6_ENABLE;
4516 		else
4517 			mode = 0;
4518 	}
4519 	if (HAS_RC6p(dev))
4520 		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4521 			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
4522 			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
4523 			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
4524 
4525 	else
4526 		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4527 			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
4528 }
4529 
4530 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4531 {
4532 	/* No RC6 before Ironlake and code is gone for ilk.
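	 * The enable_rc6 parameter below is treated as a bitmask of the
	 * INTEL_RC6*_ENABLE flags, so e.g. i915.enable_rc6=1 requests plain
	 * RC6 only, while 7 also asks for the deep states on parts where
	 * HAS_RC6p() holds; unsupported bits are masked off.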
*/ 4533 if (INTEL_INFO(dev)->gen < 6) 4534 return 0; 4535 4536 /* Respect the kernel parameter if it is set */ 4537 if (enable_rc6 >= 0) { 4538 int mask; 4539 4540 if (HAS_RC6p(dev)) 4541 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 4542 INTEL_RC6pp_ENABLE; 4543 else 4544 mask = INTEL_RC6_ENABLE; 4545 4546 if ((enable_rc6 & mask) != enable_rc6) 4547 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", 4548 enable_rc6 & mask, enable_rc6, mask); 4549 4550 return enable_rc6 & mask; 4551 } 4552 4553 if (IS_IVYBRIDGE(dev)) 4554 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 4555 4556 return INTEL_RC6_ENABLE; 4557 } 4558 4559 int intel_enable_rc6(const struct drm_device *dev) 4560 { 4561 return i915.enable_rc6; 4562 } 4563 4564 static void gen6_init_rps_frequencies(struct drm_device *dev) 4565 { 4566 struct drm_i915_private *dev_priv = dev->dev_private; 4567 uint32_t rp_state_cap; 4568 u32 ddcc_status = 0; 4569 int ret; 4570 4571 /* All of these values are in units of 50MHz */ 4572 dev_priv->rps.cur_freq = 0; 4573 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 4574 if (IS_BROXTON(dev)) { 4575 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 4576 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 4577 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 4578 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; 4579 } else { 4580 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4581 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 4582 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 4583 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 4584 } 4585 4586 /* hw_max = RP0 until we check for overclocking */ 4587 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 4588 4589 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 4590 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || 4591 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 4592 ret = sandybridge_pcode_read(dev_priv, 4593 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 4594 &ddcc_status); 4595 if (0 == ret) 4596 dev_priv->rps.efficient_freq = 4597 clamp_t(u8, 4598 ((ddcc_status >> 8) & 0xff), 4599 dev_priv->rps.min_freq, 4600 dev_priv->rps.max_freq); 4601 } 4602 4603 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 4604 /* Store the frequency values in 16.66 MHZ units, which is 4605 the natural hardware unit for SKL */ 4606 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 4607 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; 4608 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; 4609 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; 4610 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; 4611 } 4612 4613 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 4614 4615 /* Preserve min/max settings in case of re-init */ 4616 if (dev_priv->rps.max_freq_softlimit == 0) 4617 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4618 4619 if (dev_priv->rps.min_freq_softlimit == 0) { 4620 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4621 dev_priv->rps.min_freq_softlimit = 4622 max_t(int, dev_priv->rps.efficient_freq, 4623 intel_freq_opcode(dev_priv, 450)); 4624 else 4625 dev_priv->rps.min_freq_softlimit = 4626 dev_priv->rps.min_freq; 4627 } 4628 } 4629 4630 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 4631 static void gen9_enable_rps(struct drm_device *dev) 4632 { 4633 struct drm_i915_private *dev_priv = dev->dev_private; 4634 4635 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4636 4637 gen6_init_rps_frequencies(dev); 4638 4639 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4640 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 4641 
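		/*
		 * Turbo stays disabled on these early BXT steppings, so just
		 * drop the forcewake taken above and bail without touching
		 * the RPS registers.
		 */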
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4642 return; 4643 } 4644 4645 /* Program defaults and thresholds for RPS*/ 4646 I915_WRITE(GEN6_RC_VIDEO_FREQ, 4647 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 4648 4649 /* 1 second timeout*/ 4650 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 4651 GT_INTERVAL_FROM_US(dev_priv, 1000000)); 4652 4653 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); 4654 4655 /* Leaning on the below call to gen6_set_rps to program/setup the 4656 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 4657 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 4658 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4659 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4660 4661 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4662 } 4663 4664 static void gen9_enable_rc6(struct drm_device *dev) 4665 { 4666 struct drm_i915_private *dev_priv = dev->dev_private; 4667 struct intel_engine_cs *ring; 4668 uint32_t rc6_mask = 0; 4669 int unused; 4670 4671 /* 1a: Software RC state - RC0 */ 4672 I915_WRITE(GEN6_RC_STATE, 0); 4673 4674 /* 1b: Get forcewake during program sequence. Although the driver 4675 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 4676 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4677 4678 /* 2a: Disable RC states. */ 4679 I915_WRITE(GEN6_RC_CONTROL, 0); 4680 4681 /* 2b: Program RC6 thresholds.*/ 4682 4683 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 4684 if (IS_SKYLAKE(dev)) 4685 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 4686 else 4687 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 4688 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 4689 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 4690 for_each_ring(ring, dev_priv, unused) 4691 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4692 4693 if (HAS_GUC_UCODE(dev)) 4694 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 4695 4696 I915_WRITE(GEN6_RC_SLEEP, 0); 4697 4698 /* 2c: Program Coarse Power Gating Policies. */ 4699 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); 4700 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 4701 4702 /* 3a: Enable RC6 */ 4703 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4704 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 4705 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4706 "on" : "off"); 4707 /* WaRsUseTimeoutMode */ 4708 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 4709 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 4710 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 4711 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4712 GEN7_RC_CTL_TO_MODE | 4713 rc6_mask); 4714 } else { 4715 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 4716 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4717 GEN6_RC_CTL_EI_MODE(1) | 4718 rc6_mask); 4719 } 4720 4721 /* 4722 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 4723 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 4724 */ 4725 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || 4726 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) 4727 I915_WRITE(GEN9_PG_ENABLE, 0); 4728 else 4729 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 
4730 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 4731 4732 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4733 4734 } 4735 4736 static void gen8_enable_rps(struct drm_device *dev) 4737 { 4738 struct drm_i915_private *dev_priv = dev->dev_private; 4739 struct intel_engine_cs *ring; 4740 uint32_t rc6_mask = 0; 4741 int unused; 4742 4743 /* 1a: Software RC state - RC0 */ 4744 I915_WRITE(GEN6_RC_STATE, 0); 4745 4746 /* 1c & 1d: Get forcewake during program sequence. Although the driver 4747 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 4748 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4749 4750 /* 2a: Disable RC states. */ 4751 I915_WRITE(GEN6_RC_CONTROL, 0); 4752 4753 /* Initialize rps frequencies */ 4754 gen6_init_rps_frequencies(dev); 4755 4756 /* 2b: Program RC6 thresholds.*/ 4757 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 4758 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 4759 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 4760 for_each_ring(ring, dev_priv, unused) 4761 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4762 I915_WRITE(GEN6_RC_SLEEP, 0); 4763 if (IS_BROADWELL(dev)) 4764 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 4765 else 4766 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 4767 4768 /* 3: Enable RC6 */ 4769 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4770 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 4771 intel_print_rc6_info(dev, rc6_mask); 4772 if (IS_BROADWELL(dev)) 4773 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4774 GEN7_RC_CTL_TO_MODE | 4775 rc6_mask); 4776 else 4777 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4778 GEN6_RC_CTL_EI_MODE(1) | 4779 rc6_mask); 4780 4781 /* 4 Program defaults and thresholds for RPS*/ 4782 I915_WRITE(GEN6_RPNSWREQ, 4783 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 4784 I915_WRITE(GEN6_RC_VIDEO_FREQ, 4785 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 4786 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ 4787 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ 4788 4789 /* Docs recommend 900MHz, and 300 MHz respectively */ 4790 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 4791 dev_priv->rps.max_freq_softlimit << 24 | 4792 dev_priv->rps.min_freq_softlimit << 16); 4793 4794 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 4795 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ 4796 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ 4797 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? 
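	 * The EI registers count 1.28us units, so the comments do add up:
	 * 66000 * 1.28us = 84.48ms and 350000 * 1.28us = 448ms.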
 */
4798 
4799 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4800 
4801 	/* 5: Enable RPS */
4802 	I915_WRITE(GEN6_RP_CONTROL,
4803 		   GEN6_RP_MEDIA_TURBO |
4804 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
4805 		   GEN6_RP_MEDIA_IS_GFX |
4806 		   GEN6_RP_ENABLE |
4807 		   GEN6_RP_UP_BUSY_AVG |
4808 		   GEN6_RP_DOWN_IDLE_AVG);
4809 
4810 	/* 6: Ring frequency + overclocking (our driver does this later) */
4811 
4812 	dev_priv->rps.power = HIGH_POWER; /* force a reset */
4813 	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4814 
4815 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4816 }
4817 
4818 static void gen6_enable_rps(struct drm_device *dev)
4819 {
4820 	struct drm_i915_private *dev_priv = dev->dev_private;
4821 	struct intel_engine_cs *ring;
4822 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4823 	u32 gtfifodbg;
4824 	int rc6_mode;
4825 	int i, ret;
4826 
4827 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4828 
4829 	/* Here begins a magic sequence of register writes to enable
4830 	 * auto-downclocking.
4831 	 *
4832 	 * Perhaps there might be some value in exposing these to
4833 	 * userspace...
4834 	 */
4835 	I915_WRITE(GEN6_RC_STATE, 0);
4836 
4837 	/* Clear the DBG now so we don't confuse earlier errors */
4838 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4839 		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4840 		I915_WRITE(GTFIFODBG, gtfifodbg);
4841 	}
4842 
4843 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4844 
4845 	/* Initialize rps frequencies */
4846 	gen6_init_rps_frequencies(dev);
4847 
4848 	/* disable the counters and set deterministic thresholds */
4849 	I915_WRITE(GEN6_RC_CONTROL, 0);
4850 
4851 	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4852 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4853 	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4854 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4855 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4856 
4857 	for_each_ring(ring, dev_priv, i)
4858 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4859 
4860 	I915_WRITE(GEN6_RC_SLEEP, 0);
4861 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4862 	if (IS_IVYBRIDGE(dev))
4863 		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4864 	else
4865 		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4866 	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4867 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4868 
4869 	/* Check if we are enabling RC6 */
4870 	rc6_mode = intel_enable_rc6(dev_priv->dev);
4871 	if (rc6_mode & INTEL_RC6_ENABLE)
4872 		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4873 
4874 	/* We don't use those on Haswell */
4875 	if (!IS_HASWELL(dev)) {
4876 		if (rc6_mode & INTEL_RC6p_ENABLE)
4877 			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4878 
4879 		if (rc6_mode & INTEL_RC6pp_ENABLE)
4880 			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4881 	}
4882 
4883 	intel_print_rc6_info(dev, rc6_mask);
4884 
4885 	I915_WRITE(GEN6_RC_CONTROL,
4886 		   rc6_mask |
4887 		   GEN6_RC_CTL_EI_MODE(1) |
4888 		   GEN6_RC_CTL_HW_ENABLE);
4889 
4890 	/* Power down if completely idle for over 50ms */
4891 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4892 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4893 
4894 	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4895 	if (ret)
4896 		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4897 
4898 	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4899 	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4900 		DRM_DEBUG_DRIVER("Overclocking supported.
Max: %dMHz, Overclock max: %dMHz\n", 4901 (dev_priv->rps.max_freq_softlimit & 0xff) * 50, 4902 (pcu_mbox & 0xff) * 50); 4903 dev_priv->rps.max_freq = pcu_mbox & 0xff; 4904 } 4905 4906 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4907 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4908 4909 rc6vids = 0; 4910 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 4911 if (IS_GEN6(dev) && ret) { 4912 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 4913 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 4914 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", 4915 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 4916 rc6vids &= 0xffff00; 4917 rc6vids |= GEN6_ENCODE_RC6_VID(450); 4918 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); 4919 if (ret) 4920 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); 4921 } 4922 4923 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4924 } 4925 4926 static void __gen6_update_ring_freq(struct drm_device *dev) 4927 { 4928 struct drm_i915_private *dev_priv = dev->dev_private; 4929 int min_freq = 15; 4930 unsigned int gpu_freq; 4931 unsigned int max_ia_freq, min_ring_freq; 4932 unsigned int max_gpu_freq, min_gpu_freq; 4933 int scaling_factor = 180; 4934 4935 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4936 4937 #if 0 4938 policy = cpufreq_cpu_get(0); 4939 if (policy) { 4940 max_ia_freq = policy->cpuinfo.max_freq; 4941 cpufreq_cpu_put(policy); 4942 } else { 4943 /* 4944 * Default to measured freq if none found, PCU will ensure we 4945 * don't go over 4946 */ 4947 max_ia_freq = tsc_khz; 4948 } 4949 #else 4950 max_ia_freq = tsc_frequency / 1000; 4951 #endif 4952 4953 /* Convert from kHz to MHz */ 4954 max_ia_freq /= 1000; 4955 4956 min_ring_freq = I915_READ(DCLK) & 0xf; 4957 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 4958 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 4959 4960 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 4961 /* Convert GT frequency to 50 HZ units */ 4962 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 4963 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 4964 } else { 4965 min_gpu_freq = dev_priv->rps.min_freq; 4966 max_gpu_freq = dev_priv->rps.max_freq; 4967 } 4968 4969 /* 4970 * For each potential GPU frequency, load a ring frequency we'd like 4971 * to use for memory access. We do this by specifying the IA frequency 4972 * the PCU should use as a reference to determine the ring frequency. 4973 */ 4974 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { 4975 int diff = max_gpu_freq - gpu_freq; 4976 unsigned int ia_freq = 0, ring_freq = 0; 4977 4978 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 4979 /* 4980 * ring_freq = 2 * GT. ring_freq is in 100MHz units 4981 * No floor required for ring frequency on SKL. 4982 */ 4983 ring_freq = gpu_freq; 4984 } else if (INTEL_INFO(dev)->gen >= 8) { 4985 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 4986 ring_freq = max(min_ring_freq, gpu_freq); 4987 } else if (IS_HASWELL(dev)) { 4988 ring_freq = mult_frac(gpu_freq, 5, 4); 4989 ring_freq = max(min_ring_freq, ring_freq); 4990 /* leave ia_freq as the default, chosen by cpufreq */ 4991 } else { 4992 /* On older processors, there is no separate ring 4993 * clock domain, so in order to boost the bandwidth 4994 * of the ring, we need to upclock the CPU (ia_freq). 4995 * 4996 * For GPU frequencies less than 750MHz, 4997 * just use the lowest ring freq. 
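		 * As a rough illustration (CPU frequency assumed): with a
		 * max_ia_freq of 3400MHz and the scaling_factor of 180, a
		 * bin 10 steps below the GPU maximum yields
		 * ia_freq = 3400 - (10 * 180) / 2 = 2500MHz, which
		 * DIV_ROUND_CLOSEST() then encodes as 25 (100MHz units).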
4998 */ 4999 if (gpu_freq < min_freq) 5000 ia_freq = 800; 5001 else 5002 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 5003 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 5004 } 5005 5006 sandybridge_pcode_write(dev_priv, 5007 GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 5008 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | 5009 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | 5010 gpu_freq); 5011 } 5012 } 5013 5014 void gen6_update_ring_freq(struct drm_device *dev) 5015 { 5016 struct drm_i915_private *dev_priv = dev->dev_private; 5017 5018 if (!HAS_CORE_RING_FREQ(dev)) 5019 return; 5020 5021 mutex_lock(&dev_priv->rps.hw_lock); 5022 __gen6_update_ring_freq(dev); 5023 mutex_unlock(&dev_priv->rps.hw_lock); 5024 } 5025 5026 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 5027 { 5028 struct drm_device *dev = dev_priv->dev; 5029 u32 val, rp0; 5030 5031 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5032 5033 switch (INTEL_INFO(dev)->eu_total) { 5034 case 8: 5035 /* (2 * 4) config */ 5036 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5037 break; 5038 case 12: 5039 /* (2 * 6) config */ 5040 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); 5041 break; 5042 case 16: 5043 /* (2 * 8) config */ 5044 default: 5045 /* Setting (2 * 8) Min RP0 for any other combination */ 5046 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); 5047 break; 5048 } 5049 5050 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); 5051 5052 return rp0; 5053 } 5054 5055 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) 5056 { 5057 u32 val, rpe; 5058 5059 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); 5060 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 5061 5062 return rpe; 5063 } 5064 5065 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) 5066 { 5067 u32 val, rp1; 5068 5069 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5070 rp1 = (val & FB_GFX_FREQ_FUSE_MASK); 5071 5072 return rp1; 5073 } 5074 5075 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) 5076 { 5077 u32 val, rp1; 5078 5079 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 5080 5081 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 5082 5083 return rp1; 5084 } 5085 5086 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 5087 { 5088 u32 val, rp0; 5089 5090 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 5091 5092 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 5093 /* Clamp to max */ 5094 rp0 = min_t(u32, rp0, 0xea); 5095 5096 return rp0; 5097 } 5098 5099 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) 5100 { 5101 u32 val, rpe; 5102 5103 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 5104 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 5105 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 5106 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 5107 5108 return rpe; 5109 } 5110 5111 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 5112 { 5113 u32 val; 5114 5115 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 5116 /* 5117 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 5118 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 5119 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 5120 * a frequency Punit will not allow values below 0xc0. 
Clamp it to 0xc0
5121 	 * to make sure it matches what Punit accepts.
5122 	 */
5123 	return max_t(u32, val, 0xc0);
5124 }
5125 
5126 /* Check that the pctx buffer wasn't moved under us. */
5127 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5128 {
5129 	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5130 
5131 	/* DragonFly - if EDID fails vlv_pctx can wind up NULL */
5132 	if (WARN_ON(!dev_priv->vlv_pctx))
5133 		return;
5134 
5135 	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5136 			     dev_priv->vlv_pctx->stolen->start);
5137 }
5138 
5139 
5140 /* Check that the pcbr address is not empty. */
5141 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5142 {
5143 	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5144 
5145 	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5146 }
5147 
5148 static void cherryview_setup_pctx(struct drm_device *dev)
5149 {
5150 	struct drm_i915_private *dev_priv = dev->dev_private;
5151 	unsigned long pctx_paddr, paddr;
5152 	struct i915_gtt *gtt = &dev_priv->gtt;
5153 	u32 pcbr;
5154 	int pctx_size = 32*1024;
5155 
5156 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5157 
5158 	pcbr = I915_READ(VLV_PCBR);
5159 	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5160 		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5161 		paddr = (dev_priv->mm.stolen_base +
5162 			 (gtt->stolen_size - pctx_size));
5163 
5164 		pctx_paddr = (paddr & (~4095));
5165 		I915_WRITE(VLV_PCBR, pctx_paddr);
5166 	}
5167 
5168 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5169 }
5170 
5171 static void valleyview_setup_pctx(struct drm_device *dev)
5172 {
5173 	struct drm_i915_private *dev_priv = dev->dev_private;
5174 	struct drm_i915_gem_object *pctx;
5175 	unsigned long pctx_paddr;
5176 	u32 pcbr;
5177 	int pctx_size = 24*1024;
5178 
5179 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5180 
5181 	pcbr = I915_READ(VLV_PCBR);
5182 	if (pcbr) {
5183 		/* BIOS set it up already, grab the pre-alloc'd space */
5184 		int pcbr_offset;
5185 
5186 		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5187 		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5188 								      pcbr_offset,
5189 								      I915_GTT_OFFSET_NONE,
5190 								      pctx_size);
5191 		goto out;
5192 	}
5193 
5194 	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5195 
5196 	/*
5197 	 * From the Gunit register HAS:
5198 	 * The Gfx driver is expected to program this register and ensure
5199 	 * proper allocation within Gfx stolen memory. For example, this
5200 	 * register should be programmed such that the PCBR range does not
5201 	 * overlap with other ranges, such as the frame buffer, protected
5202 	 * memory, or any other relevant ranges.
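	 * The fallback below follows that guidance: it carves pctx_size
	 * bytes out of stolen memory with i915_gem_object_create_stolen()
	 * and programs the resulting (page aligned) physical address into
	 * VLV_PCBR.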
5203 */ 5204 pctx = i915_gem_object_create_stolen(dev, pctx_size); 5205 if (!pctx) { 5206 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5207 return; 5208 } 5209 5210 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 5211 I915_WRITE(VLV_PCBR, pctx_paddr); 5212 5213 out: 5214 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5215 dev_priv->vlv_pctx = pctx; 5216 } 5217 5218 static void valleyview_cleanup_pctx(struct drm_device *dev) 5219 { 5220 struct drm_i915_private *dev_priv = dev->dev_private; 5221 5222 if (WARN_ON(!dev_priv->vlv_pctx)) 5223 return; 5224 5225 drm_gem_object_unreference(&dev_priv->vlv_pctx->base); 5226 dev_priv->vlv_pctx = NULL; 5227 } 5228 5229 static void valleyview_init_gt_powersave(struct drm_device *dev) 5230 { 5231 struct drm_i915_private *dev_priv = dev->dev_private; 5232 u32 val; 5233 5234 valleyview_setup_pctx(dev); 5235 5236 mutex_lock(&dev_priv->rps.hw_lock); 5237 5238 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5239 switch ((val >> 6) & 3) { 5240 case 0: 5241 case 1: 5242 dev_priv->mem_freq = 800; 5243 break; 5244 case 2: 5245 dev_priv->mem_freq = 1066; 5246 break; 5247 case 3: 5248 dev_priv->mem_freq = 1333; 5249 break; 5250 } 5251 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 5252 5253 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 5254 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5255 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5256 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5257 dev_priv->rps.max_freq); 5258 5259 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 5260 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5261 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5262 dev_priv->rps.efficient_freq); 5263 5264 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 5265 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 5266 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5267 dev_priv->rps.rp1_freq); 5268 5269 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 5270 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5271 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5272 dev_priv->rps.min_freq); 5273 5274 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 5275 5276 /* Preserve min/max settings in case of re-init */ 5277 if (dev_priv->rps.max_freq_softlimit == 0) 5278 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 5279 5280 if (dev_priv->rps.min_freq_softlimit == 0) 5281 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 5282 5283 mutex_unlock(&dev_priv->rps.hw_lock); 5284 } 5285 5286 static void cherryview_init_gt_powersave(struct drm_device *dev) 5287 { 5288 struct drm_i915_private *dev_priv = dev->dev_private; 5289 u32 val; 5290 5291 cherryview_setup_pctx(dev); 5292 5293 mutex_lock(&dev_priv->rps.hw_lock); 5294 5295 mutex_lock(&dev_priv->sb_lock); 5296 val = vlv_cck_read(dev_priv, CCK_FUSE_REG); 5297 mutex_unlock(&dev_priv->sb_lock); 5298 5299 switch ((val >> 2) & 0x7) { 5300 case 3: 5301 dev_priv->mem_freq = 2000; 5302 break; 5303 default: 5304 dev_priv->mem_freq = 1600; 5305 break; 5306 } 5307 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 5308 5309 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 5310 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5311 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5312 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5313 dev_priv->rps.max_freq); 5314 5315 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 5316 
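	/*
	 * Note that CHV frequencies are expected to be even:
	 * valleyview_set_rps() masks off bit 0 on CHV, and the WARN_ONCE
	 * further down flags any odd fuse value.
	 */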
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5317 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5318 dev_priv->rps.efficient_freq); 5319 5320 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 5321 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 5322 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5323 dev_priv->rps.rp1_freq); 5324 5325 /* PUnit validated range is only [RPe, RP0] */ 5326 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq; 5327 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5328 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5329 dev_priv->rps.min_freq); 5330 5331 WARN_ONCE((dev_priv->rps.max_freq | 5332 dev_priv->rps.efficient_freq | 5333 dev_priv->rps.rp1_freq | 5334 dev_priv->rps.min_freq) & 1, 5335 "Odd GPU freq values\n"); 5336 5337 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 5338 5339 /* Preserve min/max settings in case of re-init */ 5340 if (dev_priv->rps.max_freq_softlimit == 0) 5341 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 5342 5343 if (dev_priv->rps.min_freq_softlimit == 0) 5344 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 5345 5346 mutex_unlock(&dev_priv->rps.hw_lock); 5347 } 5348 5349 static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 5350 { 5351 valleyview_cleanup_pctx(dev); 5352 } 5353 5354 static void cherryview_enable_rps(struct drm_device *dev) 5355 { 5356 struct drm_i915_private *dev_priv = dev->dev_private; 5357 struct intel_engine_cs *ring; 5358 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5359 int i; 5360 5361 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5362 5363 gtfifodbg = I915_READ(GTFIFODBG); 5364 if (gtfifodbg) { 5365 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5366 gtfifodbg); 5367 I915_WRITE(GTFIFODBG, gtfifodbg); 5368 } 5369 5370 cherryview_check_pctx(dev_priv); 5371 5372 /* 1a & 1b: Get forcewake during program sequence. Although the driver 5373 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 5374 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5375 5376 /* Disable RC states. 
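	 * (i.e. clear GEN6_RC_CONTROL first so that the threshold
	 * programming below happens with RC states fully off.)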
*/ 5377 I915_WRITE(GEN6_RC_CONTROL, 0); 5378 5379 /* 2a: Program RC6 thresholds.*/ 5380 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5381 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 5382 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 5383 5384 for_each_ring(ring, dev_priv, i) 5385 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 5386 I915_WRITE(GEN6_RC_SLEEP, 0); 5387 5388 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ 5389 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); 5390 5391 /* allows RC6 residency counter to work */ 5392 I915_WRITE(VLV_COUNTER_CONTROL, 5393 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 5394 VLV_MEDIA_RC6_COUNT_EN | 5395 VLV_RENDER_RC6_COUNT_EN)); 5396 5397 /* For now we assume BIOS is allocating and populating the PCBR */ 5398 pcbr = I915_READ(VLV_PCBR); 5399 5400 /* 3: Enable RC6 */ 5401 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 5402 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5403 rc6_mode = GEN7_RC_CTL_TO_MODE; 5404 5405 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5406 5407 /* 4 Program defaults and thresholds for RPS*/ 5408 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 5409 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 5410 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 5411 I915_WRITE(GEN6_RP_UP_EI, 66000); 5412 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 5413 5414 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5415 5416 /* 5: Enable RPS */ 5417 I915_WRITE(GEN6_RP_CONTROL, 5418 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5419 GEN6_RP_MEDIA_IS_GFX | 5420 GEN6_RP_ENABLE | 5421 GEN6_RP_UP_BUSY_AVG | 5422 GEN6_RP_DOWN_IDLE_AVG); 5423 5424 /* Setting Fixed Bias */ 5425 val = VLV_OVERRIDE_EN | 5426 VLV_SOC_TDP_EN | 5427 CHV_BIAS_CPU_50_SOC_50; 5428 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 5429 5430 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5431 5432 /* RPS code assumes GPLL is used */ 5433 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5434 5435 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); 5436 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5437 5438 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5439 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 5440 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 5441 dev_priv->rps.cur_freq); 5442 5443 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 5444 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5445 dev_priv->rps.efficient_freq); 5446 5447 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 5448 5449 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5450 } 5451 5452 static void valleyview_enable_rps(struct drm_device *dev) 5453 { 5454 struct drm_i915_private *dev_priv = dev->dev_private; 5455 struct intel_engine_cs *ring; 5456 u32 gtfifodbg, val, rc6_mode = 0; 5457 int i; 5458 5459 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5460 5461 valleyview_check_pctx(dev_priv); 5462 5463 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 5464 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5465 gtfifodbg); 5466 I915_WRITE(GTFIFODBG, gtfifodbg); 5467 } 5468 5469 /* If VLV, Forcewake all wells, else re-direct to regular path */ 5470 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5471 5472 /* Disable RC states. 
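 */

/*
 * Note the CHV path above only turned RC6 on when the module option
 * allows it and the BIOS left a valid power-context address in
 * VLV_PCBR; a condensed sketch of that decision (mirrors the code above):
 */
#if 0
	pcbr = I915_READ(VLV_PCBR);
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
#endif

/* Start by disabling all RC states.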
*/ 5473 I915_WRITE(GEN6_RC_CONTROL, 0); 5474 5475 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 5476 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 5477 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 5478 I915_WRITE(GEN6_RP_UP_EI, 66000); 5479 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 5480 5481 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5482 5483 I915_WRITE(GEN6_RP_CONTROL, 5484 GEN6_RP_MEDIA_TURBO | 5485 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5486 GEN6_RP_MEDIA_IS_GFX | 5487 GEN6_RP_ENABLE | 5488 GEN6_RP_UP_BUSY_AVG | 5489 GEN6_RP_DOWN_IDLE_CONT); 5490 5491 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 5492 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 5493 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 5494 5495 for_each_ring(ring, dev_priv, i) 5496 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 5497 5498 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 5499 5500 /* allows RC6 residency counter to work */ 5501 I915_WRITE(VLV_COUNTER_CONTROL, 5502 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | 5503 VLV_RENDER_RC0_COUNT_EN | 5504 VLV_MEDIA_RC6_COUNT_EN | 5505 VLV_RENDER_RC6_COUNT_EN)); 5506 5507 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5508 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 5509 5510 intel_print_rc6_info(dev, rc6_mode); 5511 5512 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5513 5514 /* Setting Fixed Bias */ 5515 val = VLV_OVERRIDE_EN | 5516 VLV_SOC_TDP_EN | 5517 VLV_BIAS_CPU_125_SOC_875; 5518 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 5519 5520 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5521 5522 /* RPS code assumes GPLL is used */ 5523 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5524 5525 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); 5526 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5527 5528 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5529 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 5530 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 5531 dev_priv->rps.cur_freq); 5532 5533 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 5534 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5535 dev_priv->rps.efficient_freq); 5536 5537 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 5538 5539 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5540 } 5541 5542 static unsigned long intel_pxfreq(u32 vidfreq) 5543 { 5544 unsigned long freq; 5545 int div = (vidfreq & 0x3f0000) >> 16; 5546 int post = (vidfreq & 0x3000) >> 12; 5547 int pre = (vidfreq & 0x7); 5548 5549 if (!pre) 5550 return 0; 5551 5552 freq = ((div * 133333) / ((1<<post) * pre)); 5553 5554 return freq; 5555 } 5556 5557 static const struct cparams { 5558 u16 i; 5559 u16 t; 5560 u16 m; 5561 u16 c; 5562 } cparams[] = { 5563 { 1, 1333, 301, 28664 }, 5564 { 1, 1066, 294, 24460 }, 5565 { 1, 800, 294, 25192 }, 5566 { 0, 1333, 276, 27605 }, 5567 { 0, 1066, 276, 27605 }, 5568 { 0, 800, 231, 23784 }, 5569 }; 5570 5571 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 5572 { 5573 u64 total_count, diff, ret; 5574 u32 count1, count2, count3, m = 0, c = 0; 5575 unsigned long now = jiffies_to_msecs(jiffies), diff1; 5576 int i; 5577 5578 assert_spin_locked(&mchdev_lock); 5579 5580 diff1 = now - dev_priv->ips.last_time1; 5581 5582 /* Prevent division-by-zero if we are asking too fast. 5583 * Also, we don't get interesting results if we are polling 5584 * faster than once in 10ms, so just return the saved value 5585 * in such cases. 
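 */

/*
 * What follows is a linear fit over the energy counter rate:
 * chipset_power = (m * (delta_count / delta_ms) + c) / 10, with m and c
 * taken from the cparams[] entry matching (c_m, r_t).  A worked sketch
 * with illustrative numbers (c_m == 0, r_t == 1333 -> m = 276, c = 27605):
 */
#if 0
	u64 rate = div_u64(123456, 100);	/* counts over a 100ms window */
	u64 power = div_u64(276 * rate + 27605, 10);	/* = 36818 */
#endif

/* Polling faster than every 10ms just returns the cached value.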
5586 */ 5587 if (diff1 <= 10) 5588 return dev_priv->ips.chipset_power; 5589 5590 count1 = I915_READ(DMIEC); 5591 count2 = I915_READ(DDREC); 5592 count3 = I915_READ(CSIEC); 5593 5594 total_count = count1 + count2 + count3; 5595 5596 /* FIXME: handle per-counter overflow */ 5597 if (total_count < dev_priv->ips.last_count1) { 5598 diff = ~0UL - dev_priv->ips.last_count1; 5599 diff += total_count; 5600 } else { 5601 diff = total_count - dev_priv->ips.last_count1; 5602 } 5603 5604 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 5605 if (cparams[i].i == dev_priv->ips.c_m && 5606 cparams[i].t == dev_priv->ips.r_t) { 5607 m = cparams[i].m; 5608 c = cparams[i].c; 5609 break; 5610 } 5611 } 5612 5613 diff = div_u64(diff, diff1); 5614 ret = ((m * diff) + c); 5615 ret = div_u64(ret, 10); 5616 5617 dev_priv->ips.last_count1 = total_count; 5618 dev_priv->ips.last_time1 = now; 5619 5620 dev_priv->ips.chipset_power = ret; 5621 5622 return ret; 5623 } 5624 5625 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 5626 { 5627 struct drm_device *dev = dev_priv->dev; 5628 unsigned long val; 5629 5630 if (INTEL_INFO(dev)->gen != 5) 5631 return 0; 5632 5633 spin_lock_irq(&mchdev_lock); 5634 5635 val = __i915_chipset_val(dev_priv); 5636 5637 spin_unlock_irq(&mchdev_lock); 5638 5639 return val; 5640 } 5641 5642 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 5643 { 5644 unsigned long m, x, b; 5645 u32 tsfs; 5646 5647 tsfs = I915_READ(TSFS); 5648 5649 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 5650 x = I915_READ8(TR1); 5651 5652 b = tsfs & TSFS_INTR_MASK; 5653 5654 return ((m * x) / 127) - b; 5655 } 5656 5657 static int _pxvid_to_vd(u8 pxvid) 5658 { 5659 if (pxvid == 0) 5660 return 0; 5661 5662 if (pxvid >= 8 && pxvid < 31) 5663 pxvid = 31; 5664 5665 return (pxvid + 2) * 125; 5666 } 5667 5668 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 5669 { 5670 struct drm_device *dev = dev_priv->dev; 5671 const int vd = _pxvid_to_vd(pxvid); 5672 const int vm = vd - 1125; 5673 5674 if (INTEL_INFO(dev)->is_mobile) 5675 return vm > 0 ? vm : 0; 5676 5677 return vd; 5678 } 5679 5680 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 5681 { 5682 u64 now, diff, diffms; 5683 u32 count; 5684 5685 assert_spin_locked(&mchdev_lock); 5686 5687 now = ktime_get_raw_ns(); 5688 diffms = now - dev_priv->ips.last_time2; 5689 do_div(diffms, NSEC_PER_MSEC); 5690 5691 /* Don't divide by 0 */ 5692 if (!diffms) 5693 return; 5694 5695 count = I915_READ(GFXEC); 5696 5697 if (count < dev_priv->ips.last_count2) { 5698 diff = ~0UL - dev_priv->ips.last_count2; 5699 diff += count; 5700 } else { 5701 diff = count - dev_priv->ips.last_count2; 5702 } 5703 5704 dev_priv->ips.last_count2 = count; 5705 dev_priv->ips.last_time2 = now; 5706 5707 /* More magic constants... 
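 */

/*
 * Related: _pxvid_to_vd() above decodes the 7-bit PXVID code (codes
 * 8..30 are clamped to 31 first).  A worked example with an illustrative
 * code of 0x20:
 */
#if 0
	int vd = (0x20 + 2) * 125;	/* 34 * 125 = 4250 */
	int vm = vd - 1125;		/* mobile parts report 3125, others vd */
#endif

/* Scale the counter delta into the stored gfx_power value.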
*/ 5708 diff = diff * 1181; 5709 diff = div_u64(diff, diffms * 10); 5710 dev_priv->ips.gfx_power = diff; 5711 } 5712 5713 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 5714 { 5715 struct drm_device *dev = dev_priv->dev; 5716 5717 if (INTEL_INFO(dev)->gen != 5) 5718 return; 5719 5720 spin_lock_irq(&mchdev_lock); 5721 5722 __i915_update_gfx_val(dev_priv); 5723 5724 spin_unlock_irq(&mchdev_lock); 5725 } 5726 5727 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 5728 { 5729 unsigned long t, corr, state1, corr2, state2; 5730 u32 pxvid, ext_v; 5731 5732 assert_spin_locked(&mchdev_lock); 5733 5734 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq)); 5735 pxvid = (pxvid >> 24) & 0x7f; 5736 ext_v = pvid_to_extvid(dev_priv, pxvid); 5737 5738 state1 = ext_v; 5739 5740 t = i915_mch_val(dev_priv); 5741 5742 /* Revel in the empirically derived constants */ 5743 5744 /* Correction factor in 1/100000 units */ 5745 if (t > 80) 5746 corr = ((t * 2349) + 135940); 5747 else if (t >= 50) 5748 corr = ((t * 964) + 29317); 5749 else /* < 50 */ 5750 corr = ((t * 301) + 1004); 5751 5752 corr = corr * ((150142 * state1) / 10000 - 78642); 5753 corr /= 100000; 5754 corr2 = (corr * dev_priv->ips.corr); 5755 5756 state2 = (corr2 * state1) / 10000; 5757 state2 /= 100; /* convert to mW */ 5758 5759 __i915_update_gfx_val(dev_priv); 5760 5761 return dev_priv->ips.gfx_power + state2; 5762 } 5763 5764 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 5765 { 5766 struct drm_device *dev = dev_priv->dev; 5767 unsigned long val; 5768 5769 if (INTEL_INFO(dev)->gen != 5) 5770 return 0; 5771 5772 spin_lock_irq(&mchdev_lock); 5773 5774 val = __i915_gfx_val(dev_priv); 5775 5776 spin_unlock_irq(&mchdev_lock); 5777 5778 return val; 5779 } 5780 5781 /** 5782 * i915_read_mch_val - return value for IPS use 5783 * 5784 * Calculate and return a value for the IPS driver to use when deciding whether 5785 * we have thermal and power headroom to increase CPU or GPU power budget. 5786 */ 5787 unsigned long i915_read_mch_val(void) 5788 { 5789 struct drm_i915_private *dev_priv; 5790 unsigned long chipset_val, graphics_val, ret = 0; 5791 5792 spin_lock_irq(&mchdev_lock); 5793 if (!i915_mch_dev) 5794 goto out_unlock; 5795 dev_priv = i915_mch_dev; 5796 5797 chipset_val = __i915_chipset_val(dev_priv); 5798 graphics_val = __i915_gfx_val(dev_priv); 5799 5800 ret = chipset_val + graphics_val; 5801 5802 out_unlock: 5803 spin_unlock_irq(&mchdev_lock); 5804 5805 return ret; 5806 } 5807 5808 /** 5809 * i915_gpu_raise - raise GPU frequency limit 5810 * 5811 * Raise the limit; IPS indicates we have thermal headroom. 5812 */ 5813 bool i915_gpu_raise(void) 5814 { 5815 struct drm_i915_private *dev_priv; 5816 bool ret = true; 5817 5818 spin_lock_irq(&mchdev_lock); 5819 if (!i915_mch_dev) { 5820 ret = false; 5821 goto out_unlock; 5822 } 5823 dev_priv = i915_mch_dev; 5824 5825 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 5826 dev_priv->ips.max_delay--; 5827 5828 out_unlock: 5829 spin_unlock_irq(&mchdev_lock); 5830 5831 return ret; 5832 } 5833 5834 /** 5835 * i915_gpu_lower - lower GPU frequency limit 5836 * 5837 * IPS indicates we're close to a thermal limit, so throttle back the GPU 5838 * frequency maximum. 
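 */

/*
 * These exported hooks are the whole i915 <-> intel_ips contract.  A
 * minimal sketch of how an IPS-style client might drive them ("budget"
 * is a hypothetical threshold, not a real variable):
 */
#if 0
	unsigned long power = i915_read_mch_val();

	if (power < budget)
		i915_gpu_raise();	/* headroom: allow a higher GPU cap */
	else
		i915_gpu_lower();	/* near the limit: pull the cap down */
#endif

/* "Lower" here means raising ips.max_delay, i.e. reducing the frequency cap.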
5839 */ 5840 bool i915_gpu_lower(void) 5841 { 5842 struct drm_i915_private *dev_priv; 5843 bool ret = true; 5844 5845 spin_lock_irq(&mchdev_lock); 5846 if (!i915_mch_dev) { 5847 ret = false; 5848 goto out_unlock; 5849 } 5850 dev_priv = i915_mch_dev; 5851 5852 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 5853 dev_priv->ips.max_delay++; 5854 5855 out_unlock: 5856 spin_unlock_irq(&mchdev_lock); 5857 5858 return ret; 5859 } 5860 5861 /** 5862 * i915_gpu_busy - indicate GPU business to IPS 5863 * 5864 * Tell the IPS driver whether or not the GPU is busy. 5865 */ 5866 bool i915_gpu_busy(void) 5867 { 5868 struct drm_i915_private *dev_priv; 5869 struct intel_engine_cs *ring; 5870 bool ret = false; 5871 int i; 5872 5873 spin_lock_irq(&mchdev_lock); 5874 if (!i915_mch_dev) 5875 goto out_unlock; 5876 dev_priv = i915_mch_dev; 5877 5878 for_each_ring(ring, dev_priv, i) 5879 ret |= !list_empty(&ring->request_list); 5880 5881 out_unlock: 5882 spin_unlock_irq(&mchdev_lock); 5883 5884 return ret; 5885 } 5886 5887 /** 5888 * i915_gpu_turbo_disable - disable graphics turbo 5889 * 5890 * Disable graphics turbo by resetting the max frequency and setting the 5891 * current frequency to the default. 5892 */ 5893 bool i915_gpu_turbo_disable(void) 5894 { 5895 struct drm_i915_private *dev_priv; 5896 bool ret = true; 5897 5898 spin_lock_irq(&mchdev_lock); 5899 if (!i915_mch_dev) { 5900 ret = false; 5901 goto out_unlock; 5902 } 5903 dev_priv = i915_mch_dev; 5904 5905 dev_priv->ips.max_delay = dev_priv->ips.fstart; 5906 5907 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 5908 ret = false; 5909 5910 out_unlock: 5911 spin_unlock_irq(&mchdev_lock); 5912 5913 return ret; 5914 } 5915 5916 #if 0 5917 /** 5918 * Tells the intel_ips driver that the i915 driver is now loaded, if 5919 * IPS got loaded first. 5920 * 5921 * This awkward dance is so that neither module has to depend on the 5922 * other in order for IPS to do the appropriate communication of 5923 * GPU turbo limits to i915. 5924 */ 5925 static void 5926 ips_ping_for_i915_load(void) 5927 { 5928 void (*link)(void); 5929 5930 link = symbol_get(ips_link_to_i915_driver); 5931 if (link) { 5932 link(); 5933 symbol_put(ips_link_to_i915_driver); 5934 } 5935 } 5936 #endif 5937 5938 void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 5939 { 5940 /* We only register the i915 ips part with intel-ips once everything is 5941 * set up, to avoid intel-ips sneaking in and reading bogus values. 
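 */

/*
 * Every hook above follows the same reader-side pattern against
 * mchdev_lock, which is what makes this late registration safe; a
 * condensed sketch of that pattern:
 */
#if 0
	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;	/* i915 not (or no longer) registered */
	dev_priv = i915_mch_dev;
	/* ... read counters / adjust limits ... */
out_unlock:
	spin_unlock_irq(&mchdev_lock);
#endif

/* Publish the device pointer under that same lock.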
*/ 5942 spin_lock_irq(&mchdev_lock); 5943 i915_mch_dev = dev_priv; 5944 spin_unlock_irq(&mchdev_lock); 5945 5946 } 5947 5948 void intel_gpu_ips_teardown(void) 5949 { 5950 spin_lock_irq(&mchdev_lock); 5951 i915_mch_dev = NULL; 5952 spin_unlock_irq(&mchdev_lock); 5953 } 5954 5955 static void intel_init_emon(struct drm_device *dev) 5956 { 5957 struct drm_i915_private *dev_priv = dev->dev_private; 5958 u32 lcfuse; 5959 u8 pxw[16]; 5960 int i; 5961 5962 /* Disable to program */ 5963 I915_WRITE(ECR, 0); 5964 POSTING_READ(ECR); 5965 5966 /* Program energy weights for various events */ 5967 I915_WRITE(SDEW, 0x15040d00); 5968 I915_WRITE(CSIEW0, 0x007f0000); 5969 I915_WRITE(CSIEW1, 0x1e220004); 5970 I915_WRITE(CSIEW2, 0x04000004); 5971 5972 for (i = 0; i < 5; i++) 5973 I915_WRITE(PEW(i), 0); 5974 for (i = 0; i < 3; i++) 5975 I915_WRITE(DEW(i), 0); 5976 5977 /* Program P-state weights to account for frequency power adjustment */ 5978 for (i = 0; i < 16; i++) { 5979 u32 pxvidfreq = I915_READ(PXVFREQ(i)); 5980 unsigned long freq = intel_pxfreq(pxvidfreq); 5981 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 5982 PXVFREQ_PX_SHIFT; 5983 unsigned long val; 5984 5985 val = vid * vid; 5986 val *= (freq / 1000); 5987 val *= 255; 5988 val /= (127*127*900); 5989 if (val > 0xff) 5990 DRM_ERROR("bad pxval: %ld\n", val); 5991 pxw[i] = val; 5992 } 5993 /* Render standby states get 0 weight */ 5994 pxw[14] = 0; 5995 pxw[15] = 0; 5996 5997 for (i = 0; i < 4; i++) { 5998 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 5999 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 6000 I915_WRITE(PXW(i), val); 6001 } 6002 6003 /* Adjust magic regs to magic values (more experimental results) */ 6004 I915_WRITE(OGW0, 0); 6005 I915_WRITE(OGW1, 0); 6006 I915_WRITE(EG0, 0x00007f00); 6007 I915_WRITE(EG1, 0x0000000e); 6008 I915_WRITE(EG2, 0x000e0000); 6009 I915_WRITE(EG3, 0x68000300); 6010 I915_WRITE(EG4, 0x42000000); 6011 I915_WRITE(EG5, 0x00140031); 6012 I915_WRITE(EG6, 0); 6013 I915_WRITE(EG7, 0); 6014 6015 for (i = 0; i < 8; i++) 6016 I915_WRITE(PXWL(i), 0); 6017 6018 /* Enable PMON + select events */ 6019 I915_WRITE(ECR, 0x80000019); 6020 6021 lcfuse = I915_READ(LCFUSE02); 6022 6023 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 6024 } 6025 6026 void intel_init_gt_powersave(struct drm_device *dev) 6027 { 6028 struct drm_i915_private *dev_priv = dev->dev_private; 6029 6030 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 6031 /* 6032 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a 6033 * requirement. 
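 */

/*
 * Aside: the per-P-state weight computed in intel_init_emon() above is
 * val = vid^2 * (freq / 1000) * 255 / (127 * 127 * 900).  A worked
 * example with illustrative inputs (vid 64, freq ~400000, i.e. 400 MHz):
 */
#if 0
	unsigned long val = (64UL * 64 * 400 * 255) / (127UL * 127 * 900);
	/* = 417792000 / 14516100 = 28, comfortably below the 0xff limit */
#endif

/* If RC6 is off, hold a permanent runtime-PM reference instead.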
6034 */ 6035 if (!i915.enable_rc6) { 6036 DRM_INFO("RC6 disabled, disabling runtime PM support\n"); 6037 intel_runtime_pm_get(dev_priv); 6038 } 6039 6040 if (IS_CHERRYVIEW(dev)) 6041 cherryview_init_gt_powersave(dev); 6042 else if (IS_VALLEYVIEW(dev)) 6043 valleyview_init_gt_powersave(dev); 6044 } 6045 6046 void intel_cleanup_gt_powersave(struct drm_device *dev) 6047 { 6048 struct drm_i915_private *dev_priv = dev->dev_private; 6049 6050 if (IS_CHERRYVIEW(dev)) 6051 return; 6052 else if (IS_VALLEYVIEW(dev)) 6053 valleyview_cleanup_gt_powersave(dev); 6054 6055 if (!i915.enable_rc6) 6056 intel_runtime_pm_put(dev_priv); 6057 } 6058 6059 static void gen6_suspend_rps(struct drm_device *dev) 6060 { 6061 #if 0 6062 struct drm_i915_private *dev_priv = dev->dev_private; 6063 6064 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6065 #endif 6066 6067 gen6_disable_rps_interrupts(dev); 6068 } 6069 6070 /** 6071 * intel_suspend_gt_powersave - suspend PM work and helper threads 6072 * @dev: drm device 6073 * 6074 * We don't want to disable RC6 or other features here, we just want 6075 * to make sure any work we've queued has finished and won't bother 6076 * us while we're suspended. 6077 */ 6078 void intel_suspend_gt_powersave(struct drm_device *dev) 6079 { 6080 struct drm_i915_private *dev_priv = dev->dev_private; 6081 6082 if (INTEL_INFO(dev)->gen < 6) 6083 return; 6084 6085 gen6_suspend_rps(dev); 6086 6087 /* Force GPU to min freq during suspend */ 6088 gen6_rps_idle(dev_priv); 6089 } 6090 6091 void intel_disable_gt_powersave(struct drm_device *dev) 6092 { 6093 struct drm_i915_private *dev_priv = dev->dev_private; 6094 6095 if (IS_IRONLAKE_M(dev)) { 6096 ironlake_disable_drps(dev); 6097 } else if (INTEL_INFO(dev)->gen >= 6) { 6098 intel_suspend_gt_powersave(dev); 6099 6100 mutex_lock(&dev_priv->rps.hw_lock); 6101 if (INTEL_INFO(dev)->gen >= 9) 6102 gen9_disable_rps(dev); 6103 else if (IS_CHERRYVIEW(dev)) 6104 cherryview_disable_rps(dev); 6105 else if (IS_VALLEYVIEW(dev)) 6106 valleyview_disable_rps(dev); 6107 else 6108 gen6_disable_rps(dev); 6109 6110 dev_priv->rps.enabled = false; 6111 mutex_unlock(&dev_priv->rps.hw_lock); 6112 } 6113 } 6114 6115 static void intel_gen6_powersave_work(struct work_struct *work) 6116 { 6117 struct drm_i915_private *dev_priv = 6118 container_of(work, struct drm_i915_private, 6119 rps.delayed_resume_work.work); 6120 struct drm_device *dev = dev_priv->dev; 6121 6122 mutex_lock(&dev_priv->rps.hw_lock); 6123 6124 gen6_reset_rps_interrupts(dev); 6125 6126 if (IS_CHERRYVIEW(dev)) { 6127 cherryview_enable_rps(dev); 6128 } else if (IS_VALLEYVIEW(dev)) { 6129 valleyview_enable_rps(dev); 6130 } else if (INTEL_INFO(dev)->gen >= 9) { 6131 gen9_enable_rc6(dev); 6132 gen9_enable_rps(dev); 6133 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 6134 __gen6_update_ring_freq(dev); 6135 } else if (IS_BROADWELL(dev)) { 6136 gen8_enable_rps(dev); 6137 __gen6_update_ring_freq(dev); 6138 } else { 6139 gen6_enable_rps(dev); 6140 __gen6_update_ring_freq(dev); 6141 } 6142 6143 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6144 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); 6145 6146 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); 6147 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); 6148 6149 dev_priv->rps.enabled = true; 6150 6151 gen6_enable_rps_interrupts(dev); 6152 6153 mutex_unlock(&dev_priv->rps.hw_lock); 6154 6155 intel_runtime_pm_put(dev_priv); 6156 } 6157 6158 void intel_enable_gt_powersave(struct drm_device *dev) 6159 { 6160 struct 
drm_i915_private *dev_priv = dev->dev_private; 6161 6162 /* Powersaving is controlled by the host when inside a VM */ 6163 if (intel_vgpu_active(dev)) 6164 return; 6165 6166 if (IS_IRONLAKE_M(dev)) { 6167 mutex_lock(&dev->struct_mutex); 6168 ironlake_enable_drps(dev); 6169 intel_init_emon(dev); 6170 mutex_unlock(&dev->struct_mutex); 6171 } else if (INTEL_INFO(dev)->gen >= 6) { 6172 /* 6173 * PCU communication is slow and this doesn't need to be 6174 * done at any specific time, so do this out of our fast path 6175 * to make resume and init faster. 6176 * 6177 * We depend on the HW RC6 power context save/restore 6178 * mechanism when entering D3 through runtime PM suspend. So 6179 * disable RPM until RPS/RC6 is properly setup. We can only 6180 * get here via the driver load/system resume/runtime resume 6181 * paths, so the _noresume version is enough (and in case of 6182 * runtime resume it's necessary). 6183 */ 6184 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work, 6185 round_jiffies_up_relative(HZ))) 6186 intel_runtime_pm_get_noresume(dev_priv); 6187 } 6188 } 6189 6190 void intel_reset_gt_powersave(struct drm_device *dev) 6191 { 6192 struct drm_i915_private *dev_priv = dev->dev_private; 6193 6194 if (INTEL_INFO(dev)->gen < 6) 6195 return; 6196 6197 gen6_suspend_rps(dev); 6198 dev_priv->rps.enabled = false; 6199 } 6200 6201 static void ibx_init_clock_gating(struct drm_device *dev) 6202 { 6203 struct drm_i915_private *dev_priv = dev->dev_private; 6204 6205 /* 6206 * On Ibex Peak and Cougar Point, we need to disable clock 6207 * gating for the panel power sequencer or it will fail to 6208 * start up when no ports are active. 6209 */ 6210 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 6211 } 6212 6213 static void g4x_disable_trickle_feed(struct drm_device *dev) 6214 { 6215 struct drm_i915_private *dev_priv = dev->dev_private; 6216 enum i915_pipe pipe; 6217 6218 for_each_pipe(dev_priv, pipe) { 6219 I915_WRITE(DSPCNTR(pipe), 6220 I915_READ(DSPCNTR(pipe)) | 6221 DISPPLANE_TRICKLE_FEED_DISABLE); 6222 6223 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); 6224 POSTING_READ(DSPSURF(pipe)); 6225 } 6226 } 6227 6228 static void ilk_init_lp_watermarks(struct drm_device *dev) 6229 { 6230 struct drm_i915_private *dev_priv = dev->dev_private; 6231 6232 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 6233 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 6234 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 6235 6236 /* 6237 * Don't touch WM1S_LP_EN here. 6238 * Doing so could cause underruns. 
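 */

/*
 * Aside: intel_enable_gt_powersave() above defers the slow PCU setup to
 * delayed_resume_work, pinning runtime PM only when the work was really
 * queued; the worker drops that reference when it finishes.  Condensed:
 */
#if 0
	if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
				  round_jiffies_up_relative(HZ)))
		intel_runtime_pm_get_noresume(dev_priv);
	/* ... and at the end of intel_gen6_powersave_work(): */
	intel_runtime_pm_put(dev_priv);
#endif

/* WM1S_LP_EN is deliberately left untouched, see above.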
6239 */ 6240 } 6241 6242 static void ironlake_init_clock_gating(struct drm_device *dev) 6243 { 6244 struct drm_i915_private *dev_priv = dev->dev_private; 6245 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6246 6247 /* 6248 * Required for FBC 6249 * WaFbcDisableDpfcClockGating:ilk 6250 */ 6251 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 6252 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 6253 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 6254 6255 I915_WRITE(PCH_3DCGDIS0, 6256 MARIUNIT_CLOCK_GATE_DISABLE | 6257 SVSMUNIT_CLOCK_GATE_DISABLE); 6258 I915_WRITE(PCH_3DCGDIS1, 6259 VFMUNIT_CLOCK_GATE_DISABLE); 6260 6261 /* 6262 * According to the spec the following bits should be set in 6263 * order to enable memory self-refresh 6264 * The bit 22/21 of 0x42004 6265 * The bit 5 of 0x42020 6266 * The bit 15 of 0x45000 6267 */ 6268 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6269 (I915_READ(ILK_DISPLAY_CHICKEN2) | 6270 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 6271 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 6272 I915_WRITE(DISP_ARB_CTL, 6273 (I915_READ(DISP_ARB_CTL) | 6274 DISP_FBC_WM_DIS)); 6275 6276 ilk_init_lp_watermarks(dev); 6277 6278 /* 6279 * Based on the document from hardware guys the following bits 6280 * should be set unconditionally in order to enable FBC. 6281 * The bit 22 of 0x42000 6282 * The bit 22 of 0x42004 6283 * The bit 7,8,9 of 0x42020. 6284 */ 6285 if (IS_IRONLAKE_M(dev)) { 6286 /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 6287 I915_WRITE(ILK_DISPLAY_CHICKEN1, 6288 I915_READ(ILK_DISPLAY_CHICKEN1) | 6289 ILK_FBCQ_DIS); 6290 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6291 I915_READ(ILK_DISPLAY_CHICKEN2) | 6292 ILK_DPARB_GATE); 6293 } 6294 6295 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6296 6297 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6298 I915_READ(ILK_DISPLAY_CHICKEN2) | 6299 ILK_ELPIN_409_SELECT); 6300 I915_WRITE(_3D_CHICKEN2, 6301 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 6302 _3D_CHICKEN2_WM_READ_PIPELINED); 6303 6304 /* WaDisableRenderCachePipelinedFlush:ilk */ 6305 I915_WRITE(CACHE_MODE_0, 6306 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 6307 6308 /* WaDisable_RenderCache_OperationalFlush:ilk */ 6309 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6310 6311 g4x_disable_trickle_feed(dev); 6312 6313 ibx_init_clock_gating(dev); 6314 } 6315 6316 static void cpt_init_clock_gating(struct drm_device *dev) 6317 { 6318 struct drm_i915_private *dev_priv = dev->dev_private; 6319 int pipe; 6320 uint32_t val; 6321 6322 /* 6323 * On Ibex Peak and Cougar Point, we need to disable clock 6324 * gating for the panel power sequencer or it will fail to 6325 * start up when no ports are active. 6326 */ 6327 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 6328 PCH_DPLUNIT_CLOCK_GATE_DISABLE | 6329 PCH_CPUNIT_CLOCK_GATE_DISABLE); 6330 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 6331 DPLS_EDP_PPS_FIX_DIS); 6332 /* The below fixes the weird display corruption, a few pixels shifted 6333 * downward, on (only) LVDS of some HP laptops with IVY. 
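 */

/*
 * Aside: many writes in these init_clock_gating functions go to "masked"
 * registers, where the high 16 bits select which low bits latch.  A
 * simplified sketch of the helpers used throughout (the i915_reg.h
 * definitions add type plumbing on top of this):
 */
#if 0
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) */
#endif

/* The per-pipe loop below applies the LVDS fix just described.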
6334 */ 6335 for_each_pipe(dev_priv, pipe) { 6336 val = I915_READ(TRANS_CHICKEN2(pipe)); 6337 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 6338 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6339 if (dev_priv->vbt.fdi_rx_polarity_inverted) 6340 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6341 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 6342 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 6343 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 6344 I915_WRITE(TRANS_CHICKEN2(pipe), val); 6345 } 6346 /* WADP0ClockGatingDisable */ 6347 for_each_pipe(dev_priv, pipe) { 6348 I915_WRITE(TRANS_CHICKEN1(pipe), 6349 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6350 } 6351 } 6352 6353 static void gen6_check_mch_setup(struct drm_device *dev) 6354 { 6355 struct drm_i915_private *dev_priv = dev->dev_private; 6356 uint32_t tmp; 6357 6358 tmp = I915_READ(MCH_SSKPD); 6359 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) 6360 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n", 6361 tmp); 6362 } 6363 6364 static void gen6_init_clock_gating(struct drm_device *dev) 6365 { 6366 struct drm_i915_private *dev_priv = dev->dev_private; 6367 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6368 6369 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6370 6371 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6372 I915_READ(ILK_DISPLAY_CHICKEN2) | 6373 ILK_ELPIN_409_SELECT); 6374 6375 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ 6376 I915_WRITE(_3D_CHICKEN, 6377 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 6378 6379 /* WaDisable_RenderCache_OperationalFlush:snb */ 6380 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6381 6382 /* 6383 * BSpec recommends 8x4 when MSAA is used, 6384 * however in practice 16x4 seems fastest. 6385 * 6386 * Note that PS/WM thread counts depend on the WIZ hashing 6387 * disable bit, which we don't touch here, but it's good 6388 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6389 */ 6390 I915_WRITE(GEN6_GT_MODE, 6391 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6392 6393 ilk_init_lp_watermarks(dev); 6394 6395 I915_WRITE(CACHE_MODE_0, 6396 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 6397 6398 I915_WRITE(GEN6_UCGCTL1, 6399 I915_READ(GEN6_UCGCTL1) | 6400 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 6401 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 6402 6403 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 6404 * gating disable must be set. Failure to set it results in 6405 * flickering pixels due to Z write ordering failures after 6406 * some amount of runtime in the Mesa "fire" demo, and Unigine 6407 * Sanctuary and Tropics, and apparently anything else with 6408 * alpha test or pixel discard. 6409 * 6410 * According to the spec, bit 11 (RCCUNIT) must also be set, 6411 * but we didn't debug actual testcases to find it out. 6412 * 6413 * WaDisableRCCUnitClockGating:snb 6414 * WaDisableRCPBUnitClockGating:snb 6415 */ 6416 I915_WRITE(GEN6_UCGCTL2, 6417 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 6418 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 6419 6420 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 6421 I915_WRITE(_3D_CHICKEN3, 6422 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); 6423 6424 /* 6425 * Bspec says: 6426 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 6427 * 3DSTATE_SF number of SF output attributes is more than 16."
6428 */ 6429 I915_WRITE(_3D_CHICKEN3, 6430 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 6431 6432 /* 6433 * According to the spec the following bits should be 6434 * set in order to enable memory self-refresh and fbc: 6435 * The bit21 and bit22 of 0x42000 6436 * The bit21 and bit22 of 0x42004 6437 * The bit5 and bit7 of 0x42020 6438 * The bit14 of 0x70180 6439 * The bit14 of 0x71180 6440 * 6441 * WaFbcAsynchFlipDisableFbcQueue:snb 6442 */ 6443 I915_WRITE(ILK_DISPLAY_CHICKEN1, 6444 I915_READ(ILK_DISPLAY_CHICKEN1) | 6445 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 6446 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6447 I915_READ(ILK_DISPLAY_CHICKEN2) | 6448 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 6449 I915_WRITE(ILK_DSPCLK_GATE_D, 6450 I915_READ(ILK_DSPCLK_GATE_D) | 6451 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 6452 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 6453 6454 g4x_disable_trickle_feed(dev); 6455 6456 cpt_init_clock_gating(dev); 6457 6458 gen6_check_mch_setup(dev); 6459 } 6460 6461 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 6462 { 6463 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 6464 6465 /* 6466 * WaVSThreadDispatchOverride:ivb,vlv 6467 * 6468 * This actually overrides the dispatch 6469 * mode for all thread types. 6470 */ 6471 reg &= ~GEN7_FF_SCHED_MASK; 6472 reg |= GEN7_FF_TS_SCHED_HW; 6473 reg |= GEN7_FF_VS_SCHED_HW; 6474 reg |= GEN7_FF_DS_SCHED_HW; 6475 6476 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 6477 } 6478 6479 static void lpt_init_clock_gating(struct drm_device *dev) 6480 { 6481 struct drm_i915_private *dev_priv = dev->dev_private; 6482 6483 /* 6484 * TODO: this bit should only be enabled when really needed, then 6485 * disabled when not needed anymore in order to save power. 6486 */ 6487 if (HAS_PCH_LPT_LP(dev)) 6488 I915_WRITE(SOUTH_DSPCLK_GATE_D, 6489 I915_READ(SOUTH_DSPCLK_GATE_D) | 6490 PCH_LP_PARTITION_LEVEL_DISABLE); 6491 6492 /* WADPOClockGatingDisable:hsw */ 6493 I915_WRITE(TRANS_CHICKEN1(PIPE_A), 6494 I915_READ(TRANS_CHICKEN1(PIPE_A)) | 6495 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6496 } 6497 6498 static void lpt_suspend_hw(struct drm_device *dev) 6499 { 6500 struct drm_i915_private *dev_priv = dev->dev_private; 6501 6502 if (HAS_PCH_LPT_LP(dev)) { 6503 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 6504 6505 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 6506 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 6507 } 6508 } 6509 6510 static void broadwell_init_clock_gating(struct drm_device *dev) 6511 { 6512 struct drm_i915_private *dev_priv = dev->dev_private; 6513 enum i915_pipe pipe; 6514 uint32_t misccpctl; 6515 6516 ilk_init_lp_watermarks(dev); 6517 6518 /* WaSwitchSolVfFArbitrationPriority:bdw */ 6519 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 6520 6521 /* WaPsrDPAMaskVBlankInSRD:bdw */ 6522 I915_WRITE(CHICKEN_PAR1_1, 6523 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 6524 6525 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 6526 for_each_pipe(dev_priv, pipe) { 6527 I915_WRITE(CHICKEN_PIPESL_1(pipe), 6528 I915_READ(CHICKEN_PIPESL_1(pipe)) | 6529 BDW_DPRS_MASK_VBLANK_SRD); 6530 } 6531 6532 /* WaVSRefCountFullforceMissDisable:bdw */ 6533 /* WaDSRefCountFullforceMissDisable:bdw */ 6534 I915_WRITE(GEN7_FF_THREAD_MODE, 6535 I915_READ(GEN7_FF_THREAD_MODE) & 6536 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 6537 6538 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 6539 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 6540 6541 /* WaDisableSDEUnitClockGating:bdw */ 6542 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 6543 
GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 6544 6545 /* 6546 * WaProgramL3SqcReg1Default:bdw 6547 * WaTempDisableDOPClkGating:bdw 6548 */ 6549 misccpctl = I915_READ(GEN7_MISCCPCTL); 6550 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 6551 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); 6552 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 6553 6554 /* 6555 * WaGttCachingOffByDefault:bdw 6556 * GTT cache may not work with big pages, so if those 6557 * are ever enabled GTT cache may need to be disabled. 6558 */ 6559 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 6560 6561 lpt_init_clock_gating(dev); 6562 } 6563 6564 static void haswell_init_clock_gating(struct drm_device *dev) 6565 { 6566 struct drm_i915_private *dev_priv = dev->dev_private; 6567 6568 ilk_init_lp_watermarks(dev); 6569 6570 /* L3 caching of data atomics doesn't work -- disable it. */ 6571 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 6572 I915_WRITE(HSW_ROW_CHICKEN3, 6573 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); 6574 6575 /* This is required by WaCatErrorRejectionIssue:hsw */ 6576 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 6577 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 6578 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 6579 6580 /* WaVSRefCountFullforceMissDisable:hsw */ 6581 I915_WRITE(GEN7_FF_THREAD_MODE, 6582 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 6583 6584 /* WaDisable_RenderCache_OperationalFlush:hsw */ 6585 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6586 6587 /* enable HiZ Raw Stall Optimization */ 6588 I915_WRITE(CACHE_MODE_0_GEN7, 6589 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 6590 6591 /* WaDisable4x2SubspanOptimization:hsw */ 6592 I915_WRITE(CACHE_MODE_1, 6593 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 6594 6595 /* 6596 * BSpec recommends 8x4 when MSAA is used, 6597 * however in practice 16x4 seems fastest. 6598 * 6599 * Note that PS/WM thread counts depend on the WIZ hashing 6600 * disable bit, which we don't touch here, but it's good 6601 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
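 */

/*
 * _MASKED_FIELD() extends the masked-bit idea to multi-bit fields: mask
 * in the high word, new value in the low word, so the write below latches
 * only the WIZ hashing bits.  Simplified sketch (the real macro in
 * i915_reg.h adds sanity checks):
 */
#if 0
#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#endif

/* Program WIZ hashing for 16x4.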
6602 */ 6603 I915_WRITE(GEN7_GT_MODE, 6604 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6605 6606 /* WaSampleCChickenBitEnable:hsw */ 6607 I915_WRITE(HALF_SLICE_CHICKEN3, 6608 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); 6609 6610 /* WaSwitchSolVfFArbitrationPriority:hsw */ 6611 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 6612 6613 /* WaRsPkgCStateDisplayPMReq:hsw */ 6614 I915_WRITE(CHICKEN_PAR1_1, 6615 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 6616 6617 lpt_init_clock_gating(dev); 6618 } 6619 6620 static void ivybridge_init_clock_gating(struct drm_device *dev) 6621 { 6622 struct drm_i915_private *dev_priv = dev->dev_private; 6623 uint32_t snpcr; 6624 6625 ilk_init_lp_watermarks(dev); 6626 6627 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 6628 6629 /* WaDisableEarlyCull:ivb */ 6630 I915_WRITE(_3D_CHICKEN3, 6631 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 6632 6633 /* WaDisableBackToBackFlipFix:ivb */ 6634 I915_WRITE(IVB_CHICKEN3, 6635 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 6636 CHICKEN3_DGMG_DONE_FIX_DISABLE); 6637 6638 /* WaDisablePSDDualDispatchEnable:ivb */ 6639 if (IS_IVB_GT1(dev)) 6640 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 6641 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 6642 6643 /* WaDisable_RenderCache_OperationalFlush:ivb */ 6644 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6645 6646 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ 6647 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 6648 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 6649 6650 /* WaApplyL3ControlAndL3ChickenMode:ivb */ 6651 I915_WRITE(GEN7_L3CNTLREG1, 6652 GEN7_WA_FOR_GEN7_L3_CONTROL); 6653 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 6654 GEN7_WA_L3_CHICKEN_MODE); 6655 if (IS_IVB_GT1(dev)) 6656 I915_WRITE(GEN7_ROW_CHICKEN2, 6657 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6658 else { 6659 /* must write both registers */ 6660 I915_WRITE(GEN7_ROW_CHICKEN2, 6661 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6662 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 6663 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6664 } 6665 6666 /* WaForceL3Serialization:ivb */ 6667 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 6668 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 6669 6670 /* 6671 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 6672 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 6673 */ 6674 I915_WRITE(GEN6_UCGCTL2, 6675 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 6676 6677 /* This is required by WaCatErrorRejectionIssue:ivb */ 6678 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 6679 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 6680 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 6681 6682 g4x_disable_trickle_feed(dev); 6683 6684 gen7_setup_fixed_func_scheduler(dev_priv); 6685 6686 if (0) { /* causes HiZ corruption on ivb:gt1 */ 6687 /* enable HiZ Raw Stall Optimization */ 6688 I915_WRITE(CACHE_MODE_0_GEN7, 6689 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 6690 } 6691 6692 /* WaDisable4x2SubspanOptimization:ivb */ 6693 I915_WRITE(CACHE_MODE_1, 6694 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 6695 6696 /* 6697 * BSpec recommends 8x4 when MSAA is used, 6698 * however in practice 16x4 seems fastest. 6699 * 6700 * Note that PS/WM thread counts depend on the WIZ hashing 6701 * disable bit, which we don't touch here, but it's good 6702 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
6703 */ 6704 I915_WRITE(GEN7_GT_MODE, 6705 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6706 6707 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 6708 snpcr &= ~GEN6_MBC_SNPCR_MASK; 6709 snpcr |= GEN6_MBC_SNPCR_MED; 6710 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 6711 6712 if (!HAS_PCH_NOP(dev)) 6713 cpt_init_clock_gating(dev); 6714 6715 gen6_check_mch_setup(dev); 6716 } 6717 6718 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) 6719 { 6720 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 6721 6722 /* 6723 * Disable trickle feed and enable pnd deadline calculation 6724 */ 6725 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 6726 I915_WRITE(CBR1_VLV, 0); 6727 } 6728 6729 static void valleyview_init_clock_gating(struct drm_device *dev) 6730 { 6731 struct drm_i915_private *dev_priv = dev->dev_private; 6732 6733 vlv_init_display_clock_gating(dev_priv); 6734 6735 /* WaDisableEarlyCull:vlv */ 6736 I915_WRITE(_3D_CHICKEN3, 6737 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 6738 6739 /* WaDisableBackToBackFlipFix:vlv */ 6740 I915_WRITE(IVB_CHICKEN3, 6741 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 6742 CHICKEN3_DGMG_DONE_FIX_DISABLE); 6743 6744 /* WaPsdDispatchEnable:vlv */ 6745 /* WaDisablePSDDualDispatchEnable:vlv */ 6746 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 6747 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 6748 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 6749 6750 /* WaDisable_RenderCache_OperationalFlush:vlv */ 6751 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6752 6753 /* WaForceL3Serialization:vlv */ 6754 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 6755 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 6756 6757 /* WaDisableDopClockGating:vlv */ 6758 I915_WRITE(GEN7_ROW_CHICKEN2, 6759 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6760 6761 /* This is required by WaCatErrorRejectionIssue:vlv */ 6762 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 6763 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 6764 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 6765 6766 gen7_setup_fixed_func_scheduler(dev_priv); 6767 6768 /* 6769 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 6770 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 6771 */ 6772 I915_WRITE(GEN6_UCGCTL2, 6773 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 6774 6775 /* WaDisableL3Bank2xClockGate:vlv 6776 * Disabling L3 clock gating- MMIO 940c[25] = 1 6777 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 6778 I915_WRITE(GEN7_UCGCTL4, 6779 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 6780 6781 /* 6782 * BSpec says this must be set, even though 6783 * WaDisable4x2SubspanOptimization isn't listed for VLV. 6784 */ 6785 I915_WRITE(CACHE_MODE_1, 6786 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 6787 6788 /* 6789 * BSpec recommends 8x4 when MSAA is used, 6790 * however in practice 16x4 seems fastest. 6791 * 6792 * Note that PS/WM thread counts depend on the WIZ hashing 6793 * disable bit, which we don't touch here, but it's good 6794 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6795 */ 6796 I915_WRITE(GEN7_GT_MODE, 6797 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6798 6799 /* 6800 * WaIncreaseL3CreditsForVLVB0:vlv 6801 * This is the hardware default actually. 6802 */ 6803 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); 6804 6805 /* 6806 * WaDisableVLVClockGating_VBIIssue:vlv 6807 * Disable clock gating on the GCFG unit to prevent a delay 6808 * in the reporting of vblank events.
6809 */ 6810 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 6811 } 6812 6813 static void cherryview_init_clock_gating(struct drm_device *dev) 6814 { 6815 struct drm_i915_private *dev_priv = dev->dev_private; 6816 6817 vlv_init_display_clock_gating(dev_priv); 6818 6819 /* WaVSRefCountFullforceMissDisable:chv */ 6820 /* WaDSRefCountFullforceMissDisable:chv */ 6821 I915_WRITE(GEN7_FF_THREAD_MODE, 6822 I915_READ(GEN7_FF_THREAD_MODE) & 6823 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 6824 6825 /* WaDisableSemaphoreAndSyncFlipWait:chv */ 6826 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 6827 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 6828 6829 /* WaDisableCSUnitClockGating:chv */ 6830 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 6831 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 6832 6833 /* WaDisableSDEUnitClockGating:chv */ 6834 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 6835 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 6836 6837 /* 6838 * GTT cache may not work with big pages, so if those 6839 * are ever enabled GTT cache may need to be disabled. 6840 */ 6841 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 6842 } 6843 6844 static void g4x_init_clock_gating(struct drm_device *dev) 6845 { 6846 struct drm_i915_private *dev_priv = dev->dev_private; 6847 uint32_t dspclk_gate; 6848 6849 I915_WRITE(RENCLK_GATE_D1, 0); 6850 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 6851 GS_UNIT_CLOCK_GATE_DISABLE | 6852 CL_UNIT_CLOCK_GATE_DISABLE); 6853 I915_WRITE(RAMCLK_GATE_D, 0); 6854 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 6855 OVRUNIT_CLOCK_GATE_DISABLE | 6856 OVCUNIT_CLOCK_GATE_DISABLE; 6857 if (IS_GM45(dev)) 6858 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 6859 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 6860 6861 /* WaDisableRenderCachePipelinedFlush */ 6862 I915_WRITE(CACHE_MODE_0, 6863 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 6864 6865 /* WaDisable_RenderCache_OperationalFlush:g4x */ 6866 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6867 6868 g4x_disable_trickle_feed(dev); 6869 } 6870 6871 static void crestline_init_clock_gating(struct drm_device *dev) 6872 { 6873 struct drm_i915_private *dev_priv = dev->dev_private; 6874 6875 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 6876 I915_WRITE(RENCLK_GATE_D2, 0); 6877 I915_WRITE(DSPCLK_GATE_D, 0); 6878 I915_WRITE(RAMCLK_GATE_D, 0); 6879 I915_WRITE16(DEUC, 0); 6880 I915_WRITE(MI_ARB_STATE, 6881 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 6882 6883 /* WaDisable_RenderCache_OperationalFlush:gen4 */ 6884 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6885 } 6886 6887 static void broadwater_init_clock_gating(struct drm_device *dev) 6888 { 6889 struct drm_i915_private *dev_priv = dev->dev_private; 6890 6891 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 6892 I965_RCC_CLOCK_GATE_DISABLE | 6893 I965_RCPB_CLOCK_GATE_DISABLE | 6894 I965_ISC_CLOCK_GATE_DISABLE | 6895 I965_FBC_CLOCK_GATE_DISABLE); 6896 I915_WRITE(RENCLK_GATE_D2, 0); 6897 I915_WRITE(MI_ARB_STATE, 6898 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 6899 6900 /* WaDisable_RenderCache_OperationalFlush:gen4 */ 6901 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6902 } 6903 6904 static void gen3_init_clock_gating(struct drm_device *dev) 6905 { 6906 struct drm_i915_private *dev_priv = dev->dev_private; 6907 u32 dstate = I915_READ(D_STATE); 6908 6909 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 6910 DSTATE_DOT_CLOCK_GATING; 6911 I915_WRITE(D_STATE, dstate); 6912 6913 if 
(IS_PINEVIEW(dev)) 6914 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 6915 6916 /* IIR "flip pending" means done if this bit is set */ 6917 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 6918 6919 /* interrupts should cause a wake up from C3 */ 6920 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 6921 6922 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 6923 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 6924 6925 I915_WRITE(MI_ARB_STATE, 6926 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 6927 } 6928 6929 static void i85x_init_clock_gating(struct drm_device *dev) 6930 { 6931 struct drm_i915_private *dev_priv = dev->dev_private; 6932 6933 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 6934 6935 /* interrupts should cause a wake up from C3 */ 6936 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 6937 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 6938 6939 I915_WRITE(MEM_MODE, 6940 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); 6941 } 6942 6943 static void i830_init_clock_gating(struct drm_device *dev) 6944 { 6945 struct drm_i915_private *dev_priv = dev->dev_private; 6946 6947 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 6948 6949 I915_WRITE(MEM_MODE, 6950 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | 6951 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); 6952 } 6953 6954 void intel_init_clock_gating(struct drm_device *dev) 6955 { 6956 struct drm_i915_private *dev_priv = dev->dev_private; 6957 6958 if (dev_priv->display.init_clock_gating) 6959 dev_priv->display.init_clock_gating(dev); 6960 } 6961 6962 void intel_suspend_hw(struct drm_device *dev) 6963 { 6964 if (HAS_PCH_LPT(dev)) 6965 lpt_suspend_hw(dev); 6966 } 6967 6968 /* Set up chip specific power management-related functions */ 6969 void intel_init_pm(struct drm_device *dev) 6970 { 6971 struct drm_i915_private *dev_priv = dev->dev_private; 6972 6973 intel_fbc_init(dev_priv); 6974 6975 /* For cxsr */ 6976 if (IS_PINEVIEW(dev)) 6977 i915_pineview_get_mem_freq(dev); 6978 else if (IS_GEN5(dev)) 6979 i915_ironlake_get_mem_freq(dev); 6980 6981 /* For FIFO watermark updates */ 6982 if (INTEL_INFO(dev)->gen >= 9) { 6983 skl_setup_wm_latency(dev); 6984 6985 if (IS_BROXTON(dev)) 6986 dev_priv->display.init_clock_gating = 6987 bxt_init_clock_gating; 6988 dev_priv->display.update_wm = skl_update_wm; 6989 } else if (HAS_PCH_SPLIT(dev)) { 6990 ilk_setup_wm_latency(dev); 6991 6992 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && 6993 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || 6994 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && 6995 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { 6996 dev_priv->display.update_wm = ilk_update_wm; 6997 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; 6998 } else { 6999 DRM_DEBUG_KMS("Failed to read display plane latency. 
" 7000 "Disable CxSR\n"); 7001 } 7002 7003 if (IS_GEN5(dev)) 7004 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 7005 else if (IS_GEN6(dev)) 7006 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 7007 else if (IS_IVYBRIDGE(dev)) 7008 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 7009 else if (IS_HASWELL(dev)) 7010 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 7011 else if (INTEL_INFO(dev)->gen == 8) 7012 dev_priv->display.init_clock_gating = broadwell_init_clock_gating; 7013 } else if (IS_CHERRYVIEW(dev)) { 7014 vlv_setup_wm_latency(dev); 7015 7016 dev_priv->display.update_wm = vlv_update_wm; 7017 dev_priv->display.init_clock_gating = 7018 cherryview_init_clock_gating; 7019 } else if (IS_VALLEYVIEW(dev)) { 7020 vlv_setup_wm_latency(dev); 7021 7022 dev_priv->display.update_wm = vlv_update_wm; 7023 dev_priv->display.init_clock_gating = 7024 valleyview_init_clock_gating; 7025 } else if (IS_PINEVIEW(dev)) { 7026 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 7027 dev_priv->is_ddr3, 7028 dev_priv->fsb_freq, 7029 dev_priv->mem_freq)) { 7030 DRM_INFO("failed to find known CxSR latency " 7031 "(found ddr%s fsb freq %d, mem freq %d), " 7032 "disabling CxSR\n", 7033 (dev_priv->is_ddr3 == 1) ? "3" : "2", 7034 dev_priv->fsb_freq, dev_priv->mem_freq); 7035 /* Disable CxSR and never update its watermark again */ 7036 intel_set_memory_cxsr(dev_priv, false); 7037 dev_priv->display.update_wm = NULL; 7038 } else 7039 dev_priv->display.update_wm = pineview_update_wm; 7040 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 7041 } else if (IS_G4X(dev)) { 7042 dev_priv->display.update_wm = g4x_update_wm; 7043 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 7044 } else if (IS_GEN4(dev)) { 7045 dev_priv->display.update_wm = i965_update_wm; 7046 if (IS_CRESTLINE(dev)) 7047 dev_priv->display.init_clock_gating = crestline_init_clock_gating; 7048 else if (IS_BROADWATER(dev)) 7049 dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 7050 } else if (IS_GEN3(dev)) { 7051 dev_priv->display.update_wm = i9xx_update_wm; 7052 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 7053 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 7054 } else if (IS_GEN2(dev)) { 7055 if (INTEL_INFO(dev)->num_pipes == 1) { 7056 dev_priv->display.update_wm = i845_update_wm; 7057 dev_priv->display.get_fifo_size = i845_get_fifo_size; 7058 } else { 7059 dev_priv->display.update_wm = i9xx_update_wm; 7060 dev_priv->display.get_fifo_size = i830_get_fifo_size; 7061 } 7062 7063 if (IS_I85X(dev) || IS_I865G(dev)) 7064 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 7065 else 7066 dev_priv->display.init_clock_gating = i830_init_clock_gating; 7067 } else { 7068 DRM_ERROR("unexpected fall-through in intel_init_pm\n"); 7069 } 7070 } 7071 7072 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 7073 { 7074 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7075 7076 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7077 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); 7078 return -EAGAIN; 7079 } 7080 7081 I915_WRITE(GEN6_PCODE_DATA, *val); 7082 I915_WRITE(GEN6_PCODE_DATA1, 0); 7083 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7084 7085 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7086 500)) { 7087 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); 7088 return -ETIMEDOUT; 7089 } 7090 7091 *val = 
I915_READ(GEN6_PCODE_DATA); 7092 I915_WRITE(GEN6_PCODE_DATA, 0); 7093 7094 return 0; 7095 } 7096 7097 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) 7098 { 7099 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7100 7101 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7102 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); 7103 return -EAGAIN; 7104 } 7105 7106 I915_WRITE(GEN6_PCODE_DATA, val); 7107 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7108 7109 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7110 500)) { 7111 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); 7112 return -ETIMEDOUT; 7113 } 7114 7115 I915_WRITE(GEN6_PCODE_DATA, 0); 7116 7117 return 0; 7118 } 7119 7120 static int vlv_gpu_freq_div(unsigned int czclk_freq) 7121 { 7122 switch (czclk_freq) { 7123 case 200: 7124 return 10; 7125 case 267: 7126 return 12; 7127 case 320: 7128 case 333: 7129 return 16; 7130 case 400: 7131 return 20; 7132 default: 7133 return -1; 7134 } 7135 } 7136 7137 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 7138 { 7139 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7140 7141 div = vlv_gpu_freq_div(czclk_freq); 7142 if (div < 0) 7143 return div; 7144 7145 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div); 7146 } 7147 7148 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 7149 { 7150 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7151 7152 mul = vlv_gpu_freq_div(czclk_freq); 7153 if (mul < 0) 7154 return mul; 7155 7156 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6; 7157 } 7158 7159 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7160 { 7161 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7162 7163 div = vlv_gpu_freq_div(czclk_freq) / 2; 7164 if (div < 0) 7165 return div; 7166 7167 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; 7168 } 7169 7170 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 7171 { 7172 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7173 7174 mul = vlv_gpu_freq_div(czclk_freq) / 2; 7175 if (mul < 0) 7176 return mul; 7177 7178 /* CHV needs even values */ 7179 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; 7180 } 7181 7182 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) 7183 { 7184 if (IS_GEN9(dev_priv->dev)) 7185 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 7186 GEN9_FREQ_SCALER); 7187 else if (IS_CHERRYVIEW(dev_priv->dev)) 7188 return chv_gpu_freq(dev_priv, val); 7189 else if (IS_VALLEYVIEW(dev_priv->dev)) 7190 return byt_gpu_freq(dev_priv, val); 7191 else 7192 return val * GT_FREQUENCY_MULTIPLIER; 7193 } 7194 7195 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) 7196 { 7197 if (IS_GEN9(dev_priv->dev)) 7198 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 7199 GT_FREQUENCY_MULTIPLIER); 7200 else if (IS_CHERRYVIEW(dev_priv->dev)) 7201 return chv_freq_opcode(dev_priv, val); 7202 else if (IS_VALLEYVIEW(dev_priv->dev)) 7203 return byt_freq_opcode(dev_priv, val); 7204 else 7205 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 7206 } 7207 7208 struct request_boost { 7209 struct work_struct work; 7210 struct drm_i915_gem_request *req; 7211 }; 7212 7213 static void __intel_rps_boost_work(struct work_struct *work) 7214 { 7215 struct request_boost *boost = container_of(work, struct request_boost, work); 7216 struct 
drm_i915_gem_request *req = boost->req; 7217 7218 if (!i915_gem_request_completed(req, true)) 7219 gen6_rps_boost(to_i915(req->ring->dev), NULL, 7220 req->emitted_jiffies); 7221 7222 i915_gem_request_unreference__unlocked(req); 7223 kfree(boost); 7224 } 7225 7226 void intel_queue_rps_boost_for_request(struct drm_device *dev, 7227 struct drm_i915_gem_request *req) 7228 { 7229 struct request_boost *boost; 7230 7231 if (req == NULL || INTEL_INFO(dev)->gen < 6) 7232 return; 7233 7234 if (i915_gem_request_completed(req, true)) 7235 return; 7236 7237 boost = kmalloc(sizeof(*boost), M_DRM, M_NOWAIT); 7238 if (boost == NULL) 7239 return; 7240 7241 i915_gem_request_reference(req); 7242 boost->req = req; 7243 7244 INIT_WORK(&boost->work, __intel_rps_boost_work); 7245 queue_work(to_i915(dev)->wq, &boost->work); 7246 } 7247 7248 void intel_pm_setup(struct drm_device *dev) 7249 { 7250 struct drm_i915_private *dev_priv = dev->dev_private; 7251 7252 lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE); 7253 lockinit(&dev_priv->rps.client_lock, "i915rcl", 0, LK_CANRECURSE); 7254 7255 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 7256 intel_gen6_powersave_work); 7257 INIT_LIST_HEAD(&dev_priv->rps.clients); 7258 INIT_LIST_HEAD(&dev_priv->rps.semaphores.link); 7259 INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); 7260 7261 dev_priv->pm.suspended = false; 7262 atomic_set(&dev_priv->pm.wakeref_count, 0); 7263 atomic_set(&dev_priv->pm.atomic_seq, 0); 7264 } 7265
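/*
 * For reference, the pcode mailbox handshake used by
 * sandybridge_pcode_read()/_write() above reduces to: check that READY is
 * clear, write DATA, kick MAILBOX with READY | mbox, then poll for READY
 * to clear.  Condensed sketch (error logging elided):
 */
#if 0
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;		/* mailbox busy */
	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;	/* pcode never acked */
#endif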