/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/bitops.h>
#include <linux/cpufreq.h>
#include <linux/export.h>
#include "i915_drv.h"
#include "intel_drv.h"
#ifndef __NetBSD__
#include "../../../platform/x86/intel_ips.h"
#endif
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <asm/param.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle if RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
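/*
 * Illustrative usage (an assumption about how the flags combine, not
 * taken from this file): a platform validated for RC6 and deep RC6 but
 * not the deepest state would use
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) as its enable mask, leaving
 * the RC6pp bit clear so that state is never entered.
 */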

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
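/*
 * As an illustrative note (derived from the checks in intel_update_fbc()
 * below, not from separate documentation): i915.enable_fbc=1 requests FBC
 * where supported, 0 disables it, and a negative value defers to the
 * per-chip default.
 */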

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;
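	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): a 4096-byte compressed framebuffer pitch on gen3+ is
	 * programmed as 4096 / 64 - 1 = 63, i.e. the register field
	 * holds the pitch in 64B units, minus one.
	 */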

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* The blitter is part of the Media powerwell on VLV. This parameter
	 * has no impact on other platforms for now. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}

static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
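/*
 * Editorial note (derived from the call sites below): the return value
 * gates the one-shot debug prints in intel_update_fbc(), so a condition
 * that keeps failing for the same reason is logged only once, until the
 * reason changes.
 */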

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0 &&
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},	/* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},	/* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},	/* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},	/* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},	/* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},	/* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},	/* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},	/* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},	/* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},	/* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},	/* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},	/* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},	/* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},	/* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},	/* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},	/* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},	/* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},	/* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},	/* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},	/* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},	/* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},	/* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},	/* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},	/* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},	/* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},	/* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},	/* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},	/* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},	/* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},	/* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i845_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
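/*
 * Worked example (illustrative numbers, not from the original source):
 * with a 148500 kHz pixel clock, 4 bytes per pixel and the default
 * 5000 ns latency, entries_required = (148500 / 1000) * 4 * 5000 / 1000
 * = 2960 bytes, which with 64-byte cachelines rounds up to 47 entries;
 * a 96-entry FIFO with a guard of 2 then yields a watermark of
 * 96 - (47 + 2) = 47.
 */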

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
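/*
 * Worked example for the cursor path above (illustrative numbers): with
 * htotal 2200 and a 148500 kHz clock, line_time_us = 2200 * 1000 /
 * 148500 = 14 us, so a 12000 ns latency gives line_count =
 * (12000 / 14 + 1000) / 1000 = 1, i.e. the latency rounded up to whole
 * scanlines.
 */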

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}

/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
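/*
 * Worked example (illustrative, assuming the _32 precision multiplier
 * is 32): at a 100000 kHz pixel clock with 4 bytes per pixel,
 * (clock / 1000) * pixel_size = 400 > 256, so the high-precision
 * multiplier is chosen and the plane drain latency becomes
 * (64 * 32 * 4) / 400 = 20.
 */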

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}

#define single_plane_enabled(mask) is_power_of_2(mask)
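/*
 * For example, a mask of (1 << PIPE_A) or (1 << PIPE_B) alone is a
 * power of two and counts as "single plane"; a mask with both pipe
 * bits set is not, and so does not.
 */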

static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct intel_framebuffer *fb;

		fb = to_intel_framebuffer(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (fb->obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
1634
i845_update_wm(struct drm_crtc * unused_crtc)1635 static void i845_update_wm(struct drm_crtc *unused_crtc)
1636 {
1637 struct drm_device *dev = unused_crtc->dev;
1638 struct drm_i915_private *dev_priv = dev->dev_private;
1639 struct drm_crtc *crtc;
1640 const struct drm_display_mode *adjusted_mode;
1641 uint32_t fwater_lo;
1642 int planea_wm;
1643
1644 crtc = single_enabled_crtc(dev);
1645 if (crtc == NULL)
1646 return;
1647
1648 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1649 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1650 &i845_wm_info,
1651 dev_priv->display.get_fifo_size(dev, 0),
1652 4, latency_ns);
1653 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1654 fwater_lo |= (3<<8) | planea_wm;
1655
1656 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1657
1658 I915_WRITE(FW_BLC, fwater_lo);
1659 }
1660
1661 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
1662 struct drm_crtc *crtc)
1663 {
1664 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1665 uint32_t pixel_rate;
1666
1667 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
1668
1669 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1670 * adjust the pixel_rate here. */
1671
1672 if (intel_crtc->config.pch_pfit.enabled) {
1673 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
1674 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
1675
1676 pipe_w = intel_crtc->config.pipe_src_w;
1677 pipe_h = intel_crtc->config.pipe_src_h;
1678 pfit_w = (pfit_size >> 16) & 0xFFFF;
1679 pfit_h = pfit_size & 0xFFFF;
1680 if (pipe_w < pfit_w)
1681 pipe_w = pfit_w;
1682 if (pipe_h < pfit_h)
1683 pipe_h = pfit_h;
1684
1685 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1686 pfit_w * pfit_h);
1687 }
1688
1689 return pixel_rate;
1690 }
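/*
 * Illustrative example: a 1920x1080 pipe squeezed into a 1280x720 panel
 * fitter window scales the effective pixel rate by (1920 * 1080) /
 * (1280 * 720) = 2.25, so a 148500 kHz dotclock becomes 334125 kHz.
 */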
1691
1692 /* latency must be in 0.1us units. */
1693 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
1694 uint32_t latency)
1695 {
1696 uint64_t ret;
1697
1698 if (WARN(latency == 0, "Latency value missing\n"))
1699 return UINT_MAX;
1700
1701 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
1702 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1703
1704 return ret;
1705 }
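/*
 * Worked example (illustrative): pixel_rate = 148500 kHz, 4 bytes per
 * pixel and latency = 12 (1.2 us) give 148500 * 4 * 12 / 640000 =
 * 11.14, rounded up to 12, plus the 2 extra entries = 14 cachelines.
 */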
1706
1707 /* latency must be in 0.1us units. */
1708 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
1709 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
1710 uint32_t latency)
1711 {
1712 uint32_t ret;
1713
1714 if (WARN(latency == 0, "Latency value missing\n"))
1715 return UINT_MAX;
1716
1717 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1718 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
1719 ret = DIV_ROUND_UP(ret, 64) + 2;
1720 return ret;
1721 }
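/*
 * Worked example (illustrative, same numbers): with htotal = 2200 the
 * 1.2 us latency covers less than one full line, so ret = 1 line *
 * 1920 pixels * 4 bytes = 7680 bytes = 120 cachelines, plus 2 = 122.
 */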
1722
1723 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1724 uint8_t bytes_per_pixel)
1725 {
1726 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1727 }
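/*
 * This effectively converts the primary watermark from cachelines back
 * into whole lines: e.g. pri_val = 120 with a 1920 * 4 byte line gives
 * 120 * 64 / 7680 = 1, plus 2 = 3.
 */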
1728
1729 struct ilk_pipe_wm_parameters {
1730 bool active;
1731 uint32_t pipe_htotal;
1732 uint32_t pixel_rate;
1733 struct intel_plane_wm_parameters pri;
1734 struct intel_plane_wm_parameters spr;
1735 struct intel_plane_wm_parameters cur;
1736 };
1737
1738 struct ilk_wm_maximums {
1739 uint16_t pri;
1740 uint16_t spr;
1741 uint16_t cur;
1742 uint16_t fbc;
1743 };
1744
1745 /* used in computing the new watermarks state */
1746 struct intel_wm_config {
1747 unsigned int num_pipes_active;
1748 bool sprites_enabled;
1749 bool sprites_scaled;
1750 };
1751
1752 /*
1753 * For both WM_PIPE and WM_LP.
1754 * mem_value must be in 0.1us units.
1755 */
1756 static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
1757 uint32_t mem_value,
1758 bool is_lp)
1759 {
1760 uint32_t method1, method2;
1761
1762 if (!params->active || !params->pri.enabled)
1763 return 0;
1764
1765 method1 = ilk_wm_method1(params->pixel_rate,
1766 params->pri.bytes_per_pixel,
1767 mem_value);
1768
1769 if (!is_lp)
1770 return method1;
1771
1772 method2 = ilk_wm_method2(params->pixel_rate,
1773 params->pipe_htotal,
1774 params->pri.horiz_pixels,
1775 params->pri.bytes_per_pixel,
1776 mem_value);
1777
1778 return min(method1, method2);
1779 }
1780
1781 /*
1782 * For both WM_PIPE and WM_LP.
1783 * mem_value must be in 0.1us units.
1784 */
1785 static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
1786 uint32_t mem_value)
1787 {
1788 uint32_t method1, method2;
1789
1790 if (!params->active || !params->spr.enabled)
1791 return 0;
1792
1793 method1 = ilk_wm_method1(params->pixel_rate,
1794 params->spr.bytes_per_pixel,
1795 mem_value);
1796 method2 = ilk_wm_method2(params->pixel_rate,
1797 params->pipe_htotal,
1798 params->spr.horiz_pixels,
1799 params->spr.bytes_per_pixel,
1800 mem_value);
1801 return min(method1, method2);
1802 }
1803
1804 /*
1805 * For both WM_PIPE and WM_LP.
1806 * mem_value must be in 0.1us units.
1807 */
1808 static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
1809 uint32_t mem_value)
1810 {
1811 if (!params->active || !params->cur.enabled)
1812 return 0;
1813
1814 return ilk_wm_method2(params->pixel_rate,
1815 params->pipe_htotal,
1816 params->cur.horiz_pixels,
1817 params->cur.bytes_per_pixel,
1818 mem_value);
1819 }
1820
1821 /* Only for WM_LP. */
1822 static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
1823 uint32_t pri_val)
1824 {
1825 if (!params->active || !params->pri.enabled)
1826 return 0;
1827
1828 return ilk_wm_fbc(pri_val,
1829 params->pri.horiz_pixels,
1830 params->pri.bytes_per_pixel);
1831 }
1832
1833 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1834 {
1835 if (INTEL_INFO(dev)->gen >= 8)
1836 return 3072;
1837 else if (INTEL_INFO(dev)->gen >= 7)
1838 return 768;
1839 else
1840 return 512;
1841 }
1842
1843 /* Calculate the maximum primary/sprite plane watermark */
1844 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1845 int level,
1846 const struct intel_wm_config *config,
1847 enum intel_ddb_partitioning ddb_partitioning,
1848 bool is_sprite)
1849 {
1850 unsigned int fifo_size = ilk_display_fifo_size(dev);
1851 unsigned int max;
1852
1853 /* if sprites aren't enabled, sprites get nothing */
1854 if (is_sprite && !config->sprites_enabled)
1855 return 0;
1856
1857 /* HSW allows LP1+ watermarks even with multiple pipes */
1858 if (level == 0 || config->num_pipes_active > 1) {
1859 fifo_size /= INTEL_INFO(dev)->num_pipes;
1860
1861 /*
1862 * For some reason the non self refresh
1863 * FIFO size is only half of the self
1864 * refresh FIFO size on ILK/SNB.
1865 */
1866 if (INTEL_INFO(dev)->gen <= 6)
1867 fifo_size /= 2;
1868 }
1869
1870 if (config->sprites_enabled) {
1871 /* level 0 is always calculated with 1:1 split */
1872 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1873 if (is_sprite)
1874 fifo_size *= 5;
1875 fifo_size /= 6;
1876 } else {
1877 fifo_size /= 2;
1878 }
1879 }
1880
1881 /* clamp to max that the registers can hold */
1882 if (INTEL_INFO(dev)->gen >= 8)
1883 max = level == 0 ? 255 : 2047;
1884 else if (INTEL_INFO(dev)->gen >= 7)
1885 /* IVB/HSW primary/sprite plane watermarks */
1886 max = level == 0 ? 127 : 1023;
1887 else if (!is_sprite)
1888 /* ILK/SNB primary plane watermarks */
1889 max = level == 0 ? 127 : 511;
1890 else
1891 /* ILK/SNB sprite plane watermarks */
1892 max = level == 0 ? 63 : 255;
1893
1894 return min(fifo_size, max);
1895 }
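/*
 * Illustrative example: on IVB (768 entry FIFO) with one active pipe,
 * sprites enabled and the 5/6 LP partitioning, the sprite plane gets
 * 768 * 5 / 6 = 640 entries while the primary gets 768 / 6 = 128; the
 * default 1:1 split gives 384 each.
 */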
1896
1897 /* Calculate the maximum cursor plane watermark */
1898 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1899 int level,
1900 const struct intel_wm_config *config)
1901 {
1902 /* HSW LP1+ watermarks w/ multiple pipes */
1903 if (level > 0 && config->num_pipes_active > 1)
1904 return 64;
1905
1906 /* otherwise just report max that registers can hold */
1907 if (INTEL_INFO(dev)->gen >= 7)
1908 return level == 0 ? 63 : 255;
1909 else
1910 return level == 0 ? 31 : 63;
1911 }
1912
1913 /* Calculate the maximum FBC watermark */
1914 static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
1915 {
1916 /* max that registers can hold */
1917 if (INTEL_INFO(dev)->gen >= 8)
1918 return 31;
1919 else
1920 return 15;
1921 }
1922
1923 static void ilk_compute_wm_maximums(const struct drm_device *dev,
1924 int level,
1925 const struct intel_wm_config *config,
1926 enum intel_ddb_partitioning ddb_partitioning,
1927 struct ilk_wm_maximums *max)
1928 {
1929 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1930 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1931 max->cur = ilk_cursor_wm_max(dev, level, config);
1932 max->fbc = ilk_fbc_wm_max(dev);
1933 }
1934
1935 static bool ilk_validate_wm_level(int level,
1936 const struct ilk_wm_maximums *max,
1937 struct intel_wm_level *result)
1938 {
1939 bool ret;
1940
1941 /* already determined to be invalid? */
1942 if (!result->enable)
1943 return false;
1944
1945 result->enable = result->pri_val <= max->pri &&
1946 result->spr_val <= max->spr &&
1947 result->cur_val <= max->cur;
1948
1949 ret = result->enable;
1950
1951 /*
1952 * HACK until we can pre-compute everything,
1953 * and thus fail gracefully if LP0 watermarks
1954 * are exceeded...
1955 */
1956 if (level == 0 && !result->enable) {
1957 if (result->pri_val > max->pri)
1958 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
1959 level, result->pri_val, max->pri);
1960 if (result->spr_val > max->spr)
1961 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
1962 level, result->spr_val, max->spr);
1963 if (result->cur_val > max->cur)
1964 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
1965 level, result->cur_val, max->cur);
1966
1967 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
1968 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
1969 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
1970 result->enable = true;
1971 }
1972
1973 return ret;
1974 }
1975
1976 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1977 int level,
1978 const struct ilk_pipe_wm_parameters *p,
1979 struct intel_wm_level *result)
1980 {
1981 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
1982 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
1983 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
1984
1985 /* WM1+ latency values stored in 0.5us units */
1986 if (level > 0) {
1987 pri_latency *= 5;
1988 spr_latency *= 5;
1989 cur_latency *= 5;
1990 }
1991
1992 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
1993 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
1994 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
1995 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
1996 result->enable = true;
1997 }
1998
1999 static uint32_t
2000 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2001 {
2002 struct drm_i915_private *dev_priv = dev->dev_private;
2003 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2004 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2005 u32 linetime, ips_linetime;
2006
2007 if (!intel_crtc_active(crtc))
2008 return 0;
2009
2010 /* The watermarks are computed based on how long it takes to fill a
2011 * single row at the given clock rate, multiplied by 8.
2012 */
2013 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2014 mode->crtc_clock);
2015 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2016 intel_ddi_get_cdclk_freq(dev_priv));
2017
2018 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2019 PIPE_WM_LINETIME_TIME(linetime);
2020 }
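/*
 * Illustrative example: htotal = 2200 at a 148500 kHz dotclock gives
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. a line time
 * of 14.875 us expressed in 1/8 us units.
 */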
2021
2022 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2023 {
2024 struct drm_i915_private *dev_priv = dev->dev_private;
2025
2026 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2027 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2028
2029 wm[0] = (sskpd >> 56) & 0xFF;
2030 if (wm[0] == 0)
2031 wm[0] = sskpd & 0xF;
2032 wm[1] = (sskpd >> 4) & 0xFF;
2033 wm[2] = (sskpd >> 12) & 0xFF;
2034 wm[3] = (sskpd >> 20) & 0x1FF;
2035 wm[4] = (sskpd >> 32) & 0x1FF;
2036 } else if (INTEL_INFO(dev)->gen >= 6) {
2037 uint32_t sskpd = I915_READ(MCH_SSKPD);
2038
2039 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2040 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2041 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2042 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2043 } else if (INTEL_INFO(dev)->gen >= 5) {
2044 uint32_t mltr = I915_READ(MLTR_ILK);
2045
2046 /* ILK primary LP0 latency is 700 ns */
2047 wm[0] = 7;
2048 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2049 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2050 }
2051 }
2052
2053 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2054 {
2055 /* ILK sprite LP0 latency is 1300 ns */
2056 if (INTEL_INFO(dev)->gen == 5)
2057 wm[0] = 13;
2058 }
2059
2060 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2061 {
2062 /* ILK cursor LP0 latency is 1300 ns */
2063 if (INTEL_INFO(dev)->gen == 5)
2064 wm[0] = 13;
2065
2066 /* WaDoubleCursorLP3Latency:ivb */
2067 if (IS_IVYBRIDGE(dev))
2068 wm[3] *= 2;
2069 }
2070
2071 static int ilk_wm_max_level(const struct drm_device *dev)
2072 {
2073 /* how many WM levels are we expecting */
2074 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2075 return 4;
2076 else if (INTEL_INFO(dev)->gen >= 6)
2077 return 3;
2078 else
2079 return 2;
2080 }
2081
2082 static void intel_print_wm_latency(struct drm_device *dev,
2083 const char *name,
2084 const uint16_t wm[5])
2085 {
2086 int level, max_level = ilk_wm_max_level(dev);
2087
2088 for (level = 0; level <= max_level; level++) {
2089 unsigned int latency = wm[level];
2090
2091 if (latency == 0) {
2092 DRM_ERROR("%s WM%d latency not provided\n",
2093 name, level);
2094 continue;
2095 }
2096
2097 /* WM1+ latency values in 0.5us units */
2098 if (level > 0)
2099 latency *= 5;
2100
2101 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2102 name, level, wm[level],
2103 latency / 10, latency % 10);
2104 }
2105 }
2106
2107 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2108 uint16_t wm[5], uint16_t min)
2109 {
2110 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2111
2112 if (wm[0] >= min)
2113 return false;
2114
2115 wm[0] = max(wm[0], min);
2116 for (level = 1; level <= max_level; level++)
2117 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2118
2119 return true;
2120 }
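/*
 * E.g. min = 12 raises WM0 to at least 1.2 us; the WM1+ values are
 * stored in 0.5 us units, so they are raised to at least
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us.
 */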
2121
2122 static void snb_wm_latency_quirk(struct drm_device *dev)
2123 {
2124 struct drm_i915_private *dev_priv = dev->dev_private;
2125 bool changed;
2126
2127 /*
2128 * The BIOS provided WM memory latency values are often
2129 * inadequate for high resolution displays. Adjust them.
2130 */
2131 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2132 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2133 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2134
2135 if (!changed)
2136 return;
2137
2138 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2139 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2140 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2141 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2142 }
2143
2144 static void ilk_setup_wm_latency(struct drm_device *dev)
2145 {
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2147
2148 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2149
2150 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2151 sizeof(dev_priv->wm.pri_latency));
2152 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2153 sizeof(dev_priv->wm.pri_latency));
2154
2155 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2156 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2157
2158 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2159 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2160 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2161
2162 if (IS_GEN6(dev))
2163 snb_wm_latency_quirk(dev);
2164 }
2165
2166 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2167 struct ilk_pipe_wm_parameters *p,
2168 struct intel_wm_config *config)
2169 {
2170 struct drm_device *dev = crtc->dev;
2171 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2172 enum i915_pipe pipe = intel_crtc->pipe;
2173 struct drm_plane *plane;
2174
2175 p->active = intel_crtc_active(crtc);
2176 if (p->active) {
2177 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2178 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2179 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2180 p->cur.bytes_per_pixel = 4;
2181 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2182 p->cur.horiz_pixels = intel_crtc->cursor_width;
2183 /* TODO: for now, assume primary and cursor planes are always enabled. */
2184 p->pri.enabled = true;
2185 p->cur.enabled = true;
2186 }
2187
2188 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2189 config->num_pipes_active += intel_crtc_active(crtc);
2190
2191 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2192 struct intel_plane *intel_plane = to_intel_plane(plane);
2193
2194 if (intel_plane->pipe == pipe)
2195 p->spr = intel_plane->wm;
2196
2197 config->sprites_enabled |= intel_plane->wm.enabled;
2198 config->sprites_scaled |= intel_plane->wm.scaled;
2199 }
2200 }
2201
2202 /* Compute new watermarks for the pipe */
2203 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2204 const struct ilk_pipe_wm_parameters *params,
2205 struct intel_pipe_wm *pipe_wm)
2206 {
2207 struct drm_device *dev = crtc->dev;
2208 const struct drm_i915_private *dev_priv = dev->dev_private;
2209 int level, max_level = ilk_wm_max_level(dev);
2210 /* LP0 watermark maximums depend on this pipe alone */
2211 struct intel_wm_config config = {
2212 .num_pipes_active = 1,
2213 .sprites_enabled = params->spr.enabled,
2214 .sprites_scaled = params->spr.scaled,
2215 };
2216 struct ilk_wm_maximums max;
2217
2218 /* LP0 watermarks always use 1/2 DDB partitioning */
2219 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2220
2221 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2222 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2223 max_level = 1;
2224
2225 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2226 if (params->spr.scaled)
2227 max_level = 0;
2228
2229 for (level = 0; level <= max_level; level++)
2230 ilk_compute_wm_level(dev_priv, level, params,
2231 &pipe_wm->wm[level]);
2232
2233 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2234 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2235
2236 /* At least LP0 must be valid */
2237 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
2238 }
2239
2240 /*
2241 * Merge the watermarks from all active pipes for a specific level.
2242 */
2243 static void ilk_merge_wm_level(struct drm_device *dev,
2244 int level,
2245 struct intel_wm_level *ret_wm)
2246 {
2247 const struct intel_crtc *intel_crtc;
2248
2249 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2250 const struct intel_wm_level *wm =
2251 &intel_crtc->wm.active.wm[level];
2252
2253 if (!wm->enable)
2254 return;
2255
2256 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2257 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2258 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2259 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2260 }
2261
2262 ret_wm->enable = true;
2263 }
2264
2265 /*
2266 * Merge all low power watermarks for all active pipes.
2267 */
2268 static void ilk_wm_merge(struct drm_device *dev,
2269 const struct intel_wm_config *config,
2270 const struct ilk_wm_maximums *max,
2271 struct intel_pipe_wm *merged)
2272 {
2273 int level, max_level = ilk_wm_max_level(dev);
2274
2275 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2276 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2277 config->num_pipes_active > 1)
2278 return;
2279
2280 /* ILK: FBC WM must be disabled always */
2281 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2282
2283 /* merge each WM1+ level */
2284 for (level = 1; level <= max_level; level++) {
2285 struct intel_wm_level *wm = &merged->wm[level];
2286
2287 ilk_merge_wm_level(dev, level, wm);
2288
2289 if (!ilk_validate_wm_level(level, max, wm))
2290 break;
2291
2292 /*
2293 * The spec says it is preferred to disable
2294 * FBC WMs instead of disabling a WM level.
2295 */
2296 if (wm->fbc_val > max->fbc) {
2297 merged->fbc_wm_enabled = false;
2298 wm->fbc_val = 0;
2299 }
2300 }
2301
2302 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2303 /*
2304 * FIXME this is racy. FBC might get enabled later.
2305 * What we should check here is whether FBC can be
2306 * enabled sometime later.
2307 */
2308 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2309 for (level = 2; level <= max_level; level++) {
2310 struct intel_wm_level *wm = &merged->wm[level];
2311
2312 wm->enable = false;
2313 }
2314 }
2315 }
2316
2317 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2318 {
2319 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2320 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2321 }
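/*
 * E.g. with five levels (HSW/BDW) and wm[4] enabled, LP1/LP2/LP3 map
 * to levels 1/3/4; otherwise they map 1:1 to levels 1/2/3.
 */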
2322
2323 /* The value we need to program into the WM_LPx latency field */
2324 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2325 {
2326 struct drm_i915_private *dev_priv = dev->dev_private;
2327
2328 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2329 return 2 * level;
2330 else
2331 return dev_priv->wm.pri_latency[level];
2332 }
2333
2334 static void ilk_compute_wm_results(struct drm_device *dev,
2335 const struct intel_pipe_wm *merged,
2336 enum intel_ddb_partitioning partitioning,
2337 struct ilk_wm_values *results)
2338 {
2339 struct intel_crtc *intel_crtc;
2340 int level, wm_lp;
2341
2342 results->enable_fbc_wm = merged->fbc_wm_enabled;
2343 results->partitioning = partitioning;
2344
2345 /* LP1+ register values */
2346 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2347 const struct intel_wm_level *r;
2348
2349 level = ilk_wm_lp_to_level(wm_lp, merged);
2350
2351 r = &merged->wm[level];
2352 if (!r->enable)
2353 break;
2354
2355 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2356 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2357 (r->pri_val << WM1_LP_SR_SHIFT) |
2358 r->cur_val;
2359
2360 if (INTEL_INFO(dev)->gen >= 8)
2361 results->wm_lp[wm_lp - 1] |=
2362 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2363 else
2364 results->wm_lp[wm_lp - 1] |=
2365 r->fbc_val << WM1_LP_FBC_SHIFT;
2366
2367 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2368 WARN_ON(wm_lp != 1);
2369 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2370 } else
2371 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2372 }
2373
2374 /* LP0 register values */
2375 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2376 enum i915_pipe pipe = intel_crtc->pipe;
2377 const struct intel_wm_level *r =
2378 &intel_crtc->wm.active.wm[0];
2379
2380 if (WARN_ON(!r->enable))
2381 continue;
2382
2383 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2384
2385 results->wm_pipe[pipe] =
2386 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2387 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2388 r->cur_val;
2389 }
2390 }
2391
2392 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2393 * case both are at the same level. Prefer r1 in case they're the same. */
2394 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2395 struct intel_pipe_wm *r1,
2396 struct intel_pipe_wm *r2)
2397 {
2398 int level, max_level = ilk_wm_max_level(dev);
2399 int level1 = 0, level2 = 0;
2400
2401 for (level = 1; level <= max_level; level++) {
2402 if (r1->wm[level].enable)
2403 level1 = level;
2404 if (r2->wm[level].enable)
2405 level2 = level;
2406 }
2407
2408 if (level1 == level2) {
2409 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2410 return r2;
2411 else
2412 return r1;
2413 } else if (level1 > level2) {
2414 return r1;
2415 } else {
2416 return r2;
2417 }
2418 }
2419
2420 /* dirty bits used to track which watermarks need changes */
2421 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2422 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2423 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2424 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2425 #define WM_DIRTY_FBC (1 << 24)
2426 #define WM_DIRTY_DDB (1 << 25)
2427
2428 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2429 const struct ilk_wm_values *old,
2430 const struct ilk_wm_values *new)
2431 {
2432 unsigned int dirty = 0;
2433 enum i915_pipe pipe;
2434 int wm_lp;
2435
2436 for_each_pipe(pipe) {
2437 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2438 dirty |= WM_DIRTY_LINETIME(pipe);
2439 /* Must disable LP1+ watermarks too */
2440 dirty |= WM_DIRTY_LP_ALL;
2441 }
2442
2443 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2444 dirty |= WM_DIRTY_PIPE(pipe);
2445 /* Must disable LP1+ watermarks too */
2446 dirty |= WM_DIRTY_LP_ALL;
2447 }
2448 }
2449
2450 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2451 dirty |= WM_DIRTY_FBC;
2452 /* Must disable LP1+ watermarks too */
2453 dirty |= WM_DIRTY_LP_ALL;
2454 }
2455
2456 if (old->partitioning != new->partitioning) {
2457 dirty |= WM_DIRTY_DDB;
2458 /* Must disable LP1+ watermarks too */
2459 dirty |= WM_DIRTY_LP_ALL;
2460 }
2461
2462 /* LP1+ watermarks already deemed dirty, no need to continue */
2463 if (dirty & WM_DIRTY_LP_ALL)
2464 return dirty;
2465
2466 /* Find the lowest numbered LP1+ watermark in need of an update... */
2467 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2468 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2469 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2470 break;
2471 }
2472
2473 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2474 for (; wm_lp <= 3; wm_lp++)
2475 dirty |= WM_DIRTY_LP(wm_lp);
2476
2477 return dirty;
2478 }
2479
2480 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2481 unsigned int dirty)
2482 {
2483 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2484 bool changed = false;
2485
2486 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2487 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2488 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2489 changed = true;
2490 }
2491 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2492 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2493 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2494 changed = true;
2495 }
2496 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2497 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2498 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2499 changed = true;
2500 }
2501
2502 /*
2503 * Don't touch WM1S_LP_EN here.
2504 * Doing so could cause underruns.
2505 */
2506
2507 return changed;
2508 }
2509
2510 /*
2511 * The spec says we shouldn't write when we don't need to, because every write
2512 * causes WMs to be re-evaluated, expending some power.
2513 */
2514 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2515 struct ilk_wm_values *results)
2516 {
2517 struct drm_device *dev = dev_priv->dev;
2518 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2519 unsigned int dirty;
2520 uint32_t val;
2521
2522 dirty = ilk_compute_wm_dirty(dev, previous, results);
2523 if (!dirty)
2524 return;
2525
2526 _ilk_disable_lp_wm(dev_priv, dirty);
2527
2528 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2529 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2530 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2531 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2532 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2533 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2534
2535 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2536 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2537 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2538 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2539 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2540 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2541
2542 if (dirty & WM_DIRTY_DDB) {
2543 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2544 val = I915_READ(WM_MISC);
2545 if (results->partitioning == INTEL_DDB_PART_1_2)
2546 val &= ~WM_MISC_DATA_PARTITION_5_6;
2547 else
2548 val |= WM_MISC_DATA_PARTITION_5_6;
2549 I915_WRITE(WM_MISC, val);
2550 } else {
2551 val = I915_READ(DISP_ARB_CTL2);
2552 if (results->partitioning == INTEL_DDB_PART_1_2)
2553 val &= ~DISP_DATA_PARTITION_5_6;
2554 else
2555 val |= DISP_DATA_PARTITION_5_6;
2556 I915_WRITE(DISP_ARB_CTL2, val);
2557 }
2558 }
2559
2560 if (dirty & WM_DIRTY_FBC) {
2561 val = I915_READ(DISP_ARB_CTL);
2562 if (results->enable_fbc_wm)
2563 val &= ~DISP_FBC_WM_DIS;
2564 else
2565 val |= DISP_FBC_WM_DIS;
2566 I915_WRITE(DISP_ARB_CTL, val);
2567 }
2568
2569 if (dirty & WM_DIRTY_LP(1) &&
2570 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2571 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2572
2573 if (INTEL_INFO(dev)->gen >= 7) {
2574 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2575 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2576 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2577 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2578 }
2579
2580 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2581 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2582 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2583 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2584 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2585 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2586
2587 dev_priv->wm.hw = *results;
2588 }
2589
2590 static bool ilk_disable_lp_wm(struct drm_device *dev)
2591 {
2592 struct drm_i915_private *dev_priv = dev->dev_private;
2593
2594 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2595 }
2596
2597 static void ilk_update_wm(struct drm_crtc *crtc)
2598 {
2599 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2600 struct drm_device *dev = crtc->dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602 struct ilk_wm_maximums max;
2603 static const struct ilk_pipe_wm_parameters zero_params;
2604 struct ilk_pipe_wm_parameters params = zero_params;
2605 static const struct ilk_wm_values zero_values;
2606 struct ilk_wm_values results = zero_values;
2607 enum intel_ddb_partitioning partitioning;
2608 static const struct intel_pipe_wm zero_wm;
2609 struct intel_pipe_wm pipe_wm = zero_wm;
2610 struct intel_pipe_wm lp_wm_1_2 = zero_wm, lp_wm_5_6 = zero_wm,
2611 *best_lp_wm;
2612 static const struct intel_wm_config zero_config;
2613 struct intel_wm_config config = zero_config;
2614
2615 ilk_compute_wm_parameters(crtc, &params, &config);
2616 
2617 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2618
2619 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2620 return;
2621
2622 intel_crtc->wm.active = pipe_wm;
2623
2624 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2625 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2626
2627 /* 5/6 split only in single pipe config on IVB+ */
2628 if (INTEL_INFO(dev)->gen >= 7 &&
2629 config.num_pipes_active == 1 && config.sprites_enabled) {
2630 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2631 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2632
2633 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2634 } else {
2635 best_lp_wm = &lp_wm_1_2;
2636 }
2637
2638 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2639 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2640
2641 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2642
2643 ilk_write_wm_values(dev_priv, &results);
2644 }
2645
2646 static void ilk_update_sprite_wm(struct drm_plane *plane,
2647 struct drm_crtc *crtc,
2648 uint32_t sprite_width, int pixel_size,
2649 bool enabled, bool scaled)
2650 {
2651 struct drm_device *dev = plane->dev;
2652 struct intel_plane *intel_plane = to_intel_plane(plane);
2653
2654 intel_plane->wm.enabled = enabled;
2655 intel_plane->wm.scaled = scaled;
2656 intel_plane->wm.horiz_pixels = sprite_width;
2657 intel_plane->wm.bytes_per_pixel = pixel_size;
2658
2659 /*
2660 * IVB workaround: must disable low power watermarks for at least
2661 * one frame before enabling scaling. LP watermarks can be re-enabled
2662 * when scaling is disabled.
2663 *
2664 * WaCxSRDisabledForSpriteScaling:ivb
2665 */
2666 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2667 intel_wait_for_vblank(dev, intel_plane->pipe);
2668
2669 ilk_update_wm(crtc);
2670 }
2671
2672 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2673 {
2674 struct drm_device *dev = crtc->dev;
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2677 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2678 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2679 enum i915_pipe pipe = intel_crtc->pipe;
2680 static const unsigned int wm0_pipe_reg[] = {
2681 [PIPE_A] = WM0_PIPEA_ILK,
2682 [PIPE_B] = WM0_PIPEB_ILK,
2683 [PIPE_C] = WM0_PIPEC_IVB,
2684 };
2685
2686 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2687 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2688 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2689
2690 if (intel_crtc_active(crtc)) {
2691 u32 tmp = hw->wm_pipe[pipe];
2692
2693 /*
2694 * For active pipes LP0 watermark is marked as
2695 * enabled, and LP1+ watermarks as disabled since
2696 * we can't really reverse compute them in case
2697 * multiple pipes are active.
2698 */
2699 active->wm[0].enable = true;
2700 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2701 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2702 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2703 active->linetime = hw->wm_linetime[pipe];
2704 } else {
2705 int level, max_level = ilk_wm_max_level(dev);
2706
2707 /*
2708 * For inactive pipes, all watermark levels
2709 * should be marked as enabled but zeroed,
2710 * which is what we'd compute them to.
2711 */
2712 for (level = 0; level <= max_level; level++)
2713 active->wm[level].enable = true;
2714 }
2715 }
2716
2717 void ilk_wm_get_hw_state(struct drm_device *dev)
2718 {
2719 struct drm_i915_private *dev_priv = dev->dev_private;
2720 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2721 struct drm_crtc *crtc;
2722
2723 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2724 ilk_pipe_wm_get_hw_state(crtc);
2725
2726 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2727 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2728 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2729
2730 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2731 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2732 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2733
2734 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2735 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2736 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2737 else if (IS_IVYBRIDGE(dev))
2738 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2739 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2740
2741 hw->enable_fbc_wm =
2742 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2743 }
2744
2745 /**
2746 * intel_update_watermarks - update FIFO watermark values based on current modes
2747 *
2748 * Calculate watermark values for the various WM regs based on current mode
2749 * and plane configuration.
2750 *
2751 * There are several cases to deal with here:
2752 * - normal (i.e. non-self-refresh)
2753 * - self-refresh (SR) mode
2754 * - lines are large relative to FIFO size (buffer can hold up to 2)
2755 * - lines are small relative to FIFO size (buffer can hold more than 2
2756 * lines), so need to account for TLB latency
2757 *
2758 * The normal calculation is:
2759 * watermark = dotclock * bytes per pixel * latency
2760 * where latency is platform & configuration dependent (we assume pessimal
2761 * values here).
2762 *
2763 * The SR calculation is:
2764 * watermark = (trunc(latency/line time)+1) * surface width *
2765 * bytes per pixel
2766 * where
2767 * line time = htotal / dotclock
2768 * surface width = hdisplay for normal plane and 64 for cursor
2769 * and latency is assumed to be high, as above.
2770 *
2771 * The final value programmed to the register should always be rounded up,
2772 * and include an extra 2 entries to account for clock crossings.
2773 *
2774 * We don't use the sprite, so we can ignore that. And on Crestline we have
2775 * to set the non-SR watermarks to 8.
2776 */
2777 void intel_update_watermarks(struct drm_crtc *crtc)
2778 {
2779 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
2780
2781 if (dev_priv->display.update_wm)
2782 dev_priv->display.update_wm(crtc);
2783 }
2784
2785 void intel_update_sprite_watermarks(struct drm_plane *plane,
2786 struct drm_crtc *crtc,
2787 uint32_t sprite_width, int pixel_size,
2788 bool enabled, bool scaled)
2789 {
2790 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2791
2792 if (dev_priv->display.update_sprite_wm)
2793 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
2794 pixel_size, enabled, scaled);
2795 }
2796
2797 static struct drm_i915_gem_object *
2798 intel_alloc_context_page(struct drm_device *dev)
2799 {
2800 struct drm_i915_gem_object *ctx;
2801 int ret;
2802
2803 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2804
2805 ctx = i915_gem_alloc_object(dev, 4096);
2806 if (!ctx) {
2807 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2808 return NULL;
2809 }
2810
2811 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2812 if (ret) {
2813 DRM_ERROR("failed to pin power context: %d\n", ret);
2814 goto err_unref;
2815 }
2816
2817 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2818 if (ret) {
2819 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2820 goto err_unpin;
2821 }
2822
2823 return ctx;
2824
2825 err_unpin:
2826 i915_gem_object_ggtt_unpin(ctx);
2827 err_unref:
2828 drm_gem_object_unreference(&ctx->base);
2829 return NULL;
2830 }
2831
2832 /**
2833 * Lock protecting IPS related data structures
2834 */
2835 #ifdef __NetBSD__
2836 spinlock_t mchdev_lock;
2837 #else
2838 DEFINE_SPINLOCK(mchdev_lock);
2839 #endif
2840
2841 /* Global for IPS driver to get at the current i915 device. Protected by
2842 * mchdev_lock. */
2843 static struct drm_i915_private *i915_mch_dev;
2844
2845 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2846 {
2847 struct drm_i915_private *dev_priv = dev->dev_private;
2848 u16 rgvswctl;
2849
2850 assert_spin_locked(&mchdev_lock);
2851
2852 rgvswctl = I915_READ16(MEMSWCTL);
2853 if (rgvswctl & MEMCTL_CMD_STS) {
2854 DRM_DEBUG("gpu busy, RCS change rejected\n");
2855 return false; /* still busy with another command */
2856 }
2857
2858 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2859 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2860 I915_WRITE16(MEMSWCTL, rgvswctl);
2861 POSTING_READ16(MEMSWCTL);
2862
2863 rgvswctl |= MEMCTL_CMD_STS;
2864 I915_WRITE16(MEMSWCTL, rgvswctl);
2865
2866 return true;
2867 }
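/*
 * Note for callers: mchdev_lock must be held, and a false return means
 * the hardware was still busy with a previous frequency command and the
 * new value was not written.
 */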
2868
2869 static void ironlake_enable_drps(struct drm_device *dev)
2870 {
2871 struct drm_i915_private *dev_priv = dev->dev_private;
2872 u32 rgvmodectl = I915_READ(MEMMODECTL);
2873 u8 fmax, fmin, fstart, vstart;
2874
2875 spin_lock_irq(&mchdev_lock);
2876
2877 /* Enable temp reporting */
2878 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2879 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2880
2881 /* 100ms RC evaluation intervals */
2882 I915_WRITE(RCUPEI, 100000);
2883 I915_WRITE(RCDNEI, 100000);
2884
2885 /* Set max/min thresholds to 90ms and 80ms respectively */
2886 I915_WRITE(RCBMAXAVG, 90000);
2887 I915_WRITE(RCBMINAVG, 80000);
2888
2889 I915_WRITE(MEMIHYST, 1);
2890
2891 /* Set up min, max, and cur for interrupt handling */
2892 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2893 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2894 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2895 MEMMODE_FSTART_SHIFT;
2896
2897 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2898 PXVFREQ_PX_SHIFT;
2899
2900 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2901 dev_priv->ips.fstart = fstart;
2902
2903 dev_priv->ips.max_delay = fstart;
2904 dev_priv->ips.min_delay = fmin;
2905 dev_priv->ips.cur_delay = fstart;
2906
2907 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2908 fmax, fmin, fstart);
2909
2910 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2911
2912 /*
2913 * Interrupts will be enabled in ironlake_irq_postinstall
2914 */
2915
2916 I915_WRITE(VIDSTART, vstart);
2917 POSTING_READ(VIDSTART);
2918
2919 rgvmodectl |= MEMMODE_SWMODE_EN;
2920 I915_WRITE(MEMMODECTL, rgvmodectl);
2921
2922 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2923 DRM_ERROR("stuck trying to change perf mode\n");
2924 mdelay(1);
2925
2926 ironlake_set_drps(dev, fstart);
2927
2928 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2929 I915_READ(0x112e0);
2930 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2931 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2932 getrawmonotonic(&dev_priv->ips.last_time2);
2933
2934 spin_unlock_irq(&mchdev_lock);
2935 }
2936
2937 static void ironlake_disable_drps(struct drm_device *dev)
2938 {
2939 struct drm_i915_private *dev_priv = dev->dev_private;
2940 u16 rgvswctl;
2941
2942 spin_lock_irq(&mchdev_lock);
2943
2944 rgvswctl = I915_READ16(MEMSWCTL);
2945
2946 /* Ack interrupts, disable EFC interrupt */
2947 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2948 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2949 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2950 I915_WRITE(DEIIR, DE_PCU_EVENT);
2951 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2952
2953 /* Go back to the starting frequency */
2954 ironlake_set_drps(dev, dev_priv->ips.fstart);
2955 mdelay(1);
2956 rgvswctl |= MEMCTL_CMD_STS;
2957 I915_WRITE(MEMSWCTL, rgvswctl);
2958 mdelay(1);
2959
2960 spin_unlock_irq(&mchdev_lock);
2961 }
2962
2963 /* There's a funny hw issue where the hw returns all 0 when reading from
2964 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2965 * ourselves, instead of doing an RMW cycle (which might result in us clearing
2966 * all limits and the GPU getting stuck at whatever frequency it is at right now).
2967 */
2968 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2969 {
2970 u32 limits;
2971
2972 /* Only set the down limit when we've reached the lowest level to avoid
2973 * getting more interrupts, otherwise leave this clear. This prevents a
2974 * race in the hw when coming out of rc6: There's a tiny window where
2975 * the hw runs at the minimal clock before selecting the desired
2976 * frequency, if the down threshold expires in that window we will not
2977 * receive a down interrupt. */
2978 limits = dev_priv->rps.max_freq_softlimit << 24;
2979 if (val <= dev_priv->rps.min_freq_softlimit)
2980 limits |= dev_priv->rps.min_freq_softlimit << 16;
2981
2982 return limits;
2983 }
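/*
 * E.g. with a softlimit range of 4..16, requesting val = 4 yields
 * (16 << 24) | (4 << 16); any higher request leaves the down limit
 * field clear so we keep receiving down interrupts.
 */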
2984
2985 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
2986 {
2987 int new_power;
2988
2989 new_power = dev_priv->rps.power;
2990 switch (dev_priv->rps.power) {
2991 case LOW_POWER:
2992 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
2993 new_power = BETWEEN;
2994 break;
2995
2996 case BETWEEN:
2997 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
2998 new_power = LOW_POWER;
2999 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3000 new_power = HIGH_POWER;
3001 break;
3002
3003 case HIGH_POWER:
3004 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3005 new_power = BETWEEN;
3006 break;
3007 }
3008 /* Max/min bins are special */
3009 if (val == dev_priv->rps.min_freq_softlimit)
3010 new_power = LOW_POWER;
3011 if (val == dev_priv->rps.max_freq_softlimit)
3012 new_power = HIGH_POWER;
3013 if (new_power == dev_priv->rps.power)
3014 return;
3015
3016 /* Note the units here are not exactly 1us, but 1280ns. */
3017 switch (new_power) {
3018 case LOW_POWER:
3019 /* Upclock if more than 95% busy over 16ms */
3020 I915_WRITE(GEN6_RP_UP_EI, 12500);
3021 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
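		/* 12500 EI units * 1280 ns = 16 ms window; 11800 / 12500
		 * = 94.4% busy, i.e. the ~95% threshold quoted above. */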
3022
3023 /* Downclock if less than 85% busy over 32ms */
3024 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3025 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3026
3027 I915_WRITE(GEN6_RP_CONTROL,
3028 GEN6_RP_MEDIA_TURBO |
3029 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3030 GEN6_RP_MEDIA_IS_GFX |
3031 GEN6_RP_ENABLE |
3032 GEN6_RP_UP_BUSY_AVG |
3033 GEN6_RP_DOWN_IDLE_AVG);
3034 break;
3035
3036 case BETWEEN:
3037 /* Upclock if more than 90% busy over 13ms */
3038 I915_WRITE(GEN6_RP_UP_EI, 10250);
3039 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3040
3041 /* Downclock if less than 75% busy over 32ms */
3042 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3043 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3044
3045 I915_WRITE(GEN6_RP_CONTROL,
3046 GEN6_RP_MEDIA_TURBO |
3047 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3048 GEN6_RP_MEDIA_IS_GFX |
3049 GEN6_RP_ENABLE |
3050 GEN6_RP_UP_BUSY_AVG |
3051 GEN6_RP_DOWN_IDLE_AVG);
3052 break;
3053
3054 case HIGH_POWER:
3055 /* Upclock if more than 85% busy over 10ms */
3056 I915_WRITE(GEN6_RP_UP_EI, 8000);
3057 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3058
3059 /* Downclock if less than 60% busy over 32ms */
3060 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3061 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3062
3063 I915_WRITE(GEN6_RP_CONTROL,
3064 GEN6_RP_MEDIA_TURBO |
3065 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3066 GEN6_RP_MEDIA_IS_GFX |
3067 GEN6_RP_ENABLE |
3068 GEN6_RP_UP_BUSY_AVG |
3069 GEN6_RP_DOWN_IDLE_AVG);
3070 break;
3071 }
3072
3073 dev_priv->rps.power = new_power;
3074 dev_priv->rps.last_adj = 0;
3075 }
3076
3077 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3078 {
3079 u32 mask = 0;
3080
3081 if (val > dev_priv->rps.min_freq_softlimit)
3082 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3083 if (val < dev_priv->rps.max_freq_softlimit)
3084 mask |= GEN6_PM_RP_UP_THRESHOLD;
3085
3086 /* IVB and SNB hard-hang on a looping batchbuffer
3087 * if GEN6_PM_UP_EI_EXPIRED is masked.
3088 */
3089 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3090 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3091
3092 return ~mask;
3093 }
3094
3095 /* gen6_set_rps is called to update the frequency request, but should also be
3096 * called when the range (min_delay and max_delay) is modified so that we can
3097 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3098 void gen6_set_rps(struct drm_device *dev, u8 val)
3099 {
3100 struct drm_i915_private *dev_priv = dev->dev_private;
3101
3102 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3103 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3104 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3105
3106 /* min/max delay may still have been modified so be sure to
3107 * write the limits value.
3108 */
3109 if (val != dev_priv->rps.cur_freq) {
3110 gen6_set_rps_thresholds(dev_priv, val);
3111
3112 if (IS_HASWELL(dev))
3113 I915_WRITE(GEN6_RPNSWREQ,
3114 HSW_FREQUENCY(val));
3115 else
3116 I915_WRITE(GEN6_RPNSWREQ,
3117 GEN6_FREQUENCY(val) |
3118 GEN6_OFFSET(0) |
3119 GEN6_AGGRESSIVE_TURBO);
3120 }
3121
3122 /* Make sure we continue to get interrupts
3123 * until we hit the minimum or maximum frequencies.
3124 */
3125 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3126 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3127
3128 POSTING_READ(GEN6_RPNSWREQ);
3129
3130 dev_priv->rps.cur_freq = val;
3131 trace_intel_gpu_freq_change(val * 50);
3132 }
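/*
 * The RPS frequency fields here are programmed in 50 MHz units, hence
 * val * 50 is the requested frequency in MHz for the tracepoint.
 */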
3133
3134 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3135 *
3136 * If Gfx is Idle, then
3137 * 1. Mask Turbo interrupts
3138 * 2. Bring up Gfx clock
3139 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3140 * 4. Clear the Force GFX CLK ON bit so that Gfx can power down
3141 * 5. Unmask Turbo interrupts
3142 */
3143 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3144 {
3145 /*
3146 * When we are idle, drop to the minimum voltage state.
3147 */
3148
3149 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3150 return;
3151
3152 /* Mask turbo interrupts so that they will not come in between */
3153 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3154
3155 /* Bring up the Gfx clock */
3156 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3157 I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
3158 VLV_GFX_CLK_FORCE_ON_BIT);
3159
3160 if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
3161 I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
3162 DRM_ERROR("GFX_CLK_ON request timed out\n");
3163 return;
3164 }
3165
3166 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3167
3168 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3169 dev_priv->rps.min_freq_softlimit);
3170
3171 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3172 & GENFREQSTATUS) == 0, 5))
3173 DRM_ERROR("timed out waiting for Punit\n");
3174
3175 /* Release the Gfx clock */
3176 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3177 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3178 ~VLV_GFX_CLK_FORCE_ON_BIT);
3179
3180 I915_WRITE(GEN6_PMINTRMSK,
3181 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3182 }
3183
3184 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3185 {
3186 struct drm_device *dev = dev_priv->dev;
3187
3188 mutex_lock(&dev_priv->rps.hw_lock);
3189 if (dev_priv->rps.enabled) {
3190 if (IS_VALLEYVIEW(dev))
3191 vlv_set_rps_idle(dev_priv);
3192 else
3193 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3194 dev_priv->rps.last_adj = 0;
3195 }
3196 mutex_unlock(&dev_priv->rps.hw_lock);
3197 }
3198
3199 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3200 {
3201 struct drm_device *dev = dev_priv->dev;
3202
3203 mutex_lock(&dev_priv->rps.hw_lock);
3204 if (dev_priv->rps.enabled) {
3205 if (IS_VALLEYVIEW(dev))
3206 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3207 else
3208 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3209 dev_priv->rps.last_adj = 0;
3210 }
3211 mutex_unlock(&dev_priv->rps.hw_lock);
3212 }
3213
3214 void valleyview_set_rps(struct drm_device *dev, u8 val)
3215 {
3216 struct drm_i915_private *dev_priv = dev->dev_private;
3217
3218 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3219 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3220 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3221
3222 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3223 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3224 dev_priv->rps.cur_freq,
3225 vlv_gpu_freq(dev_priv, val), val);
3226
3227 if (val != dev_priv->rps.cur_freq)
3228 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3229
3230 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3231
3232 dev_priv->rps.cur_freq = val;
3233 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3234 }
3235
3236 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3237 {
3238 struct drm_i915_private *dev_priv = dev->dev_private;
3239
3240 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3241 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3242 ~dev_priv->pm_rps_events);
3243 /* Complete PM interrupt masking here doesn't race with the rps work
3244 * item again unmasking PM interrupts because that is using a different
3245 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3246 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3247
3248 spin_lock_irq(&dev_priv->irq_lock);
3249 dev_priv->rps.pm_iir = 0;
3250 spin_unlock_irq(&dev_priv->irq_lock);
3251
3252 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3253 }
3254
3255 static void gen6_disable_rps(struct drm_device *dev)
3256 {
3257 struct drm_i915_private *dev_priv = dev->dev_private;
3258
3259 I915_WRITE(GEN6_RC_CONTROL, 0);
3260 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3261
3262 gen6_disable_rps_interrupts(dev);
3263 }
3264
3265 static void valleyview_disable_rps(struct drm_device *dev)
3266 {
3267 struct drm_i915_private *dev_priv = dev->dev_private;
3268
3269 I915_WRITE(GEN6_RC_CONTROL, 0);
3270
3271 gen6_disable_rps_interrupts(dev);
3272 }
3273
3274 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3275 {
3276 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3277 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3278 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3279 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3280 }
3281
3282 int intel_enable_rc6(const struct drm_device *dev)
3283 {
3284 /* No RC6 before Ironlake */
3285 if (INTEL_INFO(dev)->gen < 5)
3286 return 0;
3287
3288 /* Respect the kernel parameter if it is set */
3289 if (i915.enable_rc6 >= 0)
3290 return i915.enable_rc6;
3291
3292 /* Disable RC6 on Ironlake */
3293 if (INTEL_INFO(dev)->gen == 5)
3294 return 0;
3295
3296 if (IS_IVYBRIDGE(dev))
3297 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3298
3299 return INTEL_RC6_ENABLE;
3300 }
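/*
 * The returned value is a mask of the INTEL_RC6*_ENABLE bits: e.g.
 * i915.enable_rc6=1 allows plain RC6 only, while 3 also allows deep
 * RC6 (RC6p).
 */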
3301
3302 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3303 {
3304 struct drm_i915_private *dev_priv = dev->dev_private;
3305
3306 spin_lock_irq(&dev_priv->irq_lock);
3307 WARN_ON(dev_priv->rps.pm_iir);
3308 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3309 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3310 spin_unlock_irq(&dev_priv->irq_lock);
3311 }
3312
3313 static void gen8_enable_rps(struct drm_device *dev)
3314 {
3315 struct drm_i915_private *dev_priv = dev->dev_private;
3316 struct intel_ring_buffer *ring;
3317 uint32_t rc6_mask = 0;
3318 int unused;
3319
3320 /* 1a: Software RC state - RC0 */
3321 I915_WRITE(GEN6_RC_STATE, 0);
3322
3323 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3324 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
3325 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3326
3327 /* 2a: Disable RC states. */
3328 I915_WRITE(GEN6_RC_CONTROL, 0);
3329
3330 (void)I915_READ(GEN6_RP_STATE_CAP);
3331
3332 /* 2b: Program RC6 thresholds.*/
3333 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3334 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns = 160ms */
3335 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3336 for_each_ring(ring, dev_priv, unused)
3337 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3338 I915_WRITE(GEN6_RC_SLEEP, 0);
3339 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3340
3341 /* 3: Enable RC6 */
3342 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3343 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3344 intel_print_rc6_info(dev, rc6_mask);
3345 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3346 GEN6_RC_CTL_EI_MODE(1) |
3347 rc6_mask);
3348
3349 /* 4 Program defaults and thresholds for RPS*/
3350 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
3351 I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
3352 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3353 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
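/* Editorial check on the arithmetic: 100000000/128 = 781250, and at
 * 1.28us per count (an assumption) that is exactly 1s, so the value
 * written does match the "1 second" comment even though it differs
 * from the docs' 1000000 figure. */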
3354
3355 /* Docs recommend 900 MHz and 300 MHz respectively */
3356 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3357 dev_priv->rps.max_freq_softlimit << 24 |
3358 dev_priv->rps.min_freq_softlimit << 16);
3359
3360 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3361 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3362 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3363 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
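/* Editorial check, assuming 1.28us per count: 7600000/128 = 59375
 * counts ~= 76ms; 31300000/128 = 244531 counts ~= 313ms; 66000 counts
 * ~= 84.48ms; 350000 counts ~= 448ms -- consistent with the comments
 * above. */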
3364
3365 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3366
3367 /* 5: Enable RPS */
3368 I915_WRITE(GEN6_RP_CONTROL,
3369 GEN6_RP_MEDIA_TURBO |
3370 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3371 GEN6_RP_MEDIA_IS_GFX |
3372 GEN6_RP_ENABLE |
3373 GEN6_RP_UP_BUSY_AVG |
3374 GEN6_RP_DOWN_IDLE_AVG);
3375
3376 /* 6: Ring frequency + overclocking (our driver does this later) */
3377
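/* GT_PERF_STATUS bits 15:8 appear to hold the current frequency ratio
 * (same 50MHz units used above), so RPS starts from whatever frequency
 * the hardware is already running at. */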
3378 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3379
3380 gen6_enable_rps_interrupts(dev);
3381
3382 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3383 }
3384
3385 static void gen6_enable_rps(struct drm_device *dev)
3386 {
3387 struct drm_i915_private *dev_priv = dev->dev_private;
3388 struct intel_ring_buffer *ring;
3389 u32 rp_state_cap;
3390 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3391 u32 gtfifodbg;
3392 int rc6_mode;
3393 int i, ret;
3394
3395 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3396
3397 /* Here begins a magic sequence of register writes to enable
3398 * auto-downclocking.
3399 *
3400 * Perhaps there might be some value in exposing these to
3401 * userspace...
3402 */
3403 I915_WRITE(GEN6_RC_STATE, 0);
3404
3405 /* Clear the DBG now so we don't confuse earlier errors */
3406 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3407 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3408 I915_WRITE(GTFIFODBG, gtfifodbg);
3409 }
3410
3411 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3412
3413 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3414 (void)I915_READ(GEN6_GT_PERF_STATUS);
3415
3416 /* All of these values are in units of 50MHz */
3417 dev_priv->rps.cur_freq = 0;
3418 /* static values from HW: RP0 >= RPe >= RP1 >= RPn (min_freq) */
3419 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3420 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3421 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3422 /* XXX: only BYT has a special efficient freq */
3423 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3424 /* hw_max = RP0 until we check for overclocking */
3425 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3426
3427 /* Preserve min/max settings in case of re-init */
3428 if (dev_priv->rps.max_freq_softlimit == 0)
3429 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3430
3431 if (dev_priv->rps.min_freq_softlimit == 0)
3432 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3433
3434 /* disable the counters and set deterministic thresholds */
3435 I915_WRITE(GEN6_RC_CONTROL, 0);
3436
3437 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3438 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3439 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3440 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3441 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3442
3443 for_each_ring(ring, dev_priv, i)
3444 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3445
3446 I915_WRITE(GEN6_RC_SLEEP, 0);
3447 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3448 if (IS_IVYBRIDGE(dev))
3449 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3450 else
3451 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3452 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3453 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3454
3455 /* Check if we are enabling RC6 */
3456 rc6_mode = intel_enable_rc6(dev_priv->dev);
3457 if (rc6_mode & INTEL_RC6_ENABLE)
3458 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3459
3460 /* We don't use the deeper RC6p/RC6pp states on Haswell */
3461 if (!IS_HASWELL(dev)) {
3462 if (rc6_mode & INTEL_RC6p_ENABLE)
3463 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3464
3465 if (rc6_mode & INTEL_RC6pp_ENABLE)
3466 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3467 }
3468
3469 intel_print_rc6_info(dev, rc6_mask);
3470
3471 I915_WRITE(GEN6_RC_CONTROL,
3472 rc6_mask |
3473 GEN6_RC_CTL_EI_MODE(1) |
3474 GEN6_RC_CTL_HW_ENABLE);
3475
3476 /* Power down if completely idle for over 50ms */
3477 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3478 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3479
3480 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3481 if (ret)
3482 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3483
3484 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3485 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3486 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3487 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3488 (pcu_mbox & 0xff) * 50);
3489 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3490 }
3491
3492 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3493 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3494
3495 gen6_enable_rps_interrupts(dev);
3496
3497 rc6vids = 0;
3498 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3499 if (IS_GEN6(dev) && ret) {
3500 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3501 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3502 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3503 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3504 rc6vids &= 0xffff00;
3505 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3506 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3507 if (ret)
3508 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3509 }
3510
3511 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3512 }
3513
3514 void gen6_update_ring_freq(struct drm_device *dev)
3515 {
3516 struct drm_i915_private *dev_priv = dev->dev_private;
3517 int min_freq = 15;
3518 unsigned int gpu_freq;
3519 unsigned int max_ia_freq, min_ring_freq;
3520 int scaling_factor = 180;
3521 #ifndef __NetBSD__
3522 struct cpufreq_policy *policy;
3523 #endif
3524
3525 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3526
3527 #ifdef __NetBSD__
3528 {
3529 extern uint64_t tsc_freq; /* x86 TSC frequency in Hz */
3530 max_ia_freq = (tsc_freq / 1000);
3531 }
3532 #else
3533 policy = cpufreq_cpu_get(0);
3534 if (policy) {
3535 max_ia_freq = policy->cpuinfo.max_freq;
3536 cpufreq_cpu_put(policy);
3537 } else {
3538 /*
3539 * Default to measured freq if none found, PCU will ensure we
3540 * don't go over
3541 */
3542 max_ia_freq = tsc_khz;
3543 }
3544 #endif
3545
3546 /* Convert from kHz to MHz */
3547 max_ia_freq /= 1000;
3548
3549 min_ring_freq = I915_READ(DCLK) & 0xf;
3550 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3551 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
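/* Illustrative example: a DCLK field of 3 means 3 * 266.6MHz = 800MHz
 * DDR, and mult_frac(3, 8, 3) = 8 re-expresses that as 8 * 100MHz;
 * 8/3 ~= 2.666 is simply the 266.6MHz -> 100MHz unit change, assuming
 * the ring-frequency table works in 100MHz units. */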
3552
3553 /*
3554 * For each potential GPU frequency, load a ring frequency we'd like
3555 * to use for memory access. We do this by specifying the IA frequency
3556 * the PCU should use as a reference to determine the ring frequency.
3557 */
3558 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3559 gpu_freq--) {
3560 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3561 unsigned int ia_freq = 0, ring_freq = 0;
3562
3563 if (INTEL_INFO(dev)->gen >= 8) {
3564 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3565 ring_freq = max(min_ring_freq, gpu_freq);
3566 } else if (IS_HASWELL(dev)) {
3567 ring_freq = mult_frac(gpu_freq, 5, 4);
3568 ring_freq = max(min_ring_freq, ring_freq);
3569 /* leave ia_freq as the default, chosen by cpufreq */
3570 } else {
3571 /* On older processors, there is no separate ring
3572 * clock domain, so in order to boost the bandwidth
3573 * of the ring, we need to upclock the CPU (ia_freq).
3574 *
3575 * For GPU frequencies less than 750MHz,
3576 * just use the lowest ring freq.
3577 */
3578 if (gpu_freq < min_freq)
3579 ia_freq = 800;
3580 else
3581 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3582 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3583 }
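/* Worked example with illustrative numbers: for
 * max_freq_softlimit = 22 (1100MHz in 50MHz units) and gpu_freq = 18,
 * diff = 4, so ia_freq = max_ia_freq - (4 * 180) / 2 =
 * max_ia_freq - 360 (MHz), then converted to 100MHz units by the
 * DIV_ROUND_CLOSEST above. */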
3584
3585 sandybridge_pcode_write(dev_priv,
3586 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3587 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3588 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3589 gpu_freq);
3590 }
3591 }
3592
3593 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3594 {
3595 u32 val, rp0;
3596
3597 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3598
3599 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3600 /* Clamp to max */
3601 rp0 = min_t(u32, rp0, 0xea);
3602
3603 return rp0;
3604 }
3605
3606 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3607 {
3608 u32 val, rpe;
3609
3610 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3611 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3612 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3613 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
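/* The Rpe value appears to be split across two fuse registers: the LO
 * fuse supplies the low 5 bits and the HI fuse the bits above them,
 * hence the << 5 when merging. */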
3614
3615 return rpe;
3616 }
3617
3618 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3619 {
3620 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3621 }
3622
3623 /* Check that the pctx buffer wasn't moved under us. */
3624 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3625 {
3626 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3627
3628 if (WARN_ON(!dev_priv->vlv_pctx))
3629 return;
3630 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3631 dev_priv->vlv_pctx->stolen->start);
3632 }
3633
3634 static void valleyview_setup_pctx(struct drm_device *dev)
3635 {
3636 struct drm_i915_private *dev_priv = dev->dev_private;
3637 struct drm_i915_gem_object *pctx;
3638 unsigned long pctx_paddr;
3639 u32 pcbr;
3640 int pctx_size = 24*1024;
3641
3642 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3643
3644 pcbr = I915_READ(VLV_PCBR);
3645 if (pcbr) {
3646 /* BIOS set it up already, grab the pre-alloc'd space */
3647 int pcbr_offset;
3648
3649 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
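/* pcbr & ~4095 is the 4KiB-aligned base the BIOS programmed;
 * subtracting stolen_base converts it to an offset within stolen
 * memory for the preallocated-object lookup below. */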
3650 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3651 pcbr_offset,
3652 I915_GTT_OFFSET_NONE,
3653 pctx_size);
3654 goto out;
3655 }
3656
3657 /*
3658 * From the Gunit register HAS:
3659 * The Gfx driver is expected to program this register and ensure
3660 * proper allocation within Gfx stolen memory. For example, this
3661 * register should be programmed such that the PCBR range does not
3662 * overlap with other ranges, such as the frame buffer, protected
3663 * memory, or any other relevant ranges.
3664 */
3665 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3666 if (!pctx) {
3667 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3668 return;
3669 }
3670
3671 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3672 I915_WRITE(VLV_PCBR, pctx_paddr);
3673
3674 out:
3675 dev_priv->vlv_pctx = pctx;
3676 }
3677
3678 static void valleyview_cleanup_pctx(struct drm_device *dev)
3679 {
3680 struct drm_i915_private *dev_priv = dev->dev_private;
3681
3682 if (WARN_ON(!dev_priv->vlv_pctx))
3683 return;
3684
3685 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3686 dev_priv->vlv_pctx = NULL;
3687 }
3688
3689 static void valleyview_enable_rps(struct drm_device *dev)
3690 {
3691 struct drm_i915_private *dev_priv = dev->dev_private;
3692 struct intel_ring_buffer *ring;
3693 u32 gtfifodbg, val, rc6_mode = 0;
3694 int i;
3695
3696 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3697
3698 valleyview_check_pctx(dev_priv);
3699
3700 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3701 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3702 gtfifodbg);
3703 I915_WRITE(GTFIFODBG, gtfifodbg);
3704 }
3705
3706 /* If VLV, Forcewake all wells, else re-direct to regular path */
3707 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3708
3709 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3710 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3711 I915_WRITE(GEN6_RP_UP_EI, 66000);
3712 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3713
3714 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3715
3716 I915_WRITE(GEN6_RP_CONTROL,
3717 GEN6_RP_MEDIA_TURBO |
3718 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3719 GEN6_RP_MEDIA_IS_GFX |
3720 GEN6_RP_ENABLE |
3721 GEN6_RP_UP_BUSY_AVG |
3722 GEN6_RP_DOWN_IDLE_CONT);
3723
3724 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3725 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3726 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3727
3728 for_each_ring(ring, dev_priv, i)
3729 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3730
3731 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
3732
3733 /* allows RC6 residency counter to work */
3734 I915_WRITE(VLV_COUNTER_CONTROL,
3735 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3736 VLV_MEDIA_RC6_COUNT_EN |
3737 VLV_RENDER_RC6_COUNT_EN));
3738 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3739 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
3740
3741 intel_print_rc6_info(dev, rc6_mode);
3742
3743 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3744
3745 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3746
3747 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3748 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3749
3750 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
3751 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3752 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3753 dev_priv->rps.cur_freq);
3754
3755 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3756 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3757 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3758 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3759 dev_priv->rps.max_freq);
3760
3761 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3762 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3763 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3764 dev_priv->rps.efficient_freq);
3765
3766 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3767 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3768 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3769 dev_priv->rps.min_freq);
3770
3771 /* Preserve min/max settings in case of re-init */
3772 if (dev_priv->rps.max_freq_softlimit == 0)
3773 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3774
3775 if (dev_priv->rps.min_freq_softlimit == 0)
3776 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3777
3778 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3779 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3780 dev_priv->rps.efficient_freq);
3781
3782 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
3783
3784 gen6_enable_rps_interrupts(dev);
3785
3786 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3787 }
3788
3789 void ironlake_teardown_rc6(struct drm_device *dev)
3790 {
3791 struct drm_i915_private *dev_priv = dev->dev_private;
3792
3793 if (dev_priv->ips.renderctx) {
3794 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
3795 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3796 dev_priv->ips.renderctx = NULL;
3797 }
3798
3799 if (dev_priv->ips.pwrctx) {
3800 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
3801 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3802 dev_priv->ips.pwrctx = NULL;
3803 }
3804 }
3805
3806 static void ironlake_disable_rc6(struct drm_device *dev)
3807 {
3808 struct drm_i915_private *dev_priv = dev->dev_private;
3809
3810 if (I915_READ(PWRCTXA)) {
3811 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3812 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3813 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3814 50);
3815
3816 I915_WRITE(PWRCTXA, 0);
3817 POSTING_READ(PWRCTXA);
3818
3819 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3820 POSTING_READ(RSTDBYCTL);
3821 }
3822 }
3823
3824 static int ironlake_setup_rc6(struct drm_device *dev)
3825 {
3826 struct drm_i915_private *dev_priv = dev->dev_private;
3827
3828 if (dev_priv->ips.renderctx == NULL)
3829 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3830 if (!dev_priv->ips.renderctx)
3831 return -ENOMEM;
3832
3833 if (dev_priv->ips.pwrctx == NULL)
3834 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3835 if (!dev_priv->ips.pwrctx) {
3836 ironlake_teardown_rc6(dev);
3837 return -ENOMEM;
3838 }
3839
3840 return 0;
3841 }
3842
3843 static void ironlake_enable_rc6(struct drm_device *dev)
3844 {
3845 struct drm_i915_private *dev_priv = dev->dev_private;
3846 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3847 bool was_interruptible;
3848 int ret;
3849
3850 /* rc6 disabled by default due to repeated reports of hanging during
3851 * boot and resume.
3852 */
3853 if (!intel_enable_rc6(dev))
3854 return;
3855
3856 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3857
3858 ret = ironlake_setup_rc6(dev);
3859 if (ret)
3860 return;
3861
3862 was_interruptible = dev_priv->mm.interruptible;
3863 dev_priv->mm.interruptible = false;
3864
3865 /*
3866 * GPU can automatically power down the render unit if given a page
3867 * to save state.
3868 */
3869 ret = intel_ring_begin(ring, 6);
3870 if (ret) {
3871 ironlake_teardown_rc6(dev);
3872 dev_priv->mm.interruptible = was_interruptible;
3873 return;
3874 }
3875
3876 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3877 intel_ring_emit(ring, MI_SET_CONTEXT);
3878 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
3879 MI_MM_SPACE_GTT |
3880 MI_SAVE_EXT_STATE_EN |
3881 MI_RESTORE_EXT_STATE_EN |
3882 MI_RESTORE_INHIBIT);
3883 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3884 intel_ring_emit(ring, MI_NOOP);
3885 intel_ring_emit(ring, MI_FLUSH);
3886 intel_ring_advance(ring);
3887
3888 /*
3889 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3890 * does an implicit flush; combined with the MI_FLUSH above, it should
3891 * be safe to assume that renderctx is valid.
3892 */
3893 ret = intel_ring_idle(ring);
3894 dev_priv->mm.interruptible = was_interruptible;
3895 if (ret) {
3896 DRM_ERROR("failed to enable ironlake power savings\n");
3897 ironlake_teardown_rc6(dev);
3898 return;
3899 }
3900
3901 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3902 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3903
3904 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3905 }
3906
3907 static unsigned long intel_pxfreq(u32 vidfreq)
3908 {
3909 unsigned long freq;
3910 int div = (vidfreq & 0x3f0000) >> 16;
3911 int post = (vidfreq & 0x3000) >> 12;
3912 int pre = (vidfreq & 0x7);
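/* The formula below scales a 133.333MHz reference (133333, presumably
 * in kHz) by the fused divider fields. Illustrative example: div = 18,
 * post = 1, pre = 1 gives 18 * 133333 / 2 = 1199997, i.e. roughly
 * 1.2GHz expressed in kHz. */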
3913
3914 if (!pre)
3915 return 0;
3916
3917 freq = ((div * 133333) / ((1<<post) * pre));
3918
3919 return freq;
3920 }
3921
3922 static const struct cparams {
3923 u16 i;
3924 u16 t;
3925 u16 m;
3926 u16 c;
3927 } cparams[] = {
3928 { 1, 1333, 301, 28664 },
3929 { 1, 1066, 294, 24460 },
3930 { 1, 800, 294, 25192 },
3931 { 0, 1333, 276, 27605 },
3932 { 0, 1066, 276, 27605 },
3933 { 0, 800, 231, 23784 },
3934 };
3935
3936 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3937 {
3938 u64 total_count, diff, ret;
3939 u32 count1, count2, count3, m = 0, c = 0;
3940 unsigned long now = jiffies_to_msecs(jiffies), diff1;
3941 int i;
3942
3943 assert_spin_locked(&mchdev_lock);
3944
3945 diff1 = now - dev_priv->ips.last_time1;
3946
3947 /* Prevent division-by-zero if we are asking too fast.
3948 * Also, we don't get interesting results if we are polling
3949 * faster than once in 10ms, so just return the saved value
3950 * in such cases.
3951 */
3952 if (diff1 <= 10)
3953 return dev_priv->ips.chipset_power;
3954
3955 count1 = I915_READ(DMIEC);
3956 count2 = I915_READ(DDREC);
3957 count3 = I915_READ(CSIEC);
3958
3959 total_count = count1 + count2 + count3;
3960
3961 /* FIXME: handle per-counter overflow */
3962 if (total_count < dev_priv->ips.last_count1) {
3963 diff = ~0UL - dev_priv->ips.last_count1;
3964 diff += total_count;
3965 } else {
3966 diff = total_count - dev_priv->ips.last_count1;
3967 }
3968
3969 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
3970 if (cparams[i].i == dev_priv->ips.c_m &&
3971 cparams[i].t == dev_priv->ips.r_t) {
3972 m = cparams[i].m;
3973 c = cparams[i].c;
3974 break;
3975 }
3976 }
3977
3978 diff = div_u64(diff, diff1);
3979 ret = ((m * diff) + c);
3980 ret = div_u64(ret, 10);
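/* Editorial note: diff is now (energy counts)/ms over the sampling
 * window, and the fitted line (m * diff + c) / 10 yields the chipset
 * power estimate cached below -- presumably in mW, given how the
 * i915_read_mch_val() consumers treat it. */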
3981
3982 dev_priv->ips.last_count1 = total_count;
3983 dev_priv->ips.last_time1 = now;
3984
3985 dev_priv->ips.chipset_power = ret;
3986
3987 return ret;
3988 }
3989
3990 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3991 {
3992 struct drm_device *dev = dev_priv->dev;
3993 unsigned long val;
3994
3995 if (INTEL_INFO(dev)->gen != 5)
3996 return 0;
3997
3998 spin_lock_irq(&mchdev_lock);
3999
4000 val = __i915_chipset_val(dev_priv);
4001
4002 spin_unlock_irq(&mchdev_lock);
4003
4004 return val;
4005 }
4006
4007 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4008 {
4009 unsigned long m, x, b;
4010 u32 tsfs;
4011
4012 tsfs = I915_READ(TSFS);
4013
4014 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4015 x = I915_READ8(TR1);
4016
4017 b = tsfs & TSFS_INTR_MASK;
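/* Editorial note: the return below is a linear sensor conversion --
 * slope m (from TSFS) times the raw TR1 thermal reading, scaled by 127,
 * minus the intercept b fused into TSFS. */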
4018
4019 return ((m * x) / 127) - b;
4020 }
4021
4022 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4023 {
4024 struct drm_device *dev = dev_priv->dev;
4025 static const struct v_table {
4026 u16 vd; /* in .1 mil */
4027 u16 vm; /* in .1 mil */
4028 } v_table[] = {
4029 { 0, 0, },
4030 { 375, 0, },
4031 { 500, 0, },
4032 { 625, 0, },
4033 { 750, 0, },
4034 { 875, 0, },
4035 { 1000, 0, },
4036 { 1125, 0, },
4037 { 4125, 3000, },
4038 { 4125, 3000, },
4039 { 4125, 3000, },
4040 { 4125, 3000, },
4041 { 4125, 3000, },
4042 { 4125, 3000, },
4043 { 4125, 3000, },
4044 { 4125, 3000, },
4045 { 4125, 3000, },
4046 { 4125, 3000, },
4047 { 4125, 3000, },
4048 { 4125, 3000, },
4049 { 4125, 3000, },
4050 { 4125, 3000, },
4051 { 4125, 3000, },
4052 { 4125, 3000, },
4053 { 4125, 3000, },
4054 { 4125, 3000, },
4055 { 4125, 3000, },
4056 { 4125, 3000, },
4057 { 4125, 3000, },
4058 { 4125, 3000, },
4059 { 4125, 3000, },
4060 { 4125, 3000, },
4061 { 4250, 3125, },
4062 { 4375, 3250, },
4063 { 4500, 3375, },
4064 { 4625, 3500, },
4065 { 4750, 3625, },
4066 { 4875, 3750, },
4067 { 5000, 3875, },
4068 { 5125, 4000, },
4069 { 5250, 4125, },
4070 { 5375, 4250, },
4071 { 5500, 4375, },
4072 { 5625, 4500, },
4073 { 5750, 4625, },
4074 { 5875, 4750, },
4075 { 6000, 4875, },
4076 { 6125, 5000, },
4077 { 6250, 5125, },
4078 { 6375, 5250, },
4079 { 6500, 5375, },
4080 { 6625, 5500, },
4081 { 6750, 5625, },
4082 { 6875, 5750, },
4083 { 7000, 5875, },
4084 { 7125, 6000, },
4085 { 7250, 6125, },
4086 { 7375, 6250, },
4087 { 7500, 6375, },
4088 { 7625, 6500, },
4089 { 7750, 6625, },
4090 { 7875, 6750, },
4091 { 8000, 6875, },
4092 { 8125, 7000, },
4093 { 8250, 7125, },
4094 { 8375, 7250, },
4095 { 8500, 7375, },
4096 { 8625, 7500, },
4097 { 8750, 7625, },
4098 { 8875, 7750, },
4099 { 9000, 7875, },
4100 { 9125, 8000, },
4101 { 9250, 8125, },
4102 { 9375, 8250, },
4103 { 9500, 8375, },
4104 { 9625, 8500, },
4105 { 9750, 8625, },
4106 { 9875, 8750, },
4107 { 10000, 8875, },
4108 { 10125, 9000, },
4109 { 10250, 9125, },
4110 { 10375, 9250, },
4111 { 10500, 9375, },
4112 { 10625, 9500, },
4113 { 10750, 9625, },
4114 { 10875, 9750, },
4115 { 11000, 9875, },
4116 { 11125, 10000, },
4117 { 11250, 10125, },
4118 { 11375, 10250, },
4119 { 11500, 10375, },
4120 { 11625, 10500, },
4121 { 11750, 10625, },
4122 { 11875, 10750, },
4123 { 12000, 10875, },
4124 { 12125, 11000, },
4125 { 12250, 11125, },
4126 { 12375, 11250, },
4127 { 12500, 11375, },
4128 { 12625, 11500, },
4129 { 12750, 11625, },
4130 { 12875, 11750, },
4131 { 13000, 11875, },
4132 { 13125, 12000, },
4133 { 13250, 12125, },
4134 { 13375, 12250, },
4135 { 13500, 12375, },
4136 { 13625, 12500, },
4137 { 13750, 12625, },
4138 { 13875, 12750, },
4139 { 14000, 12875, },
4140 { 14125, 13000, },
4141 { 14250, 13125, },
4142 { 14375, 13250, },
4143 { 14500, 13375, },
4144 { 14625, 13500, },
4145 { 14750, 13625, },
4146 { 14875, 13750, },
4147 { 15000, 13875, },
4148 { 15125, 14000, },
4149 { 15250, 14125, },
4150 { 15375, 14250, },
4151 { 15500, 14375, },
4152 { 15625, 14500, },
4153 { 15750, 14625, },
4154 { 15875, 14750, },
4155 { 16000, 14875, },
4156 { 16125, 15000, },
4157 };
4158 if (INTEL_INFO(dev)->is_mobile)
4159 return v_table[pxvid].vm;
4160 else
4161 return v_table[pxvid].vd;
4162 }
4163
4164 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4165 {
4166 struct timespec now, diff1;
4167 u64 diff;
4168 unsigned long diffms;
4169 u32 count;
4170
4171 assert_spin_locked(&mchdev_lock);
4172
4173 getrawmonotonic(&now);
4174 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4175
4176 /* Don't divide by 0 */
4177 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4178 if (!diffms)
4179 return;
4180
4181 count = I915_READ(GFXEC);
4182
4183 if (count < dev_priv->ips.last_count2) {
4184 diff = ~0UL - dev_priv->ips.last_count2;
4185 diff += count;
4186 } else {
4187 diff = count - dev_priv->ips.last_count2;
4188 }
4189
4190 dev_priv->ips.last_count2 = count;
4191 dev_priv->ips.last_time2 = now;
4192
4193 /* More magic constants... */
4194 diff = diff * 1181;
4195 diff = div_u64(diff, diffms * 10);
4196 dev_priv->ips.gfx_power = diff;
4197 }
4198
4199 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4200 {
4201 struct drm_device *dev = dev_priv->dev;
4202
4203 if (INTEL_INFO(dev)->gen != 5)
4204 return;
4205
4206 spin_lock_irq(&mchdev_lock);
4207
4208 __i915_update_gfx_val(dev_priv);
4209
4210 spin_unlock_irq(&mchdev_lock);
4211 }
4212
4213 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4214 {
4215 unsigned long t, corr, state1, corr2, state2;
4216 u32 pxvid, ext_v;
4217
4218 assert_spin_locked(&mchdev_lock);
4219
4220 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4221 pxvid = (pxvid >> 24) & 0x7f;
4222 ext_v = pvid_to_extvid(dev_priv, pxvid);
4223
4224 state1 = ext_v;
4225
4226 t = i915_mch_val(dev_priv);
4227
4228 /* Revel in the empirically derived constants */
4229
4230 /* Correction factor in 1/100000 units */
4231 if (t > 80)
4232 corr = ((t * 2349) + 135940);
4233 else if (t >= 50)
4234 corr = ((t * 964) + 29317);
4235 else /* < 50 */
4236 corr = ((t * 301) + 1004);
4237
4238 corr = corr * ((150142 * state1) / 10000 - 78642);
4239 corr /= 100000;
4240 corr2 = (corr * dev_priv->ips.corr);
4241
4242 state2 = (corr2 * state1) / 10000;
4243 state2 /= 100; /* convert to mW */
4244
4245 __i915_update_gfx_val(dev_priv);
4246
4247 return dev_priv->ips.gfx_power + state2;
4248 }
4249
4250 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4251 {
4252 struct drm_device *dev = dev_priv->dev;
4253 unsigned long val;
4254
4255 if (INTEL_INFO(dev)->gen != 5)
4256 return 0;
4257
4258 spin_lock_irq(&mchdev_lock);
4259
4260 val = __i915_gfx_val(dev_priv);
4261
4262 spin_unlock_irq(&mchdev_lock);
4263
4264 return val;
4265 }
4266
4267 /**
4268 * i915_read_mch_val - return value for IPS use
4269 *
4270 * Calculate and return a value for the IPS driver to use when deciding whether
4271 * we have thermal and power headroom to increase CPU or GPU power budget.
4272 */
4273 unsigned long i915_read_mch_val(void)
4274 {
4275 struct drm_i915_private *dev_priv;
4276 unsigned long chipset_val, graphics_val, ret = 0;
4277
4278 spin_lock_irq(&mchdev_lock);
4279 if (!i915_mch_dev)
4280 goto out_unlock;
4281 dev_priv = i915_mch_dev;
4282
4283 chipset_val = __i915_chipset_val(dev_priv);
4284 graphics_val = __i915_gfx_val(dev_priv);
4285
4286 ret = chipset_val + graphics_val;
4287
4288 out_unlock:
4289 spin_unlock_irq(&mchdev_lock);
4290
4291 return ret;
4292 }
4293 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4294
4295 /**
4296 * i915_gpu_raise - raise GPU frequency limit
4297 *
4298 * Raise the limit; IPS indicates we have thermal headroom.
4299 */
4300 bool i915_gpu_raise(void)
4301 {
4302 struct drm_i915_private *dev_priv;
4303 bool ret = true;
4304
4305 spin_lock_irq(&mchdev_lock);
4306 if (!i915_mch_dev) {
4307 ret = false;
4308 goto out_unlock;
4309 }
4310 dev_priv = i915_mch_dev;
4311
4312 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4313 dev_priv->ips.max_delay--;
4314
4315 out_unlock:
4316 spin_unlock_irq(&mchdev_lock);
4317
4318 return ret;
4319 }
4320 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4321
4322 /**
4323 * i915_gpu_lower - lower GPU frequency limit
4324 *
4325 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4326 * frequency maximum.
4327 */
4328 bool i915_gpu_lower(void)
4329 {
4330 struct drm_i915_private *dev_priv;
4331 bool ret = true;
4332
4333 spin_lock_irq(&mchdev_lock);
4334 if (!i915_mch_dev) {
4335 ret = false;
4336 goto out_unlock;
4337 }
4338 dev_priv = i915_mch_dev;
4339
4340 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4341 dev_priv->ips.max_delay++;
4342
4343 out_unlock:
4344 spin_unlock_irq(&mchdev_lock);
4345
4346 return ret;
4347 }
4348 EXPORT_SYMBOL_GPL(i915_gpu_lower);
4349
4350 /**
4351 * i915_gpu_busy - indicate GPU business to IPS
4352 *
4353 * Tell the IPS driver whether or not the GPU is busy.
4354 */
4355 bool i915_gpu_busy(void)
4356 {
4357 struct drm_i915_private *dev_priv;
4358 struct intel_ring_buffer *ring;
4359 bool ret = false;
4360 int i;
4361
4362 spin_lock_irq(&mchdev_lock);
4363 if (!i915_mch_dev)
4364 goto out_unlock;
4365 dev_priv = i915_mch_dev;
4366
4367 for_each_ring(ring, dev_priv, i)
4368 ret |= !list_empty(&ring->request_list);
4369
4370 out_unlock:
4371 spin_unlock_irq(&mchdev_lock);
4372
4373 return ret;
4374 }
4375 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4376
4377 /**
4378 * i915_gpu_turbo_disable - disable graphics turbo
4379 *
4380 * Disable graphics turbo by resetting the max frequency and setting the
4381 * current frequency to the default.
4382 */
4383 bool i915_gpu_turbo_disable(void)
4384 {
4385 struct drm_i915_private *dev_priv;
4386 bool ret = true;
4387
4388 spin_lock_irq(&mchdev_lock);
4389 if (!i915_mch_dev) {
4390 ret = false;
4391 goto out_unlock;
4392 }
4393 dev_priv = i915_mch_dev;
4394
4395 dev_priv->ips.max_delay = dev_priv->ips.fstart;
4396
4397 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4398 ret = false;
4399
4400 out_unlock:
4401 spin_unlock_irq(&mchdev_lock);
4402
4403 return ret;
4404 }
4405 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4406
4407 /**
4408 * Tells the intel_ips driver that the i915 driver is now loaded, if
4409 * IPS got loaded first.
4410 *
4411 * This awkward dance is so that neither module has to depend on the
4412 * other in order for IPS to do the appropriate communication of
4413 * GPU turbo limits to i915.
4414 */
4415 static void
4416 ips_ping_for_i915_load(void)
4417 {
4418 #ifndef __NetBSD__ /* XXX IPS GPU turbo limits what? */
4419 void (*link)(void);
4420
4421 link = symbol_get(ips_link_to_i915_driver);
4422 if (link) {
4423 link();
4424 symbol_put(ips_link_to_i915_driver);
4425 }
4426 #endif
4427 }
4428
4429 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4430 {
4431 /* We only register the i915 ips part with intel-ips once everything is
4432 * set up, to avoid intel-ips sneaking in and reading bogus values. */
4433 spin_lock_irq(&mchdev_lock);
4434 i915_mch_dev = dev_priv;
4435 spin_unlock_irq(&mchdev_lock);
4436
4437 ips_ping_for_i915_load();
4438 }
4439
4440 void intel_gpu_ips_teardown(void)
4441 {
4442 spin_lock_irq(&mchdev_lock);
4443 i915_mch_dev = NULL;
4444 spin_unlock_irq(&mchdev_lock);
4445 }
4446
4447 static void intel_init_emon(struct drm_device *dev)
4448 {
4449 struct drm_i915_private *dev_priv = dev->dev_private;
4450 u32 lcfuse;
4451 u8 pxw[16];
4452 int i;
4453
4454 /* Disable to program */
4455 I915_WRITE(ECR, 0);
4456 POSTING_READ(ECR);
4457
4458 /* Program energy weights for various events */
4459 I915_WRITE(SDEW, 0x15040d00);
4460 I915_WRITE(CSIEW0, 0x007f0000);
4461 I915_WRITE(CSIEW1, 0x1e220004);
4462 I915_WRITE(CSIEW2, 0x04000004);
4463
4464 for (i = 0; i < 5; i++)
4465 I915_WRITE(PEW + (i * 4), 0);
4466 for (i = 0; i < 3; i++)
4467 I915_WRITE(DEW + (i * 4), 0);
4468
4469 /* Program P-state weights to account for frequency power adjustment */
4470 for (i = 0; i < 16; i++) {
4471 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4472 unsigned long freq = intel_pxfreq(pxvidfreq);
4473 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4474 PXVFREQ_PX_SHIFT;
4475 unsigned long val;
4476
4477 val = vid * vid;
4478 val *= (freq / 1000);
4479 val *= 255;
4480 val /= (127*127*900);
4481 if (val > 0xff)
4482 DRM_ERROR("bad pxval: %ld\n", val);
4483 pxw[i] = val;
4484 }
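/* Illustrative example of the weight maths: vid = 100 and freq =
 * 1000000 (1GHz in kHz) give 100 * 100 * 1000 * 255 / (127*127*900)
 * ~= 175, comfortably inside the 8-bit range checked above. */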
4485 /* Render standby states get 0 weight */
4486 pxw[14] = 0;
4487 pxw[15] = 0;
4488
4489 for (i = 0; i < 4; i++) {
4490 u32 val = ((u32)pxw[i*4] << 24) | ((u32)pxw[(i*4)+1] << 16) |
4491 ((u32)pxw[(i*4)+2] << 8) | ((u32)pxw[(i*4)+3]);
4492 I915_WRITE(PXW + (i * 4), val);
4493 }
4494
4495 /* Adjust magic regs to magic values (more experimental results) */
4496 I915_WRITE(OGW0, 0);
4497 I915_WRITE(OGW1, 0);
4498 I915_WRITE(EG0, 0x00007f00);
4499 I915_WRITE(EG1, 0x0000000e);
4500 I915_WRITE(EG2, 0x000e0000);
4501 I915_WRITE(EG3, 0x68000300);
4502 I915_WRITE(EG4, 0x42000000);
4503 I915_WRITE(EG5, 0x00140031);
4504 I915_WRITE(EG6, 0);
4505 I915_WRITE(EG7, 0);
4506
4507 for (i = 0; i < 8; i++)
4508 I915_WRITE(PXWL + (i * 4), 0);
4509
4510 /* Enable PMON + select events */
4511 I915_WRITE(ECR, 0x80000019);
4512
4513 lcfuse = I915_READ(LCFUSE02);
4514
4515 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4516 }
4517
4518 void intel_init_gt_powersave(struct drm_device *dev)
4519 {
4520 if (IS_VALLEYVIEW(dev))
4521 valleyview_setup_pctx(dev);
4522 }
4523
4524 void intel_cleanup_gt_powersave(struct drm_device *dev)
4525 {
4526 if (IS_VALLEYVIEW(dev))
4527 valleyview_cleanup_pctx(dev);
4528 }
4529
4530 void intel_disable_gt_powersave(struct drm_device *dev)
4531 {
4532 struct drm_i915_private *dev_priv = dev->dev_private;
4533
4534 /* Interrupts should be disabled already to avoid re-arming. */
4535 WARN_ON(dev->irq_enabled);
4536
4537 if (IS_IRONLAKE_M(dev)) {
4538 ironlake_disable_drps(dev);
4539 ironlake_disable_rc6(dev);
4540 } else if (INTEL_INFO(dev)->gen >= 6) {
4541 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4542 cancel_work_sync(&dev_priv->rps.work);
4543 mutex_lock(&dev_priv->rps.hw_lock);
4544 if (IS_VALLEYVIEW(dev))
4545 valleyview_disable_rps(dev);
4546 else
4547 gen6_disable_rps(dev);
4548 dev_priv->rps.enabled = false;
4549 mutex_unlock(&dev_priv->rps.hw_lock);
4550 }
4551 }
4552
4553 static void intel_gen6_powersave_work(struct work_struct *work)
4554 {
4555 struct drm_i915_private *dev_priv =
4556 container_of(work, struct drm_i915_private,
4557 rps.delayed_resume_work.work);
4558 struct drm_device *dev = dev_priv->dev;
4559
4560 mutex_lock(&dev_priv->rps.hw_lock);
4561
4562 if (IS_VALLEYVIEW(dev)) {
4563 valleyview_enable_rps(dev);
4564 } else if (IS_BROADWELL(dev)) {
4565 gen8_enable_rps(dev);
4566 gen6_update_ring_freq(dev);
4567 } else {
4568 gen6_enable_rps(dev);
4569 gen6_update_ring_freq(dev);
4570 }
4571 dev_priv->rps.enabled = true;
4572 mutex_unlock(&dev_priv->rps.hw_lock);
4573 }
4574
4575 void intel_enable_gt_powersave(struct drm_device *dev)
4576 {
4577 struct drm_i915_private *dev_priv = dev->dev_private;
4578
4579 if (IS_IRONLAKE_M(dev)) {
4580 ironlake_enable_drps(dev);
4581 ironlake_enable_rc6(dev);
4582 intel_init_emon(dev);
4583 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4584 /*
4585 * PCU communication is slow and this doesn't need to be
4586 * done at any specific time, so do this out of our fast path
4587 * to make resume and init faster.
4588 */
4589 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4590 round_jiffies_up_relative(HZ));
4591 }
4592 }
4593
4594 static void ibx_init_clock_gating(struct drm_device *dev)
4595 {
4596 struct drm_i915_private *dev_priv = dev->dev_private;
4597
4598 /*
4599 * On Ibex Peak and Cougar Point, we need to disable clock
4600 * gating for the panel power sequencer or it will fail to
4601 * start up when no ports are active.
4602 */
4603 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4604 }
4605
4606 static void g4x_disable_trickle_feed(struct drm_device *dev)
4607 {
4608 struct drm_i915_private *dev_priv = dev->dev_private;
4609 int pipe;
4610
4611 for_each_pipe(pipe) {
4612 I915_WRITE(DSPCNTR(pipe),
4613 I915_READ(DSPCNTR(pipe)) |
4614 DISPPLANE_TRICKLE_FEED_DISABLE);
4615 intel_flush_primary_plane(dev_priv, pipe);
4616 }
4617 }
4618
4619 static void ilk_init_lp_watermarks(struct drm_device *dev)
4620 {
4621 struct drm_i915_private *dev_priv = dev->dev_private;
4622
4623 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
4624 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
4625 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
4626
4627 /*
4628 * Don't touch WM1S_LP_EN here.
4629 * Doing so could cause underruns.
4630 */
4631 }
4632
4633 static void ironlake_init_clock_gating(struct drm_device *dev)
4634 {
4635 struct drm_i915_private *dev_priv = dev->dev_private;
4636 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4637
4638 /*
4639 * Required for FBC
4640 * WaFbcDisableDpfcClockGating:ilk
4641 */
4642 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4643 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4644 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
4645
4646 I915_WRITE(PCH_3DCGDIS0,
4647 MARIUNIT_CLOCK_GATE_DISABLE |
4648 SVSMUNIT_CLOCK_GATE_DISABLE);
4649 I915_WRITE(PCH_3DCGDIS1,
4650 VFMUNIT_CLOCK_GATE_DISABLE);
4651
4652 /*
4653 * According to the spec the following bits should be set in
4654 * order to enable memory self-refresh
4655 * The bit 22/21 of 0x42004
4656 * The bit 5 of 0x42020
4657 * The bit 15 of 0x45000
4658 */
4659 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4660 (I915_READ(ILK_DISPLAY_CHICKEN2) |
4661 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4662 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
4663 I915_WRITE(DISP_ARB_CTL,
4664 (I915_READ(DISP_ARB_CTL) |
4665 DISP_FBC_WM_DIS));
4666
4667 ilk_init_lp_watermarks(dev);
4668
4669 /*
4670 * Based on the document from hardware guys the following bits
4671 * should be set unconditionally in order to enable FBC.
4672 * The bit 22 of 0x42000
4673 * The bit 22 of 0x42004
4674 * The bit 7,8,9 of 0x42020.
4675 */
4676 if (IS_IRONLAKE_M(dev)) {
4677 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
4678 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4679 I915_READ(ILK_DISPLAY_CHICKEN1) |
4680 ILK_FBCQ_DIS);
4681 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4682 I915_READ(ILK_DISPLAY_CHICKEN2) |
4683 ILK_DPARB_GATE);
4684 }
4685
4686 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4687
4688 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4689 I915_READ(ILK_DISPLAY_CHICKEN2) |
4690 ILK_ELPIN_409_SELECT);
4691 I915_WRITE(_3D_CHICKEN2,
4692 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4693 _3D_CHICKEN2_WM_READ_PIPELINED);
4694
4695 /* WaDisableRenderCachePipelinedFlush:ilk */
4696 I915_WRITE(CACHE_MODE_0,
4697 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4698
4699 g4x_disable_trickle_feed(dev);
4700
4701 ibx_init_clock_gating(dev);
4702 }
4703
4704 static void cpt_init_clock_gating(struct drm_device *dev)
4705 {
4706 struct drm_i915_private *dev_priv = dev->dev_private;
4707 int pipe;
4708 uint32_t val;
4709
4710 /*
4711 * On Ibex Peak and Cougar Point, we need to disable clock
4712 * gating for the panel power sequencer or it will fail to
4713 * start up when no ports are active.
4714 */
4715 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
4716 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
4717 PCH_CPUNIT_CLOCK_GATE_DISABLE);
4718 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4719 DPLS_EDP_PPS_FIX_DIS);
4720 /* The below fixes the weird display corruption, a few pixels shifted
4721 * downward, on (only) LVDS of some HP laptops with IVY.
4722 */
4723 for_each_pipe(pipe) {
4724 val = I915_READ(TRANS_CHICKEN2(pipe));
4725 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4726 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4727 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4728 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4729 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4730 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4731 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4732 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4733 }
4734 /* WADP0ClockGatingDisable */
4735 for_each_pipe(pipe) {
4736 I915_WRITE(TRANS_CHICKEN1(pipe),
4737 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4738 }
4739 }
4740
4741 static void gen6_check_mch_setup(struct drm_device *dev)
4742 {
4743 struct drm_i915_private *dev_priv = dev->dev_private;
4744 uint32_t tmp;
4745
4746 tmp = I915_READ(MCH_SSKPD);
4747 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4748 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4749 DRM_INFO("This can cause pipe underruns and display issues.\n");
4750 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4751 }
4752 }
4753
4754 static void gen6_init_clock_gating(struct drm_device *dev)
4755 {
4756 struct drm_i915_private *dev_priv = dev->dev_private;
4757 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4758
4759 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4760
4761 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4762 I915_READ(ILK_DISPLAY_CHICKEN2) |
4763 ILK_ELPIN_409_SELECT);
4764
4765 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4766 I915_WRITE(_3D_CHICKEN,
4767 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4768
4769 /* WaSetupGtModeTdRowDispatch:snb */
4770 if (IS_SNB_GT1(dev))
4771 I915_WRITE(GEN6_GT_MODE,
4772 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4773
4774 /*
4775 * BSpec recommends 8x4 when MSAA is used,
4776 * however in practice 16x4 seems fastest.
4777 *
4778 * Note that PS/WM thread counts depend on the WIZ hashing
4779 * disable bit, which we don't touch here, but it's good
4780 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4781 */
4782 I915_WRITE(GEN6_GT_MODE,
4783 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4784
4785 ilk_init_lp_watermarks(dev);
4786
4787 I915_WRITE(CACHE_MODE_0,
4788 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4789
4790 I915_WRITE(GEN6_UCGCTL1,
4791 I915_READ(GEN6_UCGCTL1) |
4792 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4793 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4794
4795 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4796 * gating disable must be set. Failure to set it results in
4797 * flickering pixels due to Z write ordering failures after
4798 * some amount of runtime in the Mesa "fire" demo, and Unigine
4799 * Sanctuary and Tropics, and apparently anything else with
4800 * alpha test or pixel discard.
4801 *
4802 * According to the spec, bit 11 (RCCUNIT) must also be set,
4803 * but we didn't debug actual testcases to find it out.
4804 *
4805 * WaDisableRCCUnitClockGating:snb
4806 * WaDisableRCPBUnitClockGating:snb
4807 */
4808 I915_WRITE(GEN6_UCGCTL2,
4809 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4810 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4811
4812 /* WaStripsFansDisableFastClipPerformanceFix:snb */
4813 I915_WRITE(_3D_CHICKEN3,
4814 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
4815
4816 /*
4817 * Bspec says:
4818 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
4819 * 3DSTATE_SF number of SF output attributes is more than 16."
4820 */
4821 I915_WRITE(_3D_CHICKEN3,
4822 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
4823
4824 /*
4825 * According to the spec the following bits should be
4826 * set in order to enable memory self-refresh and fbc:
4827 * The bit21 and bit22 of 0x42000
4828 * The bit21 and bit22 of 0x42004
4829 * The bit5 and bit7 of 0x42020
4830 * The bit14 of 0x70180
4831 * The bit14 of 0x71180
4832 *
4833 * WaFbcAsynchFlipDisableFbcQueue:snb
4834 */
4835 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4836 I915_READ(ILK_DISPLAY_CHICKEN1) |
4837 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4838 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4839 I915_READ(ILK_DISPLAY_CHICKEN2) |
4840 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
4841 I915_WRITE(ILK_DSPCLK_GATE_D,
4842 I915_READ(ILK_DSPCLK_GATE_D) |
4843 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4844 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4845
4846 g4x_disable_trickle_feed(dev);
4847
4848 cpt_init_clock_gating(dev);
4849
4850 gen6_check_mch_setup(dev);
4851 }
4852
4853 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4854 {
4855 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4856
4857 /*
4858 * WaVSThreadDispatchOverride:ivb,vlv
4859 *
4860 * This actually overrides the dispatch
4861 * mode for all thread types.
4862 */
4863 reg &= ~GEN7_FF_SCHED_MASK;
4864 reg |= GEN7_FF_TS_SCHED_HW;
4865 reg |= GEN7_FF_VS_SCHED_HW;
4866 reg |= GEN7_FF_DS_SCHED_HW;
4867
4868 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4869 }
4870
4871 static void lpt_init_clock_gating(struct drm_device *dev)
4872 {
4873 struct drm_i915_private *dev_priv = dev->dev_private;
4874
4875 /*
4876 * TODO: this bit should only be enabled when really needed, then
4877 * disabled when not needed anymore in order to save power.
4878 */
4879 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4880 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4881 I915_READ(SOUTH_DSPCLK_GATE_D) |
4882 PCH_LP_PARTITION_LEVEL_DISABLE);
4883
4884 /* WADPOClockGatingDisable:hsw */
4885 I915_WRITE(_TRANSA_CHICKEN1,
4886 I915_READ(_TRANSA_CHICKEN1) |
4887 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4888 }
4889
4890 static void lpt_suspend_hw(struct drm_device *dev)
4891 {
4892 struct drm_i915_private *dev_priv = dev->dev_private;
4893
4894 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4895 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4896
4897 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4898 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4899 }
4900 }
4901
4902 static void gen8_init_clock_gating(struct drm_device *dev)
4903 {
4904 struct drm_i915_private *dev_priv = dev->dev_private;
4905 enum i915_pipe pipe;
4906
4907 I915_WRITE(WM3_LP_ILK, 0);
4908 I915_WRITE(WM2_LP_ILK, 0);
4909 I915_WRITE(WM1_LP_ILK, 0);
4910
4911 /* FIXME(BDW): Check all the w/a, some might only apply to
4912 * pre-production hw. */
4913
4914 /* WaDisablePartialInstShootdown:bdw */
4915 I915_WRITE(GEN8_ROW_CHICKEN,
4916 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
4917
4918 /* WaDisableThreadStallDopClockGating:bdw */
4919 /* FIXME: Unclear whether we really need this on production bdw. */
4920 I915_WRITE(GEN8_ROW_CHICKEN,
4921 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
4922
4923 /*
4924 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
4925 * pre-production hardware
4926 */
4927 I915_WRITE(HALF_SLICE_CHICKEN3,
4928 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
4929 I915_WRITE(HALF_SLICE_CHICKEN3,
4930 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
4931 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
4932
4933 I915_WRITE(_3D_CHICKEN3,
4934 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
4935
4936 I915_WRITE(COMMON_SLICE_CHICKEN2,
4937 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
4938
4939 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4940 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
4941
4942 /* WaSwitchSolVfFArbitrationPriority:bdw */
4943 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4944
4945 /* WaPsrDPAMaskVBlankInSRD:bdw */
4946 I915_WRITE(CHICKEN_PAR1_1,
4947 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
4948
4949 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
4950 for_each_pipe(pipe) {
4951 I915_WRITE(CHICKEN_PIPESL_1(pipe),
4952 I915_READ(CHICKEN_PIPESL_1(pipe)) |
4953 BDW_DPRS_MASK_VBLANK_SRD);
4954 }
4955
4956 /* Use Force Non-Coherent whenever executing a 3D context. This is a
4957 * workaround for a possible hang in the unlikely event a TLB
4958 * invalidation occurs during a PSD flush.
4959 */
4960 I915_WRITE(HDC_CHICKEN0,
4961 I915_READ(HDC_CHICKEN0) |
4962 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
4963
4964 /* WaVSRefCountFullforceMissDisable:bdw */
4965 /* WaDSRefCountFullforceMissDisable:bdw */
4966 I915_WRITE(GEN7_FF_THREAD_MODE,
4967 I915_READ(GEN7_FF_THREAD_MODE) &
4968 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
4969
4970 /*
4971 * BSpec recommends 8x4 when MSAA is used,
4972 * however in practice 16x4 seems fastest.
4973 *
4974 * Note that PS/WM thread counts depend on the WIZ hashing
4975 * disable bit, which we don't touch here, but it's good
4976 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4977 */
4978 I915_WRITE(GEN7_GT_MODE,
4979 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4980
4981 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
4982 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
4983
4984 /* WaDisableSDEUnitClockGating:bdw */
4985 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
4986 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
4987
4988 /* Wa4x4STCOptimizationDisable:bdw */
4989 I915_WRITE(CACHE_MODE_1,
4990 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
4991 }
4992
4993 static void haswell_init_clock_gating(struct drm_device *dev)
4994 {
4995 struct drm_i915_private *dev_priv = dev->dev_private;
4996
4997 ilk_init_lp_watermarks(dev);
4998
4999 /* L3 caching of data atomics doesn't work -- disable it. */
5000 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5001 I915_WRITE(HSW_ROW_CHICKEN3,
5002 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5003
5004 /* This is required by WaCatErrorRejectionIssue:hsw */
5005 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5006 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5007 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5008
5009 /* WaVSRefCountFullforceMissDisable:hsw */
5010 I915_WRITE(GEN7_FF_THREAD_MODE,
5011 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
5012
5013 /* enable HiZ Raw Stall Optimization */
5014 I915_WRITE(CACHE_MODE_0_GEN7,
5015 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5016
5017 /* WaDisable4x2SubspanOptimization:hsw */
5018 I915_WRITE(CACHE_MODE_1,
5019 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5020
5021 /*
5022 * BSpec recommends 8x4 when MSAA is used,
5023 * however in practice 16x4 seems fastest.
5024 *
5025 * Note that PS/WM thread counts depend on the WIZ hashing
5026 * disable bit, which we don't touch here, but it's good
5027 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5028 */
5029 I915_WRITE(GEN7_GT_MODE,
5030 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5031
5032 /* WaSwitchSolVfFArbitrationPriority:hsw */
5033 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5034
5035 /* WaRsPkgCStateDisplayPMReq:hsw */
5036 I915_WRITE(CHICKEN_PAR1_1,
5037 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5038
5039 lpt_init_clock_gating(dev);
5040 }
5041
5042 static void ivybridge_init_clock_gating(struct drm_device *dev)
5043 {
5044 struct drm_i915_private *dev_priv = dev->dev_private;
5045 uint32_t snpcr;
5046
5047 ilk_init_lp_watermarks(dev);
5048
5049 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5050
5051 /* WaDisableEarlyCull:ivb */
5052 I915_WRITE(_3D_CHICKEN3,
5053 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5054
5055 /* WaDisableBackToBackFlipFix:ivb */
5056 I915_WRITE(IVB_CHICKEN3,
5057 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5058 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5059
5060 /* WaDisablePSDDualDispatchEnable:ivb */
5061 if (IS_IVB_GT1(dev))
5062 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5063 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5064
5065 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5066 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5067 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5068
5069 /* WaApplyL3ControlAndL3ChickenMode:ivb */
5070 I915_WRITE(GEN7_L3CNTLREG1,
5071 GEN7_WA_FOR_GEN7_L3_CONTROL);
5072 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5073 GEN7_WA_L3_CHICKEN_MODE);
5074 if (IS_IVB_GT1(dev))
5075 I915_WRITE(GEN7_ROW_CHICKEN2,
5076 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5077 else {
5078 /* must write both registers */
5079 I915_WRITE(GEN7_ROW_CHICKEN2,
5080 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5081 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5082 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5083 }
5084
5085 /* WaForceL3Serialization:ivb */
5086 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5087 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5088
5089 /*
5090 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5091 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5092 */
5093 I915_WRITE(GEN6_UCGCTL2,
5094 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5095
5096 /* This is required by WaCatErrorRejectionIssue:ivb */
5097 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5098 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5099 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5100
5101 g4x_disable_trickle_feed(dev);
5102
5103 gen7_setup_fixed_func_scheduler(dev_priv);
5104
5105 if (0) { /* causes HiZ corruption on ivb:gt1 */
5106 /* enable HiZ Raw Stall Optimization */
5107 I915_WRITE(CACHE_MODE_0_GEN7,
5108 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5109 }
5110
5111 /* WaDisable4x2SubspanOptimization:ivb */
5112 I915_WRITE(CACHE_MODE_1,
5113 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5114
5115 /*
5116 * BSpec recommends 8x4 when MSAA is used,
5117 * however in practice 16x4 seems fastest.
5118 *
5119 * Note that PS/WM thread counts depend on the WIZ hashing
5120 * disable bit, which we don't touch here, but it's good
5121 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5122 */
5123 I915_WRITE(GEN7_GT_MODE,
5124 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5125
5126 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5127 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5128 snpcr |= GEN6_MBC_SNPCR_MED;
5129 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5130
5131 if (!HAS_PCH_NOP(dev))
5132 cpt_init_clock_gating(dev);
5133
5134 gen6_check_mch_setup(dev);
5135 }
5136
valleyview_init_clock_gating(struct drm_device * dev)5137 static void valleyview_init_clock_gating(struct drm_device *dev)
5138 {
5139 struct drm_i915_private *dev_priv = dev->dev_private;
5140 u32 val;
5141
5142 mutex_lock(&dev_priv->rps.hw_lock);
5143 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5144 mutex_unlock(&dev_priv->rps.hw_lock);
5145 switch ((val >> 6) & 3) {
5146 case 0:
5147 case 1:
5148 dev_priv->mem_freq = 800;
5149 break;
5150 case 2:
5151 dev_priv->mem_freq = 1066;
5152 break;
5153 case 3:
5154 dev_priv->mem_freq = 1333;
5155 break;
5156 }
5157 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5158
5159 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5160
5161 /* WaDisableEarlyCull:vlv */
5162 I915_WRITE(_3D_CHICKEN3,
5163 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5164
5165 /* WaDisableBackToBackFlipFix:vlv */
5166 I915_WRITE(IVB_CHICKEN3,
5167 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5168 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5169
5170 /* WaPsdDispatchEnable:vlv */
5171 /* WaDisablePSDDualDispatchEnable:vlv */
5172 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5173 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5174 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5175
5176 /* WaForceL3Serialization:vlv */
5177 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5178 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5179
5180 /* WaDisableDopClockGating:vlv */
5181 I915_WRITE(GEN7_ROW_CHICKEN2,
5182 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5183
5184 /* This is required by WaCatErrorRejectionIssue:vlv */
5185 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5186 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5187 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5188
5189 gen7_setup_fixed_func_scheduler(dev_priv);
5190
5191 /*
5192 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5193 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5194 */
5195 I915_WRITE(GEN6_UCGCTL2,
5196 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5197
5198 /* WaDisableL3Bank2xClockGate:vlv */
5199 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5200
5201 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5202
5203 /*
5204 * BSpec says this must be set, even though
5205 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5206 */
5207 I915_WRITE(CACHE_MODE_1,
5208 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5209
5210 /*
5211 * WaIncreaseL3CreditsForVLVB0:vlv
5212 * This is the hardware default actually.
5213 */
5214 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5215
5216 /*
5217 * WaDisableVLVClockGating_VBIIssue:vlv
5218 * Disable clock gating on th GCFG unit to prevent a delay
5219 * in the reporting of vblank events.
5220 */
5221 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5222 }

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			\
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							\
		if ((power_well)->domains & (domain_mask))
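
/*
 * Example (illustrative only): the trailing if-statement makes each macro
 * expand to a filtered loop, so a caller can iterate over just the wells
 * that power a given domain:
 *
 *	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_AUDIO),
 *			    power_domains)
 *		DRM_DEBUG_KMS("%s powers the audio domain\n",
 *			      power_well->name);
 *
 * Wells whose ->domains mask lacks the requested bit are skipped, and the
 * _rev variant walks the array from the highest index down.
 */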

/**
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;

	power_domains = &dev_priv->power_domains;

	return power_domains->domain_use_count[domain];
}

bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	mutex_lock(&power_domains->lock);
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->ops->is_enabled(dev_priv, power_well)) {
			is_enabled = false;
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	return is_enabled;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long irqflags;

#ifndef __NetBSD__		/* XXX Haswell VGA what? */
	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif

	if (IS_BROADWELL(dev)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
			   dev_priv->de_irq_mask[PIPE_B]);
		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
			   ~dev_priv->de_irq_mask[PIPE_B] |
			   GEN8_PIPE_VBLANK);
		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
			   dev_priv->de_irq_mask[PIPE_C]);
		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
			   ~dev_priv->de_irq_mask[PIPE_C] |
			   GEN8_PIPE_VBLANK);
		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static void reset_vblank_counter(struct drm_device *dev, enum i915_pipe pipe)
{
	assert_spin_locked(&dev->vbl_lock);

	dev->vblank[pipe].last = 0;
}

static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	enum i915_pipe pipe;
	unsigned long irqflags;

	/*
	 * After this, the registers on the pipes that are part of the power
	 * well will become zero, so we have to adjust our counters according to
	 * that.
	 *
	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
	 */
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	for_each_pipe(pipe)
		if (pipe != PIPE_A)
			reset_vblank_counter(dev, pipe);
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");

			hsw_power_well_post_disable(dev_priv);
		}
	}
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
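
	/*
	 * Note (descriptive): COND is a function-local convenience macro;
	 * wait_for() below re-evaluates it on each poll, so the Punit status
	 * register is re-read until it reports the requested state or the
	 * timeout expires.
	 */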

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization we need to defer enabling hotplug
	 * processing until fbdev is set up.
	 */
	if (dev_priv->enable_hotplug_processing)
		intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe(pipe)
		__intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	spin_lock_irq(&dev->vbl_lock);
	for_each_pipe(pipe)
		reset_vblank_counter(dev, pipe);
	spin_unlock_irq(&dev->vbl_lock);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}

void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
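
/*
 * Example (illustrative only): callers are expected to bracket hardware
 * access with a matching get/put pair on the domain they touch; the domain
 * below is an arbitrary choice for the sketch:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_B);
 *	... access pipe B registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_B);
 *
 * The per-well and per-domain reference counts ensure the underlying well
 * is only disabled once the last user of any domain it powers is gone.
 */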

static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
void i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!hsw_pwr))
		return;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

/* Display audio driver power well release */
void i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!hsw_pwr))
		return;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))
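
/*
 * Illustrative note: HSW_DISPLAY_POWER_DOMAINS is simply the complement of
 * the always-on set (plus POWER_DOMAIN_INIT), so a domain such as
 * POWER_DOMAIN_PIPE_B lands in the switchable display well because it is
 * not listed in HSW_ALWAYS_ON_POWER_DOMAINS above.
 */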

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
};

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
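
/*
 * Note (descriptive): set_power_wells() is a GNU statement-expression
 * macro and relies on ARRAY_SIZE(), so its second argument must be a real
 * array (like the tables above), not a pointer to one.
 */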

int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

#ifdef __NetBSD__
	linux_mutex_init(&power_domains->lock);
#else
	mutex_init(&power_domains->lock);
#endif

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	hsw_pwr = NULL;
#ifdef __NetBSD__
	linux_mutex_destroy(&power_domains->lock);
#else
	mutex_destroy(&power_domains->lock);
#endif
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
		power_well->ops->sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
}

void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = dev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = dev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}

void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = dev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}

void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = dev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
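
/*
 * Example (illustrative only): both mailbox helpers must be called with
 * rps.hw_lock held. The command token below is assumed for this sketch;
 * it is one the RPS setup code uses elsewhere in the driver:
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	sandybridge_pcode_write(dev_priv,
 *				GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */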

int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
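
/*
 * Worked example (illustrative): with an 800 MHz memory clock (div = 10),
 * a Punit opcode of 0xc8 (200) decodes as
 *
 *	DIV_ROUND_CLOSEST(800 * (200 + 6 - 0xbd), 4 * 10)
 *	  = DIV_ROUND_CLOSEST(800 * 17, 40) = 340 MHz
 */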

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
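
/*
 * Worked example (illustrative): vlv_freq_opcode() is the inverse of
 * vlv_gpu_freq(), so the 340 MHz result from the example above maps back:
 *
 *	DIV_ROUND_CLOSEST(4 * 10 * 340, 800) + 0xbd - 6
 *	  = 17 + 183 = 200 = 0xc8
 */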

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

#ifdef __NetBSD__
	linux_mutex_init(&dev_priv->rps.hw_lock);
#else
	mutex_init(&dev_priv->rps.hw_lock);
#endif

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm.irqs_disabled = false;
}