// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_vblank_work.h>

#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->dev);

	if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0,
			    "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
			    crtc->base.id, crtc->name))
		drm_crtc_vblank_put(crtc);
}

struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
{
	return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
}

struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
				       enum pipe pipe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc) {
		if (crtc->pipe == pipe)
			return crtc;
	}

	return NULL;
}

void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
{
	drm_crtc_wait_one_vblank(&crtc->base);
}

void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
				     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);

	if (crtc->active)
		intel_crtc_wait_for_next_vblank(crtc);
}

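/*
 * Return the current frame counter for the crtc: the hardware counter when
 * the platform provides a usable one (vblank->max_vblank_count != 0),
 * otherwise the software-tracked vblank count. Returns 0 for an inactive crtc.
 */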
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];

	if (!crtc->active)
		return 0;

	if (!vblank->max_vblank_count)
		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

	return crtc->base.funcs->get_vblank_counter(&crtc->base);
}

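/*
 * Maximum value of the hardware frame counter for this crtc state, or 0 if
 * the software counter must be used instead (DSI command mode TE, i965gm TV
 * out, and gen2 which has no hardware frame counter at all).
 */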
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * From gen 11, in case of DSI cmd mode, the frame counter wouldn't
	 * have updated at the beginning of TE. If we want to use the hw
	 * counter, we would only find it updated at the next TE, hence
	 * switch to the sw counter.
	 */
	if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
				      I915_MODE_FLAG_DSI_USE_TE1))
		return 0;

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (DISPLAY_VER(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);

	/*
	 * Should really happen exactly when we enable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_enable(crtc);
}

void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/*
	 * Should really happen exactly when we disable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_disable(crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}

struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);

	if (crtc_state)
		intel_crtc_state_reset(crtc_state, crtc);

	return crtc_state;
}

void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
			    struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}

static struct intel_crtc *intel_crtc_alloc(void)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return ERR_PTR(-ENOMEM);

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(crtc);
		return ERR_PTR(-ENOMEM);
	}

	crtc->base.state = &crtc_state->uapi;
	crtc->config = crtc_state;

	return crtc;
}

static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}

static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);

	cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);

	drm_crtc_cleanup(&crtc->base);
	kfree(crtc);
}

static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	intel_crtc_debugfs_add(to_intel_crtc(crtc));
	return 0;
}

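/*
 * CRTC funcs common to all platforms; the per-platform tables below only
 * differ in which vblank counter/enable/disable hooks they use.
 */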
#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

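/*
 * Create the crtc for @pipe together with its primary, sprite and cursor
 * planes, pick the per-platform vblank funcs, and register the whole thing
 * with drm. Also sets up FIFO underrun reporting and the cpu latency QoS
 * request used by the vblank work.
 */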
int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	if (DISPLAY_VER(dev_priv) >= 9)
		primary = skl_universal_plane_create(dev_priv, pipe,
						     PLANE_PRIMARY);
	else
		primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	intel_init_fifo_underrun_reporting(dev_priv, crtc, false);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		if (DISPLAY_VER(dev_priv) >= 9)
			plane = skl_universal_plane_create(dev_priv, pipe,
							   PLANE_SPRITE0 + sprite);
		else
			plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (DISPLAY_VER(dev_priv) == 4)
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (DISPLAY_VER(dev_priv) == 3)
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (DISPLAY_VER(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (DISPLAY_VER(dev_priv) >= 11)
		drm_crtc_create_scaling_filter_property(&crtc->base,
							BIT(DRM_SCALING_FILTER_DEFAULT) |
							BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	intel_color_crtc_init(crtc);
	intel_drrs_crtc_init(crtc);
	intel_crtc_crc_init(crtc);

	cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}

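/*
 * Only active pipes doing a plain color management update (no full modeset,
 * no LUT preload) need the vblank work; in that case the new LUTs are loaded
 * from the vblank worker rather than during the commit itself.
 */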
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->hw.active &&
		!intel_crtc_needs_modeset(crtc_state) &&
		!crtc_state->preload_luts &&
		intel_crtc_needs_color_update(crtc_state);
}

static void intel_crtc_vblank_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);
	struct intel_crtc_state *crtc_state =
		container_of(work, typeof(*crtc_state), vblank_work);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_crtc_vblank_work_start(crtc);

	intel_color_load_luts(crtc_state);

	if (crtc_state->uapi.event) {
		spin_lock_irq(&crtc->base.dev->event_lock);
		drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
		crtc_state->uapi.event = NULL;
		spin_unlock_irq(&crtc->base.dev->event_lock);
	}

	trace_intel_crtc_vblank_work_end(crtc);
}

static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
			     intel_crtc_vblank_work);
	/*
	 * Interrupt latency is critical for getting the vblank
	 * work executed as early as possible during the vblank.
	 */
	cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
}

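/*
 * Flush any vblank works scheduled by this commit and relax the cpu latency
 * QoS request back to its default value once they have run.
 */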
void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!intel_crtc_needs_vblank_work(crtc_state))
			continue;

		drm_vblank_work_flush(&crtc_state->vblank_work);
		cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
					       PM_QOS_DEFAULT_VALUE);
	}
}

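/*
 * Convert a duration in microseconds to a scanline count:
 * scanlines = usecs * crtc_clock (kHz) / (1000 * crtc_htotal), rounded up.
 * Returns 1 if the mode has no htotal, purely as a safety net.
 */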
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
			     int usecs)
{
	/* paranoia */
	if (!adjusted_mode->crtc_htotal)
		return 1;

	return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
			    1000 * adjusted_mode->crtc_htotal);
}

static int intel_mode_vblank_start(const struct drm_display_mode *mode)
{
	int vblank_start = mode->crtc_vblank_start;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vblank_start = DIV_ROUND_UP(vblank_start, 2);

	return vblank_start;
}

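/*
 * Determine the vblank start scanline and the [*min, *max] scanline window
 * that intel_pipe_update_start() must evade while the double-buffered
 * registers are being written.
 */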
static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
					      struct intel_crtc *crtc,
					      int *min, int *max, int *vblank_start)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *crtc_state;
	const struct drm_display_mode *adjusted_mode;

	/*
	 * During fastsets/etc. the transcoder is still
	 * running with the old timings at this point.
	 *
	 * TODO: maybe just use the active timings here?
	 */
	if (intel_crtc_needs_modeset(new_crtc_state))
		crtc_state = new_crtc_state;
	else
		crtc_state = old_crtc_state;

	adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		if (intel_vrr_is_push_sent(crtc_state))
			*vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		else
			*vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	} else {
		*vblank_start = intel_mode_vblank_start(adjusted_mode);
	}

	/* FIXME needs to be calibrated sensibly */
	*min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
							VBLANK_EVASION_TIME_US);
	*max = *vblank_start - 1;

	/*
	 * M/N is double buffered on the transcoder's undelayed vblank,
	 * so with seamless M/N we must evade both vblanks.
	 */
	if (new_crtc_state->update_m_n)
		*min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
}

/**
 * intel_pipe_update_start() - start update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically regarding vblank. If the next vblank will happen within
 * the next 100 us, this function waits until the vblank passes.
 *
 * After a successful call to this function, interrupts will be disabled
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays.
 */
void intel_pipe_update_start(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	long timeout = msecs_to_jiffies_timeout(1);
	int scanline, min, max, vblank_start;
	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
	DEFINE_WAIT(wait);

	intel_psr_lock(new_crtc_state);

	if (new_crtc_state->do_async_flip)
		return;

	if (intel_crtc_needs_vblank_work(new_crtc_state))
		intel_crtc_vblank_work_init(new_crtc_state);

	intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
	if (min <= 0 || max <= 0)
		goto irq_disable;

	if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
		goto irq_disable;

	/*
	 * Wait for psr to idle out after enabling the VBL interrupts.
	 * VBL interrupts will start the PSR exit and prevent a PSR
	 * re-entry as well.
	 */
	intel_psr_wait_for_idle_locked(new_crtc_state);

	local_irq_disable();

	crtc->debug.min_vbl = min;
	crtc->debug.max_vbl = max;
	trace_intel_pipe_update_start(crtc);

	for (;;) {
		/*
		 * prepare_to_wait() has a memory barrier, which guarantees
		 * other CPUs can see the task state update by the time we
		 * read the scanline.
		 */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);

		scanline = intel_get_crtc_scanline(crtc);
		if (scanline < min || scanline > max)
			break;

		if (!timeout) {
			drm_err(&dev_priv->drm,
				"Potential atomic update failure on pipe %c\n",
				pipe_name(crtc->pipe));
			break;
		}

		local_irq_enable();

		timeout = schedule_timeout(timeout);

		local_irq_disable();
	}

	finish_wait(wq, &wait);

	drm_crtc_vblank_put(&crtc->base);

	/*
	 * On VLV/CHV DSI the scanline counter would appear to
	 * increment approx. 1/3 of a scanline before start of vblank.
	 * The registers still get latched at start of vblank however.
	 * This means we must not write any registers on the first
	 * line of vblank (since not the whole line is actually in
	 * vblank). And unfortunately we can't use the interrupt to
	 * wait here since it will fire too soon. We could use the
	 * frame start interrupt instead since it will fire after the
	 * critical scanline, but that would require more changes
	 * in the interrupt code. So for now we'll just do the nasty
	 * thing and poll for the bad scanline to pass us by.
	 *
	 * FIXME figure out if BXT+ DSI suffers from this as well
	 */
	while (need_vlv_dsi_wa && scanline == vblank_start)
		scanline = intel_get_crtc_scanline(crtc);

	crtc->debug.scanline_start = scanline;
	crtc->debug.start_vbl_time = ktime_get();
	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

	trace_intel_pipe_update_vblank_evaded(crtc);
	return;

irq_disable:
	local_irq_disable();
}

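/*
 * Track how long each update spent inside the vblank-evade critical section:
 * a power-of-two histogram in ~0.5us (512 ns) buckets plus min/max/sum, and
 * a debug message whenever we overstay VBLANK_EVASION_TIME_US.
 */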
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
	u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
	unsigned int h;

	h = ilog2(delta >> 9);
	if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
		h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
	crtc->debug.vbl.times[h]++;

	crtc->debug.vbl.sum += delta;
	if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
		crtc->debug.vbl.min = delta;
	if (delta > crtc->debug.vbl.max)
		crtc->debug.vbl.max = delta;

	if (delta > 1000 * VBLANK_EVASION_TIME_US) {
		drm_dbg_kms(crtc->base.dev,
			    "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
			    pipe_name(crtc->pipe),
			    div_u64(delta, 1000),
			    VBLANK_EVASION_TIME_US);
		crtc->debug.vbl.over++;
	}
}
#else
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif

/**
 * intel_pipe_update_end() - end update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank.
 */
void intel_pipe_update_end(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;
	int scanline_end = intel_get_crtc_scanline(crtc);
	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
	ktime_t end_vbl_time = ktime_get();
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_psr_unlock(new_crtc_state);

	if (new_crtc_state->do_async_flip)
		return;

	trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);

	/*
	 * In case of MIPI DSI command mode, we need to set a frame update
	 * request for every commit.
	 */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
		icl_dsi_frame_update(new_crtc_state);

	/*
	 * We're still in the vblank-evade critical section, this can't race.
	 * Would be slightly nice to just grab the vblank count and arm the
	 * event outside of the critical section - the spinlock might spin for a
	 * while ...
	 */
	if (intel_crtc_needs_vblank_work(new_crtc_state)) {
		drm_vblank_work_schedule(&new_crtc_state->vblank_work,
					 drm_crtc_accurate_vblank_count(&crtc->base) + 1,
					 false);
	} else if (new_crtc_state->uapi.event) {
		drm_WARN_ON(&dev_priv->drm,
			    drm_crtc_vblank_get(&crtc->base) != 0);

		spin_lock(&crtc->base.dev->event_lock);
		drm_crtc_arm_vblank_event(&crtc->base,
					  new_crtc_state->uapi.event);
		spin_unlock(&crtc->base.dev->event_lock);

		new_crtc_state->uapi.event = NULL;
	}

	/*
	 * Send VRR Push to terminate Vblank. If we are already in vblank
	 * this has to be done _after_ sampling the frame counter, as
	 * otherwise the push would immediately terminate the vblank and
	 * the sampled frame counter would correspond to the next frame
	 * instead of the current frame.
	 *
	 * There is a tiny race here (iff vblank evasion failed us) where
	 * we might sample the frame counter just before vmax vblank start
	 * but the push would be sent just after it. That would cause the
	 * push to affect the next frame instead of the current frame,
	 * which would cause the next frame to terminate already at vmin
	 * vblank start instead of vmax vblank start.
	 */
	intel_vrr_send_push(new_crtc_state);

	local_irq_enable();

	if (intel_vgpu_active(dev_priv))
		return;

	if (crtc->debug.start_vbl_count &&
	    crtc->debug.start_vbl_count != end_vbl_count) {
		drm_err(&dev_priv->drm,
			"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
			pipe_name(pipe), crtc->debug.start_vbl_count,
			end_vbl_count,
			ktime_us_delta(end_vbl_time,
				       crtc->debug.start_vbl_time),
			crtc->debug.min_vbl, crtc->debug.max_vbl,
			crtc->debug.scanline_start, scanline_end);
	}

	dbg_vblank_evade(crtc, end_vbl_time);
}