// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and switch
 * between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base: drm_encoder base class for registration with DRM
 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled: True if the encoder is active, protected by enc_lock
 * @commit_done_timedout: True if there has been a timeout on commit after
 *	enabling the encoder.
 * @num_phys_encs: Actual number of physical encoders contained.
 * @phys_encs: Container of physical encoders managed.
 * @cur_master: Pointer to the current master in this mode; an optimization.
 *	Only valid after enable, cleared at disable.
 * @cur_slave: As above but for the slave encoder.
 * @hw_pp: Handle to the pingpong blocks used for the display. The number
 *	of pingpong blocks used can differ from num_phys_encs.
 * @hw_dsc: Handle to the DSC blocks used for the display.
 * @dsc_mask: Bitmask of used DSC blocks.
 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
 *	for partial update right-only cases, such as pingpong
 *	split where virtual pingpong does not generate IRQs
 * @crtc: Pointer to the currently assigned crtc. Normally you
 *	would use crtc->state->encoder_mask to determine the
 *	link between encoder/crtc. However in this case we need
 *	to track crtc in the disable() hook which is called
 *	_after_ encoder_mask is cleared.
 * @connector: If a mode is set, cached pointer to the active connector
 * @enc_lock: Lock around physical encoder
 *	create/destroy/enable/disable
 * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
 *	processing the current command.
 *	Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb: callback handler for frame event
 * @crtc_frame_event_cb_data: callback handler private data
 * @frame_done_timeout_ms: frame done timeout in ms
 * @frame_done_timeout_cnt: atomic counter tracking the number of frame
 *	done timeouts
 * @frame_done_timer: watchdog timer for frame done event
 * @disp_info: local copy of msm_display_info struct
 * @idle_pc_supported: indicate if idle power collapse is supported
 * @rc_lock: resource control mutex lock to protect
 *	virt encoder over various state changes
 * @rc_state: resource controller state
 * @delayed_off_work: delayed worker to schedule disabling of
 *	clks and resources after IDLE_TIMEOUT time.
 * @topology: topology of the display
 * @idle_timeout: idle timeout duration in milliseconds
 * @wide_bus_en: wide bus is enabled on this interface
 * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;
	bool commit_done_timedout;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	atomic_t frame_done_timeout_cnt;
	struct timer_list frame_done_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

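/**
 * dpu_encoder_get_drm_fmt - return DRM fourcc format for the current mode
 * @phys_enc: Pointer to physical encoder structure
 *
 * Returns DRM_FORMAT_YUV420 when the connector supports only YUV420 output
 * for the cached mode, DRM_FORMAT_RGB888 otherwise.
 */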
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct drm_display_info *info;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	info = &dpu_enc->connector->display_info;
	mode = &phys_enc->cached_mode;

	if (drm_mode_is_420_only(info, mode))
		return DRM_FORMAT_YUV420;

	return DRM_FORMAT_RGB888;
}

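/**
 * dpu_encoder_needs_periph_flush - check if a peripheral flush is needed
 * @phys_enc: Pointer to physical encoder structure
 *
 * Only DP interfaces may need a peripheral flush; the decision for the
 * current mode is delegated to the DP driver.
 */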
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct msm_display_info *disp_info;
	struct msm_drm_private *priv;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	priv = drm_enc->dev->dev_private;
	mode = &phys_enc->cached_mode;

	return phys_enc->hw_intf->cap->type == INTF_DP &&
	       msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
}

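/**
 * dpu_encoder_is_widebus_enabled - query whether widebus is enabled
 * @drm_enc: Pointer to drm encoder structure
 *
 * Delegates to the DP or DSI driver backing the first horizontal tile;
 * all other interface types report widebus as disabled.
 */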
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	const struct msm_display_info *disp_info;
	int index;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	index = disp_info->h_tile_instance[0];

	if (disp_info->intf_type == INTF_DP)
		return msm_dp_wide_bus_available(priv->dp[index]);
	else if (disp_info->intf_type == INTF_DSI)
		return msm_dsi_wide_bus_enabled(priv->dsi[index]);

	return false;
}

bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->dsc ? true : false;
}

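/**
 * dpu_encoder_get_crc_values_cnt - count interfaces that can report a CRC
 * @drm_enc: Pointer to drm encoder structure
 *
 * Returns the number of physical encoders whose interface implements both
 * the setup_misr and collect_misr hooks.
 */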
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, num_intf = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
				&& phys->hw_intf->ops.collect_misr)
			num_intf++;
	}

	return num_intf;
}

void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
			continue;

		phys->hw_intf->ops.setup_misr(phys->hw_intf);
	}
}

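/**
 * dpu_encoder_get_crc - collect CRC (MISR) values from capable interfaces
 * @drm_enc: Pointer to drm encoder structure
 * @crcs: Array to fill with collected CRC values
 * @pos: First index of @crcs to fill
 *
 * Returns the number of entries added, or a negative error code from the
 * first collect_misr() call that fails.
 */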
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, rc = 0, entries_added = 0;

	if (!drm_enc->crtc) {
		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
			continue;

		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
		if (rc)
			return rc;
		entries_added++;
	}

	return entries_added;
}

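/*
 * Program (or disable) pingpong dithering: only a 6 bpc panel gets the
 * fixed 4x4 dither matrix; any other depth clears the dither config.
 */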
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
	       sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
		  DRMID(phys_enc->parent),
		  dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
		  phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
		  phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
		  phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

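/**
 * dpu_encoder_helper_wait_for_irq - utility to wait on an irq event
 * @phys_enc: Pointer to physical encoder structure
 * @irq_idx: irq index to wait on
 * @func: irq callback to run if the wait hits the timeout with the irq
 *	status already set
 * @wait_info: wait info struct
 *
 * Returns: 0 or -ERROR
 */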
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		unsigned int irq_idx,
		void (*func)(void *arg),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
			  DRMID(phys_enc->parent), func,
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EWOULDBLOCK;
	}

	if (irq_idx < 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq_idx,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;

	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

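/**
 * dpu_encoder_helper_split_config - split display configuration helper
 * @phys_enc: Pointer to physical encoder structure
 * @interface: enum dpu_intf setting
 *
 * Programs the MDP top split-pipe registers according to the encoder's
 * split role; only DSI displays need a split configuration here.
 */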
void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != INTF_DSI)
		return;

	/*
	 * disable split modes since encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
	    phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

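/*
 * DSC merge is used when more DSC encoders are reserved than interfaces
 * driven, i.e. two DSC blocks feeding a single interface.
 */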
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int index = dpu_enc->disp_info.h_tile_instance[0];

	if (dpu_enc->disp_info.intf_type == INTF_DSI)
		return msm_dsi_get_dsc_config(priv->dsi[index]);

	return NULL;
}

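/*
 * Derive the datapath topology (LM/DSPP/INTF/DSC counts) for this virtual
 * encoder from the interface count, mode, CTM state and DSC config.
 */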
static struct msm_display_topology dpu_encoder_get_topology(
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_kms *dpu_kms,
		struct drm_display_mode *mode,
		struct drm_crtc_state *crtc_state,
		struct drm_dsc_config *dsc)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface
		 * this is power optimal and can drive up to (including) 4k
		 * screens
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct msm_display_info *disp_info;
	struct dpu_global_state *global_state;
	struct drm_framebuffer *fb;
	struct drm_dsc_config *dsc;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
			  drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	disp_info = &dpu_enc->disp_info;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	dsc = dpu_encoder_get_dsc_config(drm_enc);

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

	/*
	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
	 * earlier.
	 */
	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;

		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
			topology.needs_cdm = true;
	} else if (disp_info->intf_type == INTF_DP) {
		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
			topology.needs_cdm = true;
	}

	if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
		crtc_state->mode_changed = true;
	else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
		crtc_state->mode_changed = true;

	/*
	 * Release and Allocate resources on every modeset
	 * Don't allocate when active is false.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		dpu_rm_release(global_state, drm_enc);

		if (!crtc_state->active_changed || crtc_state->enable)
			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
					     drm_enc, crtc_state, topology);
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

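/*
 * For command-mode panels, select the TE/vsync source (watchdog timer or
 * GPIO) at the MDP top level and mirror the selection into each interface
 * that handles TE itself.
 */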
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
		struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	struct dpu_encoder_phys *phys_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
			  dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
			  dpu_enc->num_phys_encs,
			  (int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source &&
	    disp_info->is_cmd_mode) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

		if (disp_info->is_te_using_watchdog_timer)
			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
		else
			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys_enc = dpu_enc->phys_encs[i];

			if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
				phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
						vsync_cfg.vsync_source);
		}
	}
}

static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_enable(phys);
	}
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_disable(phys);
	}
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_enable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* enable DPU core clks */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* enable all the irq */
	_dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_disable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* disable all the irq */
	_dpu_encoder_irq_disable(drm_enc);

	/* disable DPU core clks */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

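/*
 * Resource control state machine: serializes KICKOFF, PRE_STOP, STOP and
 * ENTER_IDLE transitions under rc_lock, enabling or releasing clocks and
 * IRQs as the encoder moves between the OFF, ON, PRE_OFF and IDLE states.
 */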
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (i.e. wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
	    (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
	     sw_event != DPU_ENC_RC_EVENT_STOP &&
	     sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
				      sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
					 DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
			   dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
					 DRMID(drm_enc), sw_event,
					 dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_enable(drm_enc);
		else
			_dpu_encoder_resource_enable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context. And locking is not required as the other events,
		 * like KICKOFF and STOP, do a wait-for-idle before executing
		 * the resource_control
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
				      DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
				      sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
		    dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_enable(drm_enc);
		}
		/* skip if already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
			 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/*
		 * expect to arrive here only if in either idle state or pre-off;
		 * in IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_disable(drm_enc);
		else
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			 "end");
	return 0;
}

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);
	}
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);
	}
}

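/*
 * Bind the hardware blocks (pingpong, CTL, mixer, DSPP, DSC, CDM) reserved
 * during atomic check to this virtual encoder and its physical encoders
 * for the new mode.
 */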
static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_pp, num_dsc;
	unsigned int dsc_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resources that have been reserved in the atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
			ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
			ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
			: NULL;

	num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_DSC,
			hw_dsc, ARRAY_SIZE(hw_dsc));
	for (i = 0; i < num_dsc; i++) {
		dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
		dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
	}

	dpu_enc->dsc_mask = dsc_mask;

	if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) ||
	    dpu_enc->disp_info.intf_type == INTF_DP) {
		struct dpu_hw_blk *hw_cdm = NULL;

		dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
				drm_enc->base.id, DPU_HW_BLK_CDM,
				&hw_cdm, 1);
		dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
	}

	cstate = to_dpu_crtc_state(crtc_state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	dpu_enc->connector = conn_state->connector;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				      "no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				      "no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		phys->cached_mode = crtc_state->adjusted_mode;
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}

	if (dpu_enc->disp_info.intf_type == INTF_DP &&
	    dpu_enc->cur_master->hw_mdptop &&
	    dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == INTF_DSI &&
	    !WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;

		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
		struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);

	mutex_lock(&dpu_enc->enc_lock);

	dpu_enc->commit_done_timedout = false;

	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
			      ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
		struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
	if (crtc)
		old_state = drm_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The encoder is already disabled if self refresh mode was set earlier,
	 * in the old_state for the corresponding crtc.
	 */
	if (old_state && old_state->self_refresh_active)
		return;

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_tx_complete(drm_enc);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}

	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

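/*
 * Look up the hardware interface matching @type and @controller_id in the
 * catalog; writeback encoders have no INTF, so they return NULL.
 */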
static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
		struct dpu_rm *dpu_rm,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type == INTF_WB)
		return NULL;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
		}
	}

	return NULL;
}

void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	atomic_inc(&phy_enc->vsync_cnt);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	DPU_ATRACE_END("encoder_vblank_callback");
}

void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
		struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
		void (*frame_event_cb)(void *, u32 event),
		void *frame_event_cb_data)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	bool enable;

	enable = frame_event_cb ? true : false;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	dpu_enc->crtc_frame_event_cb = frame_event_cb;
	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

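/**
 * dpu_encoder_frame_done_callback - notify virtual encoder of frame done
 * @drm_enc: Pointer to drm encoder structure
 * @ready_phys: Pointer to the physical encoder that completed
 * @event: DPU_ENCODER_FRAME_EVENT_* bitmask describing the completion
 *
 * Clears the busy bit for @ready_phys and, once all physical encoders are
 * idle, stops the frame-done watchdog and forwards the event to the crtc.
 */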
void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/*
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
					ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc_frame_event_cb)
				dpu_enc->crtc_frame_event_cb(
					dpu_enc->crtc_frame_event_cb_data,
					event);
		}
	} else {
		if (dpu_enc->crtc_frame_event_cb)
			dpu_enc->crtc_frame_event_cb(
				dpu_enc->crtc_frame_event_cb_data, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
			DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
			DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->hw_intf ? phys->hw_intf->idx : -1,
			phys->hw_wb ? phys->hw_wb->idx : -1,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

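/*
 * Wait for the pending count to drop to zero, retrying spurious wakeups
 * until the requested timeout (tracked against ktime) really elapses.
 */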
dpu_encoder_helper_wait_event_timeout(int32_t drm_id,unsigned int irq_idx,struct dpu_encoder_wait_info * info)1615 static int dpu_encoder_helper_wait_event_timeout(
1616 int32_t drm_id,
1617 unsigned int irq_idx,
1618 struct dpu_encoder_wait_info *info)
1619 {
1620 int rc = 0;
1621 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1622 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1623 s64 time;
1624
1625 do {
1626 rc = wait_event_timeout(*(info->wq),
1627 atomic_read(info->atomic_cnt) == 0, jiffies);
1628 time = ktime_to_ms(ktime_get());
1629
1630 trace_dpu_enc_wait_event_timeout(drm_id,
1631 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
1632 rc, time,
1633 expected_time,
1634 atomic_read(info->atomic_cnt));
1635 /* If we timed out, counter is valid and time is less, wait again */
1636 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1637 (time < expected_time));
1638
1639 return rc;
1640 }
1641
dpu_encoder_helper_hw_reset(struct dpu_encoder_phys * phys_enc)1642 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1643 {
1644 struct dpu_encoder_virt *dpu_enc;
1645 struct dpu_hw_ctl *ctl;
1646 int rc;
1647 struct drm_encoder *drm_enc;
1648
1649 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1650 ctl = phys_enc->hw_ctl;
1651 drm_enc = phys_enc->parent;
1652
1653 if (!ctl->ops.reset)
1654 return;
1655
1656 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1657 ctl->idx);
1658
1659 rc = ctl->ops.reset(ctl);
1660 if (rc) {
1661 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1662 msm_disp_snapshot_state(drm_enc->dev);
1663 }
1664
1665 phys_enc->enable_state = DPU_ENC_ENABLED;
1666 }
1667
/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 * @dpu_enc: Pointer to virtual encoder structure
 *
 * Iterate through the physical encoders and perform consolidated flush
 * and/or control start triggering as needed. This is done in the virtual
 * encoder rather than the individual physical ones in order to handle
 * use cases that require visibility into multiple physical encoders at
 * a time.
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
		    !phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

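/**
 * dpu_encoder_trigger_kickoff_pending - clear leftover flush state
 * @drm_enc: Pointer to drm encoder structure
 *
 * Clear the pending-flush mask on each physical encoder's CTL block
 * and trigger the pending state on the command-mode master's CTL, so
 * the next kickoff starts from a clean slate.
 */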
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		if (ctl->ops.clear_pending_flush)
			ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		    disp_info->is_cmd_mode
		    && ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
		struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
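	/*
	 * Worked example with hypothetical 1080p@60 timings: for
	 * mode->clock = 148500 kHz, pclk_period = DIV_ROUND_UP(10^9,
	 * 148500) = 6735 ps; with mode->htotal = 2200 this yields
	 * (6735 * 2200) / 1000 = 14817 ns per line.
	 */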
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
		      "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
		      pclk_rate, pclk_period, line_time);

	return line_time;
}

int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 cur_line;
	u32 line_time;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
			  vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
		      "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
		      cur_line, vtotal, time_to_vsync,
		      ktime_to_ms(cur_time),
		      ktime_to_ms(*wakeup_time));
	return 0;
}

static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
				  u32 enc_ip_width)
{
	int ssm_delay, total_pixels, soft_slice_per_enc;

	soft_slice_per_enc = enc_ip_width / dsc->slice_width;

	/*
	 * minimum number of initial line pixels is a sum of:
	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
	 *    91 for 10 bpc) * 3
	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
	 * 3. the initial xmit delay
	 * 4. total pipeline delay through the "lock step" of encoder (47)
	 * 5. 6 additional pixels as the output of the rate buffer is
	 *    48 bits wide
	 */
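	/*
	 * Worked example with hypothetical parameters: for 8 bpc
	 * (ssm_delay = 84), initial_xmit_delay = 512, slice_width = 540
	 * and enc_ip_width = 1080 (two soft slices per encoder),
	 * total_pixels = 84 * 3 + 512 + 47 + 84 * 3 = 1063, so
	 * DIV_ROUND_UP(1063, 540) = 2 initial lines.
	 */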
	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct drm_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct drm_dsc_config *dsc)
{
	/* coded only for the 2 LM, 2 encoder, 1 DSC configuration */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	dsc_common_mode = 0;
	pic_width = dsc->pic_width;

	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
	if (dpu_encoder_use_dsc_merge(enc_master->parent))
		dsc_common_mode |= DSC_MODE_MULTIPLEX;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices must be the same on both encoders.
	 */
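	/*
	 * E.g. with a hypothetical pic_width of 2160 and slice_width of
	 * 1080, this_frame_slices = 2 and intf_ip_w = 2160, so each of
	 * the two encoders below ingests enc_ip_w = 1080 pixels.
	 */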
	enc_ip_w = intf_ip_w / 2;
	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
					 dsc, dsc_common_mode, initial_lines);
}

void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
		}
	}

	if (dpu_enc->dsc)
		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}

bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	unsigned int i;
	struct dpu_encoder_phys *phys;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys = dpu_enc->phys_encs[i];
			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
				DPU_DEBUG("invalid FB not kicking off\n");
				return false;
			}
		}
	}

	return true;
}

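/**
 * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
 * @drm_enc: Pointer to drm encoder structure
 *
 * Arm the frame-done watchdog timer, perform the consolidated flush
 * and start triggering for all physical encoders, then give each
 * physical encoder a chance to run its post-kickoff handler.
 */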
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

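	/*
	 * Budget DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES frame intervals
	 * before declaring a frame-done timeout; at a 60 Hz refresh rate
	 * this works out to 5 * 1000 / 60 = 83 ms (integer division).
	 */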
	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
		  jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	DPU_ATRACE_END("encoder_kickoff");
}

static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_mixer_cfg mixer;
	int i, num_lm;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_lm[2];
	struct dpu_hw_mixer *hw_mixer[2];
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;

	memset(&mixer, 0, sizeof(mixer));

	/* reset all mixers for this encoder */
	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);

	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);

	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
			phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));

	for (i = 0; i < num_lm; i++) {
		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
		if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
			phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);

		/* clear all blendstages */
		if (phys_enc->hw_ctl->ops.setup_blendstage)
			phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
	}
}

static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp)
{
	if (hw_dsc->ops.dsc_disable)
		hw_dsc->ops.dsc_disable(hw_dsc);

	if (hw_pp->ops.disable_dsc)
		hw_pp->ops.disable_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
{
	/* coded only for the 2 LM, 2 encoder, 1 DSC configuration */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (hw_pp[i] && hw_dsc[i])
			dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
	}
}

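/**
 * dpu_encoder_helper_phys_cleanup - cleanup after a physical encoder
 * @phys_enc: Pointer to physical encoder structure
 *
 * Reset the CTL path: clear the mixers, unbind the pingpong block from
 * the WB/INTF/CDM blocks, reset the merge-3D mode, tear down DSC if it
 * was in use, and issue a final flush/start so the cleared interface
 * configuration is latched by the hardware.
 */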
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	int i;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);

	phys_enc->hw_ctl->ops.reset(ctl);

	dpu_encoder_helper_reset_mixers(phys_enc);

	/*
	 * TODO: move the once-only operation like CTL flush/trigger
	 * into dpu_encoder_virt_disable() and all operations which need
	 * to be done per phys encoder into the phys_disable() op.
	 */
	if (phys_enc->hw_wb) {
		/* disable the PP block */
		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);

		/* mark WB flush as pending */
		if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
			phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
	} else {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
				phys_enc->hw_intf->ops.bind_pingpong_blk(
						dpu_enc->phys_encs[i]->hw_intf,
						PINGPONG_NONE);

			/* mark INTF flush as pending */
			if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
				phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
						dpu_enc->phys_encs[i]->hw_intf->idx);
		}
	}

	/* reset the merge 3D HW block */
	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
				BLEND_3D_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
			phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
					phys_enc->hw_pp->merge_3d->idx);
	}

	if (phys_enc->hw_cdm) {
		if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
			phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
					PINGPONG_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
			phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
					phys_enc->hw_cdm->idx);
	}

	if (dpu_enc->dsc) {
		dpu_encoder_unprep_dsc(dpu_enc);
		dpu_enc->dsc = NULL;
	}

	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);

	if (phys_enc->hw_intf)
		intf_cfg.intf = phys_enc->hw_intf->idx;
	if (phys_enc->hw_wb)
		intf_cfg.wb = phys_enc->hw_wb->idx;

	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	if (ctl->ops.reset_intf_cfg)
		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);

	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);
	ctl->ops.clear_pending_flush(ctl);
}

void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
				       const struct msm_format *dpu_fmt,
				       u32 output_type)
{
	struct dpu_hw_cdm *hw_cdm;
	struct dpu_hw_cdm_cfg *cdm_cfg;
	struct dpu_hw_pingpong *hw_pp;
	int ret;

	if (!phys_enc)
		return;

	cdm_cfg = &phys_enc->cdm_cfg;
	hw_pp = phys_enc->hw_pp;
	hw_cdm = phys_enc->hw_cdm;

	if (!hw_cdm)
		return;

	if (!MSM_FORMAT_IS_YUV(dpu_fmt)) {
		DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent),
			  &dpu_fmt->pixel_format);
		if (hw_cdm->ops.bind_pingpong_blk)
			hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);

		return;
	}

	memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));

	cdm_cfg->output_width = phys_enc->cached_mode.hdisplay;
	cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
	cdm_cfg->output_fmt = dpu_fmt;
	cdm_cfg->output_type = output_type;
	cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ?
			CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
	cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;

	/* enable 10 bit logic */
	switch (cdm_cfg->output_fmt->chroma_sample) {
	case CHROMA_FULL:
		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	case CHROMA_H2V1:
		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	case CHROMA_420:
		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
		break;
	case CHROMA_H1V2:
	default:
		DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
			  DRMID(phys_enc->parent));
		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	}

	DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d\n",
		  DRMID(phys_enc->parent), cdm_cfg->output_width,
		  cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format,
		  cdm_cfg->output_type, cdm_cfg->output_bit_depth,
		  cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);

	if (hw_cdm->ops.enable) {
		cdm_cfg->pp_id = hw_pp->idx;
		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
		if (ret < 0) {
			DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
				  DRMID(phys_enc->parent), ret);
			return;
		}
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct drm_encoder *drm_enc = s->private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d",
			   phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
			   phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
			   atomic_read(&phys->vsync_cnt),
			   atomic_read(&phys->underrun_cnt),
			   atomic_read(&dpu_enc->frame_done_timeout_cnt));

		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
{
	/* don't error check these */
	debugfs_create_file("status", 0600,
			    root, drm_enc, &_dpu_encoder_status_fops);
}
#else
#define dpu_encoder_debugfs_init NULL
#endif

static int dpu_encoder_virt_add_phys_encs(
		struct drm_device *dev,
		struct msm_display_info *disp_info,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders
	 * in this function, so check the array bound up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			      dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (disp_info->intf_type == INTF_WB) {
		enc = dpu_encoder_phys_wb_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
				      PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	} else if (disp_info->is_cmd_mode) {
		enc = dpu_encoder_phys_cmd_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				      PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	} else {
		enc = dpu_encoder_phys_vid_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				      PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
				     struct dpu_kms *dpu_kms,
				     struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if (disp_info->intf_type != INTF_WB)
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
			  i, controller_id, phys_params.split_role);

		phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
							   disp_info->intf_type,
							   controller_id);

		if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
			phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);

		if (!phys_params.hw_intf && !phys_params.hw_wb) {
			DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
			ret = -EINVAL;
			break;
		}

		if (phys_params.hw_intf && phys_params.hw_wb) {
			DPU_ERROR_ENC(dpu_enc,
				      "invalid phys both intf and wb block at idx: %d\n", i);
			ret = -EINVAL;
			break;
		}

		ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
						     dpu_enc, &phys_params);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
			break;
		}
	}

	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}

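/*
 * Watchdog armed by dpu_encoder_kickoff(): if no frame-done event
 * arrives within the programmed budget, report a frame event error to
 * the CRTC and capture a display snapshot on the first occurrence.
 */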
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");

	if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
	.atomic_disable = dpu_encoder_virt_atomic_disable,
	.atomic_enable = dpu_encoder_virt_atomic_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.debugfs_init = dpu_encoder_debugfs_init,
};

struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
				     int drm_enc_mode,
				     struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dpu_encoder_virt *dpu_enc;
	int ret;

	dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
				     &dpu_encoder_funcs, drm_enc_mode, NULL);
	if (IS_ERR(dpu_enc))
		return ERR_CAST(dpu_enc);

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret) {
		DPU_ERROR("failed to setup encoder\n");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
	timer_setup(&dpu_enc->frame_done_timer,
		    dpu_encoder_frame_done_timeout, 0);

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			  dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return &dpu_enc->base;
}

/**
 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
 * @drm_enc: encoder pointer
 *
 * Wait for hardware to have flushed the current pending changes to hardware at
 * a vblank or CTL_START. Physical encoders will map this differently depending
 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
 *
 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
 */
int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.wait_for_commit_done) {
			DPU_ATRACE_BEGIN("wait_for_commit_done");
			ret = phys->ops.wait_for_commit_done(phys);
			DPU_ATRACE_END("wait_for_commit_done");
			if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
				dpu_enc->commit_done_timedout = true;
				msm_disp_snapshot_state(drm_enc->dev);
			}
			if (ret)
				return ret;
		}
	}

	return ret;
}

/**
 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
 * @drm_enc: encoder pointer
 *
 * Wait for the hardware to transfer all the pixels to the panel. Physical
 * encoders will map this differently depending on the type: vid mode -> vsync_irq,
 * cmd mode -> pp_done.
 *
 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
 */
int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.wait_for_tx_complete) {
			DPU_ATRACE_BEGIN("wait_for_tx_complete");
			ret = phys->ops.wait_for_tx_complete(phys);
			DPU_ATRACE_END("wait_for_tx_complete");
			if (ret)
				return ret;
		}
	}

	return ret;
}

enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}

unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *encoder = phys_enc->parent;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	return dpu_enc->dsc_mask;
}

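/**
 * dpu_encoder_phys_init - initialize the common fields of a physical encoder
 * @phys_enc: Pointer to physical encoder structure
 * @p: Pointer to the init parameters
 *
 * Copy the hardware block pointers and split role from the init params,
 * mark the encoder as disabled and zero its counters.
 */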
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
			   struct dpu_enc_phys_init_params *p)
{
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->hw_intf = p->hw_intf;
	phys_enc->hw_wb = p->hw_wb;
	phys_enc->parent = p->parent;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->enc_spinlock = p->enc_spinlock;
	phys_enc->enable_state = DPU_ENC_DISABLED;

	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);

	atomic_set(&phys_enc->vsync_cnt, 0);
	atomic_set(&phys_enc->underrun_cnt, 0);

	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
}
