1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 
60 #define DC_LOGGER \
61 	dc_logger
62 #define DC_LOGGER_INIT(logger) \
63 	struct dal_logger *dc_logger = logger
64 
65 #define CTX \
66 	hws->ctx
67 #define REG(reg)\
68 	hws->regs->reg
69 
70 #undef FN
71 #define FN(reg_name, field_name) \
72 	hws->shifts->field_name, hws->masks->field_name
73 
74 /*print is 17 wide, first two characters are spaces*/
75 #define DTN_INFO_MICRO_SEC(ref_cycle) \
76 	print_microsec(dc_ctx, log_ctx, ref_cycle)
77 
78 #define GAMMA_HW_POINTS_NUM 256
79 
80 #define PGFSM_POWER_ON 0
81 #define PGFSM_POWER_OFF 2
82 
83 static void print_microsec(struct dc_context *dc_ctx,
84 			   struct dc_log_buffer_ctx *log_ctx,
85 			   uint32_t ref_cycle)
86 {
87 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
88 	static const unsigned int frac = 1000;
89 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
90 
91 	DTN_INFO("  %11d.%03d",
92 			us_x10 / frac,
93 			us_x10 % frac);
94 }
95 
96 void dcn10_lock_all_pipes(struct dc *dc,
97 	struct dc_state *context,
98 	bool lock)
99 {
100 	struct pipe_ctx *pipe_ctx;
101 	struct pipe_ctx *old_pipe_ctx;
102 	struct timing_generator *tg;
103 	int i;
104 
105 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
106 		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
107 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
108 		tg = pipe_ctx->stream_res.tg;
109 
110 		/*
111 		 * Only lock the top pipe's tg to prevent redundant
112 		 * (un)locking. Also skip if pipe is disabled.
113 		 */
114 		if (pipe_ctx->top_pipe ||
115 		    !pipe_ctx->stream ||
116 		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
117 		    !tg->funcs->is_tg_enabled(tg) ||
118 			pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
119 			continue;
120 
121 		if (lock)
122 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
123 		else
124 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
125 	}
126 }
127 
/*
 * Dump the MPC and DPP CRC result registers to the DTN log.
 * Each register is only read if its offset is populated for this ASIC
 * (REG() evaluates to the offset stored in hws->regs).
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
141 
/*
 * Dump the HUBBUB watermark sets (data urgent, pte/meta urgent,
 * self-refresh enter/exit, dram clock change) to the DTN log,
 * converted from refclk cycles to microseconds.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
				   struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	/* Read the current watermark programming back from hardware. */
	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
			"         sr_enter          sr_exit  dram_clk_change\n");

	/* Four watermark sets are reported by wm_read_state(). */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		/* DTN_INFO_MICRO_SEC() converts refclk cycles to us. */
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
170 
/*
 * Dump per-HUBP state to the DTN log in four tables:
 * general HUBP state, RQ (request), DLG (delay/latency generator)
 * and TTU (time to underflow) register contents.
 * Blanked pipes (s->blank_en) are omitted from every table.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	/* Table 1: general HUBP surface/viewport/status state. */
	DTN_INFO(
		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Refresh the cached state from hardware before printing. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			/* Watermarks are refclk cycles; print as microseconds. */
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Table 2: RQ registers (luma rq_regs_l and chroma rq_regs_c). */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Table 3: DLG (delay/latency generator) registers. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
			"  x_rp_dlay  x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* Table 4: TTU (time to underflow) registers. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
284 
/*
 * dcn10_log_hw_state - dump a snapshot of DCN hardware state to the DTN log.
 *
 * @dc: dc struct reference.
 * @log_ctx: log buffer context the DTN_INFO macros write into.
 *
 * Sections, in order: HUBBUB watermarks, HUBP tables, DPP gamma/gamut
 * state, MPCC tree, OTG timing, DSC, stream/link encoders, calculated
 * clocks, MPC CRCs and (when present) DP HPO stream/link encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP section: input/degamma/regamma LUT modes and gamut remap. */
	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
			"C31 C32   C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		/* LUT mode enums are decoded to human-readable strings. */
		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* MPCC section: blending tree state per MPCC instance. */
	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is not attached to an OPP. */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	/* OTG section: timing generator programming and underflow status. */
	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	/* DSC section: per-instance clock enable, slice width, bpp. */
	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* Stream encoder section (only encoders exposing enc_read_state). */
	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Link encoder section: DPHY FEC and link training status. */
	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* link_enc may be NULL (e.g. not assigned); guard both. */
		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Clock section: values from the current state's bandwidth context. */
	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	/* DP HPO (128b/132b) encoder sections, present on newer ASICs only. */
	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
538 
539 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
540 {
541 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
542 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
543 
544 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
545 		tg->funcs->clear_optc_underflow(tg);
546 		return true;
547 	}
548 
549 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
550 		hubp->funcs->hubp_clear_underflow(hubp);
551 		return true;
552 	}
553 	return false;
554 }
555 
556 void dcn10_enable_power_gating_plane(
557 	struct dce_hwseq *hws,
558 	bool enable)
559 {
560 	bool force_on = true; /* disable power gating */
561 
562 	if (enable)
563 		force_on = false;
564 
565 	/* DCHUBP0/1/2/3 */
566 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
567 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
568 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
569 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
570 
571 	/* DPP0/1/2/3 */
572 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
573 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
574 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
575 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
576 }
577 
/*
 * dcn10_disable_vga - take all four display pipes out of VGA mode.
 *
 * @hws: dce_hwseq reference.
 *
 * No-op when none of D1-D4 VGA modes is enabled; otherwise clears all
 * four D*VGA_CONTROL registers and kicks the VGA test-render sequence
 * so DCHUBP timing is updated (see HW note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do if VGA mode is already off everywhere. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
610 
611 /**
612  * dcn10_dpp_pg_control - DPP power gate control.
613  *
614  * @hws: dce_hwseq reference.
615  * @dpp_inst: DPP instance reference.
616  * @power_on: true if we want to enable power gate, false otherwise.
617  *
618  * Enable or disable power gate in the specific DPP instance.
619  */
620 void dcn10_dpp_pg_control(
621 		struct dce_hwseq *hws,
622 		unsigned int dpp_inst,
623 		bool power_on)
624 {
625 	uint32_t power_gate = power_on ? 0 : 1;
626 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
627 
628 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
629 		return;
630 	if (REG(DOMAIN1_PG_CONFIG) == 0)
631 		return;
632 
633 	switch (dpp_inst) {
634 	case 0: /* DPP0 */
635 		REG_UPDATE(DOMAIN1_PG_CONFIG,
636 				DOMAIN1_POWER_GATE, power_gate);
637 
638 		REG_WAIT(DOMAIN1_PG_STATUS,
639 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
640 				1, 1000);
641 		break;
642 	case 1: /* DPP1 */
643 		REG_UPDATE(DOMAIN3_PG_CONFIG,
644 				DOMAIN3_POWER_GATE, power_gate);
645 
646 		REG_WAIT(DOMAIN3_PG_STATUS,
647 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
648 				1, 1000);
649 		break;
650 	case 2: /* DPP2 */
651 		REG_UPDATE(DOMAIN5_PG_CONFIG,
652 				DOMAIN5_POWER_GATE, power_gate);
653 
654 		REG_WAIT(DOMAIN5_PG_STATUS,
655 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
656 				1, 1000);
657 		break;
658 	case 3: /* DPP3 */
659 		REG_UPDATE(DOMAIN7_PG_CONFIG,
660 				DOMAIN7_POWER_GATE, power_gate);
661 
662 		REG_WAIT(DOMAIN7_PG_STATUS,
663 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
664 				1, 1000);
665 		break;
666 	default:
667 		BREAK_TO_DEBUGGER();
668 		break;
669 	}
670 }
671 
672 /**
673  * dcn10_hubp_pg_control - HUBP power gate control.
674  *
675  * @hws: dce_hwseq reference.
676  * @hubp_inst: DPP instance reference.
677  * @power_on: true if we want to enable power gate, false otherwise.
678  *
679  * Enable or disable power gate in the specific HUBP instance.
680  */
681 void dcn10_hubp_pg_control(
682 		struct dce_hwseq *hws,
683 		unsigned int hubp_inst,
684 		bool power_on)
685 {
686 	uint32_t power_gate = power_on ? 0 : 1;
687 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
688 
689 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
690 		return;
691 	if (REG(DOMAIN0_PG_CONFIG) == 0)
692 		return;
693 
694 	switch (hubp_inst) {
695 	case 0: /* DCHUBP0 */
696 		REG_UPDATE(DOMAIN0_PG_CONFIG,
697 				DOMAIN0_POWER_GATE, power_gate);
698 
699 		REG_WAIT(DOMAIN0_PG_STATUS,
700 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
701 				1, 1000);
702 		break;
703 	case 1: /* DCHUBP1 */
704 		REG_UPDATE(DOMAIN2_PG_CONFIG,
705 				DOMAIN2_POWER_GATE, power_gate);
706 
707 		REG_WAIT(DOMAIN2_PG_STATUS,
708 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
709 				1, 1000);
710 		break;
711 	case 2: /* DCHUBP2 */
712 		REG_UPDATE(DOMAIN4_PG_CONFIG,
713 				DOMAIN4_POWER_GATE, power_gate);
714 
715 		REG_WAIT(DOMAIN4_PG_STATUS,
716 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
717 				1, 1000);
718 		break;
719 	case 3: /* DCHUBP3 */
720 		REG_UPDATE(DOMAIN6_PG_CONFIG,
721 				DOMAIN6_POWER_GATE, power_gate);
722 
723 		REG_WAIT(DOMAIN6_PG_STATUS,
724 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
725 				1, 1000);
726 		break;
727 	default:
728 		BREAK_TO_DEBUGGER();
729 		break;
730 	}
731 }
732 
/*
 * Un-gate the front-end (DPP + HUBP) resources for one plane/pipe.
 * Root clock is enabled first, then the PG controls are toggled inside
 * an IP_REQUEST_EN window as required by the PG programming sequence.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	/* Optional hook: some ASICs gate the DPP root clock separately. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the IP request window before touching PG controls. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		/* Close the IP request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
758 
/*
 * Undo the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power gate it. No-op unless apply_DEGVIDCN10_253_wa() previously ran
 * (tracked via hws->wa_state.DEGVIDCN10_253_applied).
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* PG control must be toggled inside an IP_REQUEST_EN window. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
778 
/*
 * Apply the DEGVIDCN10_253 workaround: when every HUBP is power gated,
 * power HUBP0 back on (kept blanked) so stutter can be enabled.
 * Skipped when stutter is disabled via debug option or the workaround
 * is not flagged for this ASIC (hws->wa.DEGVIDCN10_253).
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only applicable once every pipe's HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	/* PG control must be toggled inside an IP_REQUEST_EN window. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
808 
/*
 * dcn10_bios_golden_init - run the VBIOS golden init sequence for DCN.
 *
 * @dc: dc struct reference.
 *
 * Invokes the BIOS command table to initialize DCN globally and to
 * power-gate each pipe, then restores the self-refresh force-enable
 * state if the command table changed it (S0i3 resume workaround below).
 * Skipped entirely when the s0i3 golden-init workaround hook handles it.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Capture the self-refresh state before the command table runs. */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	/* Restore self-refresh if the command table flipped it (see WA). */
	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
848 
/*
 * Workaround for false OPTC underflow reports: wait for MPCC disconnect
 * on every pipe of this stream, re-enable blank-data double buffering,
 * and clear any underflow that newly appeared during the sequence
 * (i.e. was not already set beforehand). Gated by the
 * wa.false_optc_underflow flag.
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether underflow was already flagged before the WA. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Only clear underflow that appeared during this sequence. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
877 
878 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
879 {
880 	struct pipe_ctx *other_pipe;
881 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
882 
883 	/* Always use the largest vready_offset of all connected pipes */
884 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
885 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
886 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
887 	}
888 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
889 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
890 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
891 	}
892 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
893 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
894 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
895 	}
896 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
897 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
898 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
899 	}
900 
901 	return vready_offset;
902 }
903 
/*
 * dcn10_enable_stream_timing - Program pixel clock and OTG timing and
 * enable the CRTC for the master pipe of a stream.
 *
 * Only the group's master pipe (top_pipe == NULL) programs the back end;
 * child pipes share it and return early.
 *
 * Return: DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock could
 * not be programmed or the CRTC failed to enable.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track symclk state for HDMI so PHY power management knows the
	 * OTG now holds a reference.
	 */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* vready offset is the max over the whole blend/ODM group sharing
	 * this OTG (see calculate_vready_offset_for_group).
	 */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC so no garbage scans out;
	 * also run the false-underflow workaround while blanked.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1005 
/*
 * dcn10_reset_back_end_for_pipe - Tear down stream back-end HW for a pipe.
 *
 * Turns off DPMS/audio for the stream, and for the group master pipe
 * (top_pipe == NULL) also disables the OTG, its clock and any DRR setup.
 * pipe_ctx->stream is cleared only when the pipe belongs to the current
 * state's pipe array.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* Nothing to tear down if a stream encoder was never acquired. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any DRR (variable refresh) programming on the OTG. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx lives in the
	 * current state; a pipe_ctx from another context is left as-is.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1074 
1075 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1076 {
1077 	struct hubp *hubp ;
1078 	unsigned int i;
1079 	bool need_recover = true;
1080 
1081 	if (!dc->debug.recovery_enabled)
1082 		return false;
1083 
1084 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1085 		struct pipe_ctx *pipe_ctx =
1086 			&dc->current_state->res_ctx.pipe_ctx[i];
1087 		if (pipe_ctx != NULL) {
1088 			hubp = pipe_ctx->plane_res.hubp;
1089 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1090 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1091 					/* one pipe underflow, we will reset all the pipes*/
1092 					need_recover = true;
1093 				}
1094 			}
1095 		}
1096 	}
1097 	if (!need_recover)
1098 		return false;
1099 	/*
1100 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1101 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1102 	DCHUBP_CNTL:HUBP_DISABLE=1
1103 	DCHUBP_CNTL:HUBP_DISABLE=0
1104 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1105 	DCSURF_PRIMARY_SURFACE_ADDRESS
1106 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1107 	*/
1108 
1109 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1110 		struct pipe_ctx *pipe_ctx =
1111 			&dc->current_state->res_ctx.pipe_ctx[i];
1112 		if (pipe_ctx != NULL) {
1113 			hubp = pipe_ctx->plane_res.hubp;
1114 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1115 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1116 				hubp->funcs->set_hubp_blank_en(hubp, true);
1117 		}
1118 	}
1119 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1120 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1121 
1122 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1123 		struct pipe_ctx *pipe_ctx =
1124 			&dc->current_state->res_ctx.pipe_ctx[i];
1125 		if (pipe_ctx != NULL) {
1126 			hubp = pipe_ctx->plane_res.hubp;
1127 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1128 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1129 				hubp->funcs->hubp_disable_control(hubp, true);
1130 		}
1131 	}
1132 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1133 		struct pipe_ctx *pipe_ctx =
1134 			&dc->current_state->res_ctx.pipe_ctx[i];
1135 		if (pipe_ctx != NULL) {
1136 			hubp = pipe_ctx->plane_res.hubp;
1137 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1138 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1139 				hubp->funcs->hubp_disable_control(hubp, true);
1140 		}
1141 	}
1142 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1143 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1144 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1145 		struct pipe_ctx *pipe_ctx =
1146 			&dc->current_state->res_ctx.pipe_ctx[i];
1147 		if (pipe_ctx != NULL) {
1148 			hubp = pipe_ctx->plane_res.hubp;
1149 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1150 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1151 				hubp->funcs->set_hubp_blank_en(hubp, true);
1152 		}
1153 	}
1154 	return true;
1155 
1156 }
1157 
/*
 * dcn10_verify_allow_pstate_change_high - Sanity-check the DCHUBBUB
 * p-state allow signal.
 *
 * If hubbub reports the allow signal stuck, optionally log HW state,
 * break to the debugger, attempt the forced recovery workaround and
 * re-check once.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): TRACE_DC_PIPE_STATE appears to bind the names
		 * 'pipe_ctx' and 'i' inside its expansion (pipe_ctx is not
		 * declared in this scope) — confirm against dc_trace.h before
		 * restructuring this block.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1181 
1182 /* trigger HW to start disconnect plane from stream on the next vsync */
1183 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1184 {
1185 	struct dce_hwseq *hws = dc->hwseq;
1186 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1187 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1188 	struct mpc *mpc = dc->res_pool->mpc;
1189 	struct mpc_tree *mpc_tree_params;
1190 	struct mpcc *mpcc_to_remove = NULL;
1191 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1192 
1193 	mpc_tree_params = &(opp->mpc_tree_params);
1194 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1195 
1196 	/*Already reset*/
1197 	if (mpcc_to_remove == NULL)
1198 		return;
1199 
1200 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1201 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1202 	// so don't wait for MPCC_IDLE in the programming sequence
1203 	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1204 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1205 
1206 	dc->optimized_required = true;
1207 
1208 	if (hubp->funcs->hubp_disconnect)
1209 		hubp->funcs->hubp_disconnect(hubp);
1210 
1211 	if (dc->debug.sanity_checks)
1212 		hws->funcs.verify_allow_pstate_change_high(dc);
1213 }
1214 
1215 /**
1216  * dcn10_plane_atomic_power_down - Power down plane components.
1217  *
1218  * @dc: dc struct reference. used for grab hwseq.
1219  * @dpp: dpp struct reference.
1220  * @hubp: hubp struct reference.
1221  *
1222  * Keep in mind that this operation requires a power gate configuration;
1223  * however, requests for switch power gate are precisely controlled to avoid
1224  * problems. For this reason, power gate request is usually disabled. This
1225  * function first needs to enable the power gate request before disabling DPP
1226  * and HUBP. Finally, it disables the power gate request again.
1227  */
1228 void dcn10_plane_atomic_power_down(struct dc *dc,
1229 		struct dpp *dpp,
1230 		struct hubp *hubp)
1231 {
1232 	struct dce_hwseq *hws = dc->hwseq;
1233 	DC_LOGGER_INIT(dc->ctx->logger);
1234 
1235 	if (REG(DC_IP_REQUEST_CNTL)) {
1236 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1237 				IP_REQUEST_EN, 1);
1238 
1239 		if (hws->funcs.dpp_pg_control)
1240 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1241 
1242 		if (hws->funcs.hubp_pg_control)
1243 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1244 
1245 		dpp->funcs->dpp_reset(dpp);
1246 
1247 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1248 				IP_REQUEST_EN, 0);
1249 		DC_LOG_DEBUG(
1250 				"Power gated front end %d\n", hubp->inst);
1251 	}
1252 
1253 	if (hws->funcs.dpp_root_clock_control)
1254 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
1255 }
1256 
1257 /* disable HW used by plane.
1258  * note:  cannot disable until disconnect is complete
1259  */
1260 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1261 {
1262 	struct dce_hwseq *hws = dc->hwseq;
1263 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1264 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1265 	int opp_id = hubp->opp_id;
1266 
1267 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1268 
1269 	hubp->funcs->hubp_clk_cntl(hubp, false);
1270 
1271 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1272 
1273 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1274 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1275 				pipe_ctx->stream_res.opp,
1276 				false);
1277 
1278 	hubp->power_gated = true;
1279 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1280 
1281 	hws->funcs.plane_atomic_power_down(dc,
1282 			pipe_ctx->plane_res.dpp,
1283 			pipe_ctx->plane_res.hubp);
1284 
1285 	pipe_ctx->stream = NULL;
1286 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1287 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1288 	pipe_ctx->top_pipe = NULL;
1289 	pipe_ctx->bottom_pipe = NULL;
1290 	pipe_ctx->plane_state = NULL;
1291 }
1292 
1293 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1294 {
1295 	struct dce_hwseq *hws = dc->hwseq;
1296 	DC_LOGGER_INIT(dc->ctx->logger);
1297 
1298 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1299 		return;
1300 
1301 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1302 
1303 	apply_DEGVIDCN10_253_wa(dc);
1304 
1305 	DC_LOG_DC("Power down front end %d\n",
1306 					pipe_ctx->pipe_idx);
1307 }
1308 
/*
 * dcn10_init_pipes - Bring all pipes to a known disabled state at init.
 *
 * Blanks any enabled OTG, resets DET allocation and MPC muxing,
 * disconnects and powers down every front end that is not claimed by a
 * seamless-boot stream, then power-gates all DSC instances except any
 * currently feeding an enabled OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	/* If any stream requests seamless boot, its pipes are left as the
	 * VBIOS/firmware configured them.
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Temporarily populate the pipe_ctx with this pipe's default
		 * resources so the disconnect/disable helpers can run on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC  of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s  = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1479 
/*
 * dcn10_init_hw - One-time DCN hardware initialization.
 *
 * Initializes clocks, DCCG, VGA takeover and BIOS golden settings,
 * derives reference clocks from VBIOS firmware info, brings up link
 * encoders, blanks all DP displays, powers down unclaimed pipes (unless
 * seamless boot), and initializes audio, backlight, ABM and DMCU.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* Take over from VGA if VBIOS did not already switch to
	 * accelerated mode.
	 */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal
	 * frequency; fall back to the crystal itself on ASICs without a
	 * DCCG software component.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Initialize panel control and capture current backlight so
		 * ABM starts from the level VBIOS left behind.
		 */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1616 
1617 /* In headless boot cases, DIG may be turned
1618  * on which causes HW/SW discrepancies.
1619  * To avoid this, power down hardware on boot
1620  * if DIG is turned on
1621  */
1622 void dcn10_power_down_on_boot(struct dc *dc)
1623 {
1624 	struct dc_link *edp_links[MAX_NUM_EDP];
1625 	struct dc_link *edp_link = NULL;
1626 	int edp_num;
1627 	int i = 0;
1628 
1629 	dc_get_edp_links(dc, edp_links, &edp_num);
1630 	if (edp_num)
1631 		edp_link = edp_links[0];
1632 
1633 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1634 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1635 			dc->hwseq->funcs.edp_backlight_control &&
1636 			dc->hwss.power_down &&
1637 			dc->hwss.edp_power_control) {
1638 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1639 		dc->hwss.power_down(dc);
1640 		dc->hwss.edp_power_control(edp_link, false);
1641 	} else {
1642 		for (i = 0; i < dc->link_count; i++) {
1643 			struct dc_link *link = dc->links[i];
1644 
1645 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1646 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1647 					dc->hwss.power_down) {
1648 				dc->hwss.power_down(dc);
1649 				break;
1650 			}
1651 
1652 		}
1653 	}
1654 
1655 	/*
1656 	 * Call update_clocks with empty context
1657 	 * to send DISPLAY_OFF
1658 	 * Otherwise DISPLAY_OFF may not be asserted
1659 	 */
1660 	if (dc->clk_mgr->funcs->set_low_power_state)
1661 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1662 }
1663 
1664 void dcn10_reset_hw_ctx_wrap(
1665 		struct dc *dc,
1666 		struct dc_state *context)
1667 {
1668 	int i;
1669 	struct dce_hwseq *hws = dc->hwseq;
1670 
1671 	/* Reset Back End*/
1672 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1673 		struct pipe_ctx *pipe_ctx_old =
1674 			&dc->current_state->res_ctx.pipe_ctx[i];
1675 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1676 
1677 		if (!pipe_ctx_old->stream)
1678 			continue;
1679 
1680 		if (pipe_ctx_old->top_pipe)
1681 			continue;
1682 
1683 		if (!pipe_ctx->stream ||
1684 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1685 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1686 
1687 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1688 			if (hws->funcs.enable_stream_gating)
1689 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1690 			if (old_clk)
1691 				old_clk->funcs->cs_power_down(old_clk);
1692 		}
1693 	}
1694 }
1695 
1696 static bool patch_address_for_sbs_tb_stereo(
1697 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1698 {
1699 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1700 	bool sec_split = pipe_ctx->top_pipe &&
1701 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1702 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1703 		(pipe_ctx->stream->timing.timing_3d_format ==
1704 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1705 		 pipe_ctx->stream->timing.timing_3d_format ==
1706 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1707 		*addr = plane_state->address.grph_stereo.left_addr;
1708 		plane_state->address.grph_stereo.left_addr =
1709 		plane_state->address.grph_stereo.right_addr;
1710 		return true;
1711 	} else {
1712 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1713 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1714 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1715 			plane_state->address.grph_stereo.right_addr =
1716 			plane_state->address.grph_stereo.left_addr;
1717 			plane_state->address.grph_stereo.right_meta_addr =
1718 			plane_state->address.grph_stereo.left_meta_addr;
1719 		}
1720 	}
1721 	return false;
1722 }
1723 
1724 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1725 {
1726 	bool addr_patched = false;
1727 	PHYSICAL_ADDRESS_LOC addr;
1728 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1729 
1730 	if (plane_state == NULL)
1731 		return;
1732 
1733 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1734 
1735 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1736 			pipe_ctx->plane_res.hubp,
1737 			&plane_state->address,
1738 			plane_state->flip_immediate);
1739 
1740 	plane_state->status.requested_address = plane_state->address;
1741 
1742 	if (plane_state->flip_immediate)
1743 		plane_state->status.current_address = plane_state->address;
1744 
1745 	if (addr_patched)
1746 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1747 }
1748 
1749 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1750 			const struct dc_plane_state *plane_state)
1751 {
1752 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1753 	const struct dc_transfer_func *tf = NULL;
1754 	bool result = true;
1755 
1756 	if (dpp_base == NULL)
1757 		return false;
1758 
1759 	if (plane_state->in_transfer_func)
1760 		tf = plane_state->in_transfer_func;
1761 
1762 	if (plane_state->gamma_correction &&
1763 		!dpp_base->ctx->dc->debug.always_use_regamma
1764 		&& !plane_state->gamma_correction->is_identity
1765 			&& dce_use_lut(plane_state->format))
1766 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1767 
1768 	if (tf == NULL)
1769 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1770 	else if (tf->type == TF_TYPE_PREDEFINED) {
1771 		switch (tf->tf) {
1772 		case TRANSFER_FUNCTION_SRGB:
1773 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1774 			break;
1775 		case TRANSFER_FUNCTION_BT709:
1776 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1777 			break;
1778 		case TRANSFER_FUNCTION_LINEAR:
1779 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1780 			break;
1781 		case TRANSFER_FUNCTION_PQ:
1782 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1783 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1784 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1785 			result = true;
1786 			break;
1787 		default:
1788 			result = false;
1789 			break;
1790 		}
1791 	} else if (tf->type == TF_TYPE_BYPASS) {
1792 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1793 	} else {
1794 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1795 					&dpp_base->degamma_params);
1796 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1797 				&dpp_base->degamma_params);
1798 		result = true;
1799 	}
1800 
1801 	return result;
1802 }
1803 
1804 #define MAX_NUM_HW_POINTS 0x200
1805 
1806 static void log_tf(struct dc_context *ctx,
1807 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1808 {
1809 	// DC_LOG_GAMMA is default logging of all hw points
1810 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1811 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1812 	int i = 0;
1813 
1814 	DC_LOG_GAMMA("Gamma Correction TF");
1815 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1816 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1817 
1818 	for (i = 0; i < hw_points_num; i++) {
1819 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1820 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1821 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1822 	}
1823 
1824 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1825 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1826 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1827 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1828 	}
1829 }
1830 
1831 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1832 				const struct dc_stream_state *stream)
1833 {
1834 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1835 
1836 	if (dpp == NULL)
1837 		return false;
1838 
1839 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1840 
1841 	if (stream->out_transfer_func &&
1842 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1843 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1844 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1845 
1846 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1847 	 * update.
1848 	 */
1849 	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1850 			stream->out_transfer_func,
1851 			&dpp->regamma_params, false)) {
1852 		dpp->funcs->dpp_program_regamma_pwl(
1853 				dpp,
1854 				&dpp->regamma_params, OPP_REGAMMA_USER);
1855 	} else
1856 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1857 
1858 	if (stream != NULL && stream->ctx != NULL &&
1859 			stream->out_transfer_func != NULL) {
1860 		log_tf(stream->ctx,
1861 				stream->out_transfer_func,
1862 				dpp->regamma_params.hw_points_num);
1863 	}
1864 
1865 	return true;
1866 }
1867 
1868 void dcn10_pipe_control_lock(
1869 	struct dc *dc,
1870 	struct pipe_ctx *pipe,
1871 	bool lock)
1872 {
1873 	struct dce_hwseq *hws = dc->hwseq;
1874 
1875 	/* use TG master update lock to lock everything on the TG
1876 	 * therefore only top pipe need to lock
1877 	 */
1878 	if (!pipe || pipe->top_pipe)
1879 		return;
1880 
1881 	if (dc->debug.sanity_checks)
1882 		hws->funcs.verify_allow_pstate_change_high(dc);
1883 
1884 	if (lock)
1885 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1886 	else
1887 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1888 
1889 	if (dc->debug.sanity_checks)
1890 		hws->funcs.verify_allow_pstate_change_high(dc);
1891 }
1892 
1893 /**
1894  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1895  *
1896  * Software keepout workaround to prevent cursor update locking from stalling
1897  * out cursor updates indefinitely or from old values from being retained in
1898  * the case where the viewport changes in the same frame as the cursor.
1899  *
1900  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1901  * too close to VUPDATE, then stall out until VUPDATE finishes.
1902  *
1903  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1904  *       to avoid the need for this workaround.
1905  *
1906  * @dc: Current DC state
1907  * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1908  *
1909  * Return: void
1910  */
1911 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1912 {
1913 	struct dc_stream_state *stream = pipe_ctx->stream;
1914 	struct crtc_position position;
1915 	uint32_t vupdate_start, vupdate_end;
1916 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1917 	unsigned int us_per_line, us_vupdate;
1918 
1919 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1920 		return;
1921 
1922 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1923 		return;
1924 
1925 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1926 				       &vupdate_end);
1927 
1928 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1929 	vpos = position.vertical_count;
1930 
1931 	/* Avoid wraparound calculation issues */
1932 	vupdate_start += stream->timing.v_total;
1933 	vupdate_end += stream->timing.v_total;
1934 	vpos += stream->timing.v_total;
1935 
1936 	if (vpos <= vupdate_start) {
1937 		/* VPOS is in VACTIVE or back porch. */
1938 		lines_to_vupdate = vupdate_start - vpos;
1939 	} else if (vpos > vupdate_end) {
1940 		/* VPOS is in the front porch. */
1941 		return;
1942 	} else {
1943 		/* VPOS is in VUPDATE. */
1944 		lines_to_vupdate = 0;
1945 	}
1946 
1947 	/* Calculate time until VUPDATE in microseconds. */
1948 	us_per_line =
1949 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1950 	us_to_vupdate = lines_to_vupdate * us_per_line;
1951 
1952 	/* 70 us is a conservative estimate of cursor update time*/
1953 	if (us_to_vupdate > 70)
1954 		return;
1955 
1956 	/* Stall out until the cursor update completes. */
1957 	if (vupdate_end < vupdate_start)
1958 		vupdate_end += stream->timing.v_total;
1959 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1960 	udelay(us_to_vupdate + us_vupdate);
1961 }
1962 
1963 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1964 {
1965 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1966 	if (!pipe || pipe->top_pipe)
1967 		return;
1968 
1969 	/* Prevent cursor lock from stalling out cursor updates. */
1970 	if (lock)
1971 		delay_cursor_until_vupdate(dc, pipe);
1972 
1973 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1974 		union dmub_hw_lock_flags hw_locks = { 0 };
1975 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1976 
1977 		hw_locks.bits.lock_cursor = 1;
1978 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1979 
1980 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1981 					lock,
1982 					&hw_locks,
1983 					&inst_flags);
1984 	} else
1985 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1986 				pipe->stream_res.opp->inst, lock);
1987 }
1988 
1989 static bool wait_for_reset_trigger_to_occur(
1990 	struct dc_context *dc_ctx,
1991 	struct timing_generator *tg)
1992 {
1993 	bool rc = false;
1994 
1995 	DC_LOGGER_INIT(dc_ctx->logger);
1996 
1997 	/* To avoid endless loop we wait at most
1998 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1999 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2000 	int i;
2001 
2002 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2003 
2004 		if (!tg->funcs->is_counter_moving(tg)) {
2005 			DC_ERROR("TG counter is not moving!\n");
2006 			break;
2007 		}
2008 
2009 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2010 			rc = true;
2011 			/* usually occurs at i=1 */
2012 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2013 					i);
2014 			break;
2015 		}
2016 
2017 		/* Wait for one frame. */
2018 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2019 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2020 	}
2021 
2022 	if (false == rc)
2023 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2024 
2025 	return rc;
2026 }
2027 
/*
 * Reduce a DTO phase/modulo fraction by dividing out common prime
 * factors (all primes below 1000).
 *
 * Fix: the function computes and returns a boolean flag but was
 * declared to return uint64_t; declare it bool to match its use
 * (the caller tests the result against false).
 *
 * @numerator:           in/out phase value
 * @denominator:         in/out modulo value
 * @checkUint32Boundary: when true, succeed only once both values fit
 *                       into 32 bits
 *
 * Return: true when the fraction was reduced far enough (always true
 * when no 32-bit bound was requested), false otherwise.
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Boundary)
{
	int i;
	bool ret = checkUint32Boundary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Stop early once both values fit into 32 bits. */
		if (checkUint32Boundary &&
			num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}
		/* Divide out this prime as many times as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2077 
2078 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2079 {
2080 	uint32_t master_pipe_refresh_rate =
2081 		pipe->stream->timing.pix_clk_100hz * 100 /
2082 		pipe->stream->timing.h_total /
2083 		pipe->stream->timing.v_total;
2084 	return master_pipe_refresh_rate <= 30;
2085 }
2086 
2087 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2088 				 bool account_low_refresh_rate)
2089 {
2090 	uint32_t clock_divider = 1;
2091 	uint32_t numpipes = 1;
2092 
2093 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2094 		clock_divider *= 2;
2095 
2096 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2097 		clock_divider *= 2;
2098 
2099 	while (pipe->next_odm_pipe) {
2100 		pipe = pipe->next_odm_pipe;
2101 		numpipes++;
2102 	}
2103 	clock_divider *= numpipes;
2104 
2105 	return clock_divider;
2106 }
2107 
/*
 * Align the DP DTO pixel clocks of a vblank-alignment group to the
 * embedded (e.g. eDP) stream's timing so the group can be synchronized.
 *
 * vblank_alignment_dto_params packs the embedded timing:
 *   bits  0-31: pixel clock in 100 Hz units
 *   bits 32-46: h_total
 *   bits 48-62: v_total
 *
 * Return: index of the pipe to use as sync master, or -1 when nothing
 * was aligned (or the temporary timing buffer could not be allocated).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
				    struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read hw timings and compute per-pipe DTO
		 * phase/modulo relative to the embedded stream.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded stream is the reference and
				 * initial master; its DTO is taken as-is.
				 */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: program the overridden DTOs and refresh each
		 * stream's effective pixel clock. The first synchronizable
		 * non-embedded pipe becomes master if there is no embedded one.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2197 
/*
 * Synchronize vblanks across a group of pipes: align their DP DTOs to a
 * master pipe, then align each slave's vblank to the master's.
 *
 * DPG dimensions are programmed to (2*height + 1) before the alignment
 * and restored to the active size afterwards.
 * NOTE(review): the purpose of the temporary DPG stretch is not visible
 * here -- confirm against the OPP/DPG programming guide.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		/* All slave OTGs must be running for the sync to work. */
		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset sync state before re-aligning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* master < 0 means no pipe could be aligned; skip vblank alignment. */
	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG active dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2262 
/*
 * Synchronize the OTG timings of a group of pipes by arming a reset
 * trigger on each slave against the first pipe's OTG, waiting for the
 * reset to land, then disarming the triggers.
 *
 * SubVP phantom pipes are excluded from every step. DPG dimensions are
 * programmed to (2*height + 1) during the sync and restored afterwards.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		/* All slave OTGs must be running for the sync to work. */
		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear sync state before re-synchronizing. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave OTG to reset against pipe 0's OTG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers once the reset has (or should have) landed. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG active dimensions. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2344 
2345 void dcn10_enable_per_frame_crtc_position_reset(
2346 	struct dc *dc,
2347 	int group_size,
2348 	struct pipe_ctx *grouped_pipes[])
2349 {
2350 	struct dc_context *dc_ctx = dc->ctx;
2351 	int i;
2352 
2353 	DC_LOGGER_INIT(dc_ctx->logger);
2354 
2355 	DC_SYNC_INFO("Setting up\n");
2356 	for (i = 0; i < group_size; i++)
2357 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2358 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2359 					grouped_pipes[i]->stream_res.tg,
2360 					0,
2361 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2362 
2363 	DC_SYNC_INFO("Waiting for trigger\n");
2364 
2365 	for (i = 0; i < group_size; i++)
2366 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2367 
2368 	DC_SYNC_INFO("Multi-display sync is complete\n");
2369 }
2370 
/*
 * Read the MC system aperture settings (default physical page and the
 * low/high logical address bounds) from MMHUB registers into @apt.
 *
 * NOTE(review): the shifts convert register fields to byte addresses;
 * << 12 suggests the page number is in 4KB units and << 18 a 256KB
 * granularity for the logical bounds -- inferred from the shift amounts
 * only, confirm against the register spec.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2394 
2395 /* Temporary read settings, future will get values from kmd directly */
2396 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2397 		struct vm_context0_param *vm0,
2398 		struct dce_hwseq *hws)
2399 {
2400 	PHYSICAL_ADDRESS_LOC fb_base;
2401 	PHYSICAL_ADDRESS_LOC fb_offset;
2402 	uint32_t fb_base_value;
2403 	uint32_t fb_offset_value;
2404 
2405 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2406 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2407 
2408 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2409 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2410 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2411 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2412 
2413 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2414 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2415 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2416 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2417 
2418 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2419 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2420 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2421 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2422 
2423 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2424 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2425 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2426 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2427 
2428 	/*
2429 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2430 	 * Therefore we need to do
2431 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2432 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2433 	 */
2434 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2435 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2436 	vm0->pte_base.quad_part += fb_base.quad_part;
2437 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2438 }
2439 
2440 
2441 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2442 {
2443 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2444 	struct vm_system_aperture_param apt = {0};
2445 	struct vm_context0_param vm0 = {0};
2446 
2447 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2448 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2449 
2450 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2451 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2452 }
2453 
2454 static void dcn10_enable_plane(
2455 	struct dc *dc,
2456 	struct pipe_ctx *pipe_ctx,
2457 	struct dc_state *context)
2458 {
2459 	struct dce_hwseq *hws = dc->hwseq;
2460 
2461 	if (dc->debug.sanity_checks) {
2462 		hws->funcs.verify_allow_pstate_change_high(dc);
2463 	}
2464 
2465 	undo_DEGVIDCN10_253_wa(dc);
2466 
2467 	power_on_plane_resources(dc->hwseq,
2468 		pipe_ctx->plane_res.hubp->inst);
2469 
2470 	/* enable DCFCLK current DCHUB */
2471 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2472 
2473 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2474 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2475 			pipe_ctx->stream_res.opp,
2476 			true);
2477 
2478 	if (dc->config.gpu_vm_support)
2479 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2480 
2481 	if (dc->debug.sanity_checks) {
2482 		hws->funcs.verify_allow_pstate_change_high(dc);
2483 	}
2484 
2485 	if (!pipe_ctx->top_pipe
2486 		&& pipe_ctx->plane_state
2487 		&& pipe_ctx->plane_state->flip_int_enabled
2488 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2489 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2490 
2491 }
2492 
2493 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2494 {
2495 	int i = 0;
2496 	struct dpp_grph_csc_adjustment adjust;
2497 	memset(&adjust, 0, sizeof(adjust));
2498 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2499 
2500 
2501 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2502 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2503 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2504 			adjust.temperature_matrix[i] =
2505 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2506 	} else if (pipe_ctx->plane_state &&
2507 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2508 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2509 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2510 			adjust.temperature_matrix[i] =
2511 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2512 	}
2513 
2514 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2515 }
2516 
2517 
2518 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2519 {
2520 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2521 		if (pipe_ctx->top_pipe) {
2522 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2523 
2524 			while (top->top_pipe)
2525 				top = top->top_pipe; // Traverse to top pipe_ctx
2526 			if (top->plane_state && top->plane_state->layer_index == 0)
2527 				return true; // Front MPO plane not hidden
2528 		}
2529 	}
2530 	return false;
2531 }
2532 
2533 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2534 {
2535 	// Override rear plane RGB bias to fix MPO brightness
2536 	uint16_t rgb_bias = matrix[3];
2537 
2538 	matrix[3] = 0;
2539 	matrix[7] = 0;
2540 	matrix[11] = 0;
2541 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2542 	matrix[3] = rgb_bias;
2543 	matrix[7] = rgb_bias;
2544 	matrix[11] = rgb_bias;
2545 }
2546 
2547 void dcn10_program_output_csc(struct dc *dc,
2548 		struct pipe_ctx *pipe_ctx,
2549 		enum dc_color_space colorspace,
2550 		uint16_t *matrix,
2551 		int opp_id)
2552 {
2553 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2554 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2555 
2556 			/* MPO is broken with RGB colorspaces when OCSC matrix
2557 			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2558 			 * Blending adds offsets from front + rear to rear plane
2559 			 *
2560 			 * Fix is to set RGB bias to 0 on rear plane, top plane
2561 			 * black value pixels add offset instead of rear + front
2562 			 */
2563 
2564 			int16_t rgb_bias = matrix[3];
2565 			// matrix[3/7/11] are all the same offset value
2566 
2567 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2568 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2569 			} else {
2570 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2571 			}
2572 		}
2573 	} else {
2574 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2575 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2576 	}
2577 }
2578 
2579 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2580 {
2581 	struct dc_bias_and_scale bns_params = {0};
2582 
2583 	// program the input csc
2584 	dpp->funcs->dpp_setup(dpp,
2585 			plane_state->format,
2586 			EXPANSION_MODE_ZERO,
2587 			plane_state->input_csc_color_matrix,
2588 			plane_state->color_space,
2589 			NULL);
2590 
2591 	//set scale and bias registers
2592 	build_prescale_params(&bns_params, plane_state);
2593 	if (dpp->funcs->dpp_program_bias_and_scale)
2594 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2595 }
2596 
2597 void dcn10_update_visual_confirm_color(struct dc *dc,
2598 		struct pipe_ctx *pipe_ctx,
2599 		int mpcc_id)
2600 {
2601 	struct mpc *mpc = dc->res_pool->mpc;
2602 
2603 	if (mpc->funcs->set_bg_color) {
2604 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2605 		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2606 	}
2607 }
2608 
/*
 * (Re)build the MPCC blending configuration for a pipe's plane.
 *
 * On a non-full update only the blend config is reprogrammed. On a full
 * update the MPCC (keyed by HUBP instance) is removed from the stream's
 * MPC tree if present and re-inserted with the new blend config.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when there is a pipe blended below. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC this HUBP now feeds. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2686 
2687 static void update_scaler(struct pipe_ctx *pipe_ctx)
2688 {
2689 	bool per_pixel_alpha =
2690 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2691 
2692 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2693 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2694 	/* scaler configuration */
2695 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2696 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2697 }
2698 
/*
 * Program HUBP and DPP state for one pipe based on the plane's dirty
 * update_flags: DPP clock divider, DLG/TTU/RQ registers, scaler,
 * viewport, cursor, gamut/CSC and surface config. Runs between
 * prepare_bandwidth and optimize_bandwidth; see the dppclk note below.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* Prefer a per-DPP DTO when a DCCG is present; otherwise fall
		 * back to tracking dppclk as dispclk or dispclk/2.
		 */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The scaler's viewport is what the surface is scanned at. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Reprogram the cursor if one is attached (non-zero surface address). */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* Unblank only if something in this pipe tree is actually visible. */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2860 
/*
 * Blank or unblank a pipe's pixel data at the OTG, programming the blank
 * color first and sequencing ABM around the transition: ABM is enabled
 * after unblank, and disabled (waiting for VBLANK) before blank.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then re-enable ABM for the stream. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM before blanking; wait for VBLANK so the blank
		 * takes effect on a frame boundary.
		 */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2904 
2905 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2906 {
2907 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2908 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2909 	struct custom_float_format fmt;
2910 
2911 	fmt.exponenta_bits = 6;
2912 	fmt.mantissa_bits = 12;
2913 	fmt.sign = true;
2914 
2915 
2916 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2917 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2918 
2919 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2920 			pipe_ctx->plane_res.dpp, hw_mult);
2921 }
2922 
/*
 * Full per-pipe programming entry point: for a top pipe, program OTG
 * global sync, VTG params, vupdate interrupt and blank state; then for
 * every pipe, (re)enable the plane on full update, program HUBP/DPP, the
 * HDR multiplier, and input/output transfer functions as flagged.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Timing-generator-level programming only applies to the top pipe. */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	/* NOTE(review): plane_state is dereferenced unconditionally below —
	 * presumably callers guarantee a plane is attached; verify at call sites.
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2970 
2971 void dcn10_wait_for_pending_cleared(struct dc *dc,
2972 		struct dc_state *context)
2973 {
2974 		struct pipe_ctx *pipe_ctx;
2975 		struct timing_generator *tg;
2976 		int i;
2977 
2978 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2979 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2980 			tg = pipe_ctx->stream_res.tg;
2981 
2982 			/*
2983 			 * Only wait for top pipe's tg penindg bit
2984 			 * Also skip if pipe is disabled.
2985 			 */
2986 			if (pipe_ctx->top_pipe ||
2987 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2988 			    !tg->funcs->is_tg_enabled(tg))
2989 				continue;
2990 
2991 			/*
2992 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2993 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2994 			 * seems to not trigger the update right away, and if we
2995 			 * lock again before VUPDATE then we don't get a separated
2996 			 * operation.
2997 			 */
2998 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2999 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3000 		}
3001 }
3002 
/*
 * Front-end cleanup after pipes are unlocked: apply the OPTC underflow
 * workaround on planeless streams, tear down disabled planes, optimize
 * bandwidth if anything was disabled, and kick the DEGVIDCN10-254
 * watermark-change workaround when applicable.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Only head pipes (not a bottom or ODM slave) with a stream. */
		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* Note the asymmetry: the disable flag is read from the NEW context,
	 * but the pipe being torn down comes from dc->current_state.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* Release now-unneeded bandwidth once, if any pipe was disabled. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3035 
3036 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3037 {
3038 	uint8_t i;
3039 
3040 	for (i = 0; i < context->stream_count; i++) {
3041 		if (context->streams[i]->timing.timing_3d_format
3042 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3043 			/*
3044 			 * Disable stutter
3045 			 */
3046 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3047 			break;
3048 		}
3049 	}
3050 }
3051 
/*
 * Raise clocks/watermarks BEFORE a mode/plane change takes effect:
 * update_clocks with safe_to_lower=false, then program watermarks
 * (recording whether a later lowering pass is needed), then apply the
 * stereo frame-pack stutter WA and optionally report WM ranges to pplib.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams means the PHY clock can be dropped entirely. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	/* safe_to_lower=false: only raise clocks in the prepare phase. */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* Remember whether watermarks still need an optimize pass later. */
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3089 
/*
 * Lower clocks/watermarks AFTER a mode/plane change has taken effect;
 * mirrors dcn10_prepare_bandwidth but calls update_clocks with
 * safe_to_lower=true so clocks may now be reduced.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams means the PHY clock can be dropped entirely. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	/* safe_to_lower=true: the new configuration is live, clocks may drop. */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3128 
3129 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3130 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3131 {
3132 	int i = 0;
3133 	struct drr_params params = {0};
3134 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3135 	unsigned int event_triggers = 0x800;
3136 	// Note DRR trigger events are generated regardless of whether num frames met.
3137 	unsigned int num_frames = 2;
3138 
3139 	params.vertical_total_max = adjust.v_total_max;
3140 	params.vertical_total_min = adjust.v_total_min;
3141 	params.vertical_total_mid = adjust.v_total_mid;
3142 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3143 	/* TODO: If multiple pipes are to be supported, you need
3144 	 * some GSL stuff. Static screen triggers may be programmed differently
3145 	 * as well.
3146 	 */
3147 	for (i = 0; i < num_pipes; i++) {
3148 		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3149 			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3150 				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3151 					pipe_ctx[i]->stream_res.tg, &params);
3152 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3153 				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3154 					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3155 						pipe_ctx[i]->stream_res.tg,
3156 						event_triggers, num_frames);
3157 		}
3158 	}
3159 }
3160 
3161 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3162 		int num_pipes,
3163 		struct crtc_position *position)
3164 {
3165 	int i = 0;
3166 
3167 	/* TODO: handle pipes > 1
3168 	 */
3169 	for (i = 0; i < num_pipes; i++)
3170 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3171 }
3172 
3173 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3174 		int num_pipes, const struct dc_static_screen_params *params)
3175 {
3176 	unsigned int i;
3177 	unsigned int triggers = 0;
3178 
3179 	if (params->triggers.surface_update)
3180 		triggers |= 0x80;
3181 	if (params->triggers.cursor_update)
3182 		triggers |= 0x2;
3183 	if (params->triggers.force_trigger)
3184 		triggers |= 0x1;
3185 
3186 	for (i = 0; i < num_pipes; i++)
3187 		pipe_ctx[i]->stream_res.tg->funcs->
3188 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3189 					triggers, params->num_frames);
3190 }
3191 
3192 static void dcn10_config_stereo_parameters(
3193 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3194 {
3195 	enum view_3d_format view_format = stream->view_format;
3196 	enum dc_timing_3d_format timing_3d_format =\
3197 			stream->timing.timing_3d_format;
3198 	bool non_stereo_timing = false;
3199 
3200 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3201 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3202 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3203 		non_stereo_timing = true;
3204 
3205 	if (non_stereo_timing == false &&
3206 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3207 
3208 		flags->PROGRAM_STEREO         = 1;
3209 		flags->PROGRAM_POLARITY       = 1;
3210 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3211 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3212 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3213 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3214 
3215 			if (stream->link && stream->link->ddc) {
3216 				enum display_dongle_type dongle = \
3217 						stream->link->ddc->dongle_type;
3218 
3219 				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3220 					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3221 					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3222 					flags->DISABLE_STEREO_DP_SYNC = 1;
3223 			}
3224 		}
3225 		flags->RIGHT_EYE_POLARITY =\
3226 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3227 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3228 			flags->FRAME_PACKED = 1;
3229 	}
3230 
3231 	return;
3232 }
3233 
3234 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3235 {
3236 	struct crtc_stereo_flags flags = { 0 };
3237 	struct dc_stream_state *stream = pipe_ctx->stream;
3238 
3239 	dcn10_config_stereo_parameters(stream, &flags);
3240 
3241 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3242 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3243 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3244 	} else {
3245 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3246 	}
3247 
3248 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3249 		pipe_ctx->stream_res.opp,
3250 		flags.PROGRAM_STEREO == 1,
3251 		&stream->timing);
3252 
3253 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3254 		pipe_ctx->stream_res.tg,
3255 		&stream->timing,
3256 		&flags);
3257 
3258 	return;
3259 }
3260 
3261 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3262 {
3263 	int i;
3264 
3265 	for (i = 0; i < res_pool->pipe_count; i++) {
3266 		if (res_pool->hubps[i]->inst == mpcc_inst)
3267 			return res_pool->hubps[i];
3268 	}
3269 	ASSERT(false);
3270 	return NULL;
3271 }
3272 
3273 void dcn10_wait_for_mpcc_disconnect(
3274 		struct dc *dc,
3275 		struct resource_pool *res_pool,
3276 		struct pipe_ctx *pipe_ctx)
3277 {
3278 	struct dce_hwseq *hws = dc->hwseq;
3279 	int mpcc_inst;
3280 
3281 	if (dc->debug.sanity_checks) {
3282 		hws->funcs.verify_allow_pstate_change_high(dc);
3283 	}
3284 
3285 	if (!pipe_ctx->stream_res.opp)
3286 		return;
3287 
3288 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3289 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3290 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3291 
3292 			if (pipe_ctx->stream_res.tg &&
3293 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3294 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3295 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3296 			hubp->funcs->set_blank(hubp, true);
3297 		}
3298 	}
3299 
3300 	if (dc->debug.sanity_checks) {
3301 		hws->funcs.verify_allow_pstate_change_high(dc);
3302 	}
3303 
3304 }
3305 
/*
 * No-op power-gating hook: DCN10 does not gate pipes through this
 * interface, so the stub unconditionally reports success. All parameters
 * are intentionally unused.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3314 
/*
 * Refresh a plane's flip-pending status from the HUBP: latch the current
 * address once the flip completes, update the stereo eye indicator, and
 * lift the multi-plane-transition self-refresh workaround once a new
 * frame has started.
 */
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	bool flip_pending;
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	/* Nothing to report for a pipe with no plane attached. */
	if (plane_state == NULL)
		return;

	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
					pipe_ctx->plane_res.hubp);

	/* Sticky OR: once pending is observed, it stays set until consumed. */
	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;

	if (!flip_pending)
		plane_state->status.current_address = plane_state->status.requested_address;

	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
			tg->funcs->is_stereo_left_eye) {
		plane_state->status.is_right_eye =
				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
	}

	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
		struct dce_hwseq *hwseq = dc->hwseq;
		/* Deliberately shadows the outer tg: the WA tracks frames on TG 0. */
		struct timing_generator *tg = dc->res_pool->timing_generators[0];
		unsigned int cur_frame = tg->funcs->get_frame_count(tg);

		/* Re-enable self-refresh only once a new frame has begun. */
		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
		}
	}
}
3352 
3353 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3354 {
3355 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3356 
3357 	/* In DCN, this programming sequence is owned by the hubbub */
3358 	hubbub->funcs->update_dchub(hubbub, dh_data);
3359 }
3360 
3361 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3362 {
3363 	struct pipe_ctx *test_pipe, *split_pipe;
3364 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3365 	struct rect r1 = scl_data->recout, r2, r2_half;
3366 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3367 	int cur_layer = pipe_ctx->plane_state->layer_index;
3368 
3369 	/**
3370 	 * Disable the cursor if there's another pipe above this with a
3371 	 * plane that contains this pipe's viewport to prevent double cursor
3372 	 * and incorrect scaling artifacts.
3373 	 */
3374 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3375 	     test_pipe = test_pipe->top_pipe) {
3376 		// Skip invisible layer and pipe-split plane on same layer
3377 		if (!test_pipe->plane_state ||
3378 		    !test_pipe->plane_state->visible ||
3379 		    test_pipe->plane_state->layer_index == cur_layer)
3380 			continue;
3381 
3382 		r2 = test_pipe->plane_res.scl_data.recout;
3383 		r2_r = r2.x + r2.width;
3384 		r2_b = r2.y + r2.height;
3385 		split_pipe = test_pipe;
3386 
3387 		/**
3388 		 * There is another half plane on same layer because of
3389 		 * pipe-split, merge together per same height.
3390 		 */
3391 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3392 		     split_pipe = split_pipe->top_pipe)
3393 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3394 				r2_half = split_pipe->plane_res.scl_data.recout;
3395 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3396 				r2.width = r2.width + r2_half.width;
3397 				r2_r = r2.x + r2.width;
3398 				break;
3399 			}
3400 
3401 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3402 			return true;
3403 	}
3404 
3405 	return false;
3406 }
3407 
3408 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3409 {
3410 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3411 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3412 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3413 	struct dc_cursor_mi_param param = {
3414 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3415 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3416 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3417 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3418 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3419 		.rotation = pipe_ctx->plane_state->rotation,
3420 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3421 	};
3422 	bool pipe_split_on = false;
3423 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3424 		(pipe_ctx->prev_odm_pipe != NULL);
3425 
3426 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3427 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3428 	int x_pos = pos_cpy.x;
3429 	int y_pos = pos_cpy.y;
3430 
3431 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3432 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3433 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3434 			pipe_split_on = true;
3435 		}
3436 	}
3437 
3438 	/**
3439 	 * DC cursor is stream space, HW cursor is plane space and drawn
3440 	 * as part of the framebuffer.
3441 	 *
3442 	 * Cursor position can't be negative, but hotspot can be used to
3443 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3444 	 * than the cursor size.
3445 	 */
3446 
3447 	/**
3448 	 * Translate cursor from stream space to plane space.
3449 	 *
3450 	 * If the cursor is scaled then we need to scale the position
3451 	 * to be in the approximately correct place. We can't do anything
3452 	 * about the actual size being incorrect, that's a limitation of
3453 	 * the hardware.
3454 	 */
3455 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3456 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3457 				pipe_ctx->plane_state->dst_rect.width;
3458 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3459 				pipe_ctx->plane_state->dst_rect.height;
3460 	} else {
3461 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3462 				pipe_ctx->plane_state->dst_rect.width;
3463 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3464 				pipe_ctx->plane_state->dst_rect.height;
3465 	}
3466 
3467 	/**
3468 	 * If the cursor's source viewport is clipped then we need to
3469 	 * translate the cursor to appear in the correct position on
3470 	 * the screen.
3471 	 *
3472 	 * This translation isn't affected by scaling so it needs to be
3473 	 * done *after* we adjust the position for the scale factor.
3474 	 *
3475 	 * This is only done by opt-in for now since there are still
3476 	 * some usecases like tiled display that might enable the
3477 	 * cursor on both streams while expecting dc to clip it.
3478 	 */
3479 	if (pos_cpy.translate_by_source) {
3480 		x_pos += pipe_ctx->plane_state->src_rect.x;
3481 		y_pos += pipe_ctx->plane_state->src_rect.y;
3482 	}
3483 
3484 	/**
3485 	 * If the position is negative then we need to add to the hotspot
3486 	 * to shift the cursor outside the plane.
3487 	 */
3488 
3489 	if (x_pos < 0) {
3490 		pos_cpy.x_hotspot -= x_pos;
3491 		x_pos = 0;
3492 	}
3493 
3494 	if (y_pos < 0) {
3495 		pos_cpy.y_hotspot -= y_pos;
3496 		y_pos = 0;
3497 	}
3498 
3499 	pos_cpy.x = (uint32_t)x_pos;
3500 	pos_cpy.y = (uint32_t)y_pos;
3501 
3502 	if (pipe_ctx->plane_state->address.type
3503 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3504 		pos_cpy.enable = false;
3505 
3506 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3507 		pos_cpy.enable = false;
3508 
3509 
3510 	if (param.rotation == ROTATION_ANGLE_0) {
3511 		int viewport_width =
3512 			pipe_ctx->plane_res.scl_data.viewport.width;
3513 		int viewport_x =
3514 			pipe_ctx->plane_res.scl_data.viewport.x;
3515 
3516 		if (param.mirror) {
3517 			if (pipe_split_on || odm_combine_on) {
3518 				if (pos_cpy.x >= viewport_width + viewport_x) {
3519 					pos_cpy.x = 2 * viewport_width
3520 							- pos_cpy.x + 2 * viewport_x;
3521 				} else {
3522 					uint32_t temp_x = pos_cpy.x;
3523 
3524 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3525 					if (temp_x >= viewport_x +
3526 						(int)hubp->curs_attr.width || pos_cpy.x
3527 						<= (int)hubp->curs_attr.width +
3528 						pipe_ctx->plane_state->src_rect.x) {
3529 						pos_cpy.x = temp_x + viewport_width;
3530 					}
3531 				}
3532 			} else {
3533 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3534 			}
3535 		}
3536 	}
3537 	// Swap axis and mirror horizontally
3538 	else if (param.rotation == ROTATION_ANGLE_90) {
3539 		uint32_t temp_x = pos_cpy.x;
3540 
3541 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3542 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3543 		pos_cpy.y = temp_x;
3544 	}
3545 	// Swap axis and mirror vertically
3546 	else if (param.rotation == ROTATION_ANGLE_270) {
3547 		uint32_t temp_y = pos_cpy.y;
3548 		int viewport_height =
3549 			pipe_ctx->plane_res.scl_data.viewport.height;
3550 		int viewport_y =
3551 			pipe_ctx->plane_res.scl_data.viewport.y;
3552 
3553 		/**
3554 		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3555 		 * For pipe split cases:
3556 		 * - apply offset of viewport.y to normalize pos_cpy.x
3557 		 * - calculate the pos_cpy.y as before
3558 		 * - shift pos_cpy.y back by same offset to get final value
3559 		 * - since we iterate through both pipes, use the lower
3560 		 *   viewport.y for offset
3561 		 * For non pipe split cases, use the same calculation for
3562 		 *  pos_cpy.y as the 180 degree rotation case below,
3563 		 *  but use pos_cpy.x as our input because we are rotating
3564 		 *  270 degrees
3565 		 */
3566 		if (pipe_split_on || odm_combine_on) {
3567 			int pos_cpy_x_offset;
3568 			int other_pipe_viewport_y;
3569 
3570 			if (pipe_split_on) {
3571 				if (pipe_ctx->bottom_pipe) {
3572 					other_pipe_viewport_y =
3573 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3574 				} else {
3575 					other_pipe_viewport_y =
3576 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3577 				}
3578 			} else {
3579 				if (pipe_ctx->next_odm_pipe) {
3580 					other_pipe_viewport_y =
3581 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3582 				} else {
3583 					other_pipe_viewport_y =
3584 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3585 				}
3586 			}
3587 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3588 				other_pipe_viewport_y : viewport_y;
3589 			pos_cpy.x -= pos_cpy_x_offset;
3590 			if (pos_cpy.x > viewport_height) {
3591 				pos_cpy.x = pos_cpy.x - viewport_height;
3592 				pos_cpy.y = viewport_height - pos_cpy.x;
3593 			} else {
3594 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3595 			}
3596 			pos_cpy.y += pos_cpy_x_offset;
3597 		} else {
3598 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3599 		}
3600 		pos_cpy.x = temp_y;
3601 	}
3602 	// Mirror horizontally and vertically
3603 	else if (param.rotation == ROTATION_ANGLE_180) {
3604 		int viewport_width =
3605 			pipe_ctx->plane_res.scl_data.viewport.width;
3606 		int viewport_x =
3607 			pipe_ctx->plane_res.scl_data.viewport.x;
3608 
3609 		if (!param.mirror) {
3610 			if (pipe_split_on || odm_combine_on) {
3611 				if (pos_cpy.x >= viewport_width + viewport_x) {
3612 					pos_cpy.x = 2 * viewport_width
3613 							- pos_cpy.x + 2 * viewport_x;
3614 				} else {
3615 					uint32_t temp_x = pos_cpy.x;
3616 
3617 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3618 					if (temp_x >= viewport_x +
3619 						(int)hubp->curs_attr.width || pos_cpy.x
3620 						<= (int)hubp->curs_attr.width +
3621 						pipe_ctx->plane_state->src_rect.x) {
3622 						pos_cpy.x = 2 * viewport_width - temp_x;
3623 					}
3624 				}
3625 			} else {
3626 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3627 			}
3628 		}
3629 
3630 		/**
3631 		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3632 		 * Calculation:
3633 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3634 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3635 		 * Simplify it as:
3636 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3637 		 */
3638 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3639 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3640 	}
3641 
3642 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3643 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3644 }
3645 
3646 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3647 {
3648 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3649 
3650 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3651 			pipe_ctx->plane_res.hubp, attributes);
3652 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3653 		pipe_ctx->plane_res.dpp, attributes);
3654 }
3655 
3656 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3657 {
3658 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3659 	struct fixed31_32 multiplier;
3660 	struct dpp_cursor_attributes opt_attr = { 0 };
3661 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3662 	struct custom_float_format fmt;
3663 
3664 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3665 		return;
3666 
3667 	fmt.exponenta_bits = 5;
3668 	fmt.mantissa_bits = 10;
3669 	fmt.sign = true;
3670 
3671 	if (sdr_white_level > 80) {
3672 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3673 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3674 	}
3675 
3676 	opt_attr.scale = hw_scale;
3677 	opt_attr.bias = 0;
3678 
3679 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3680 			pipe_ctx->plane_res.dpp, &opt_attr);
3681 }
3682 
3683 /*
3684  * apply_front_porch_workaround  TODO FPGA still need?
3685  *
3686  * This is a workaround for a bug that has existed since R5xx and has not been
3687  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3688  */
3689 static void apply_front_porch_workaround(
3690 	struct dc_crtc_timing *timing)
3691 {
3692 	if (timing->flags.INTERLACE == 1) {
3693 		if (timing->v_front_porch < 2)
3694 			timing->v_front_porch = 2;
3695 	} else {
3696 		if (timing->v_front_porch < 1)
3697 			timing->v_front_porch = 1;
3698 	}
3699 }
3700 
3701 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3702 {
3703 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3704 	struct dc_crtc_timing patched_crtc_timing;
3705 	int vesa_sync_start;
3706 	int asic_blank_end;
3707 	int interlace_factor;
3708 
3709 	patched_crtc_timing = *dc_crtc_timing;
3710 	apply_front_porch_workaround(&patched_crtc_timing);
3711 
3712 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3713 
3714 	vesa_sync_start = patched_crtc_timing.v_addressable +
3715 			patched_crtc_timing.v_border_bottom +
3716 			patched_crtc_timing.v_front_porch;
3717 
3718 	asic_blank_end = (patched_crtc_timing.v_total -
3719 			vesa_sync_start -
3720 			patched_crtc_timing.v_border_top)
3721 			* interlace_factor;
3722 
3723 	return asic_blank_end -
3724 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3725 }
3726 
3727 void dcn10_calc_vupdate_position(
3728 		struct dc *dc,
3729 		struct pipe_ctx *pipe_ctx,
3730 		uint32_t *start_line,
3731 		uint32_t *end_line)
3732 {
3733 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3734 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3735 
3736 	if (vupdate_pos >= 0)
3737 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3738 	else
3739 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3740 	*end_line = (*start_line + 2) % timing->v_total;
3741 }
3742 
3743 static void dcn10_cal_vline_position(
3744 		struct dc *dc,
3745 		struct pipe_ctx *pipe_ctx,
3746 		uint32_t *start_line,
3747 		uint32_t *end_line)
3748 {
3749 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3750 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3751 
3752 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3753 		if (vline_pos > 0)
3754 			vline_pos--;
3755 		else if (vline_pos < 0)
3756 			vline_pos++;
3757 
3758 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3759 		if (vline_pos >= 0)
3760 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3761 		else
3762 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3763 		*end_line = (*start_line + 2) % timing->v_total;
3764 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3765 		// vsync is line 0 so start_line is just the requested line offset
3766 		*start_line = vline_pos;
3767 		*end_line = (*start_line + 2) % timing->v_total;
3768 	} else
3769 		ASSERT(0);
3770 }
3771 
3772 void dcn10_setup_periodic_interrupt(
3773 		struct dc *dc,
3774 		struct pipe_ctx *pipe_ctx)
3775 {
3776 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3777 	uint32_t start_line = 0;
3778 	uint32_t end_line = 0;
3779 
3780 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3781 
3782 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3783 }
3784 
3785 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3786 {
3787 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3788 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3789 
3790 	if (start_line < 0) {
3791 		ASSERT(0);
3792 		start_line = 0;
3793 	}
3794 
3795 	if (tg->funcs->setup_vertical_interrupt2)
3796 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3797 }
3798 
3799 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3800 		struct dc_link_settings *link_settings)
3801 {
3802 	struct encoder_unblank_param params = {0};
3803 	struct dc_stream_state *stream = pipe_ctx->stream;
3804 	struct dc_link *link = stream->link;
3805 	struct dce_hwseq *hws = link->dc->hwseq;
3806 
3807 	/* only 3 items below are used by unblank */
3808 	params.timing = pipe_ctx->stream->timing;
3809 
3810 	params.link_settings.link_rate = link_settings->link_rate;
3811 
3812 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3813 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3814 			params.timing.pix_clk_100hz /= 2;
3815 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3816 	}
3817 
3818 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3819 		hws->funcs.edp_backlight_control(link, true);
3820 	}
3821 }
3822 
3823 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3824 				const uint8_t *custom_sdp_message,
3825 				unsigned int sdp_message_size)
3826 {
3827 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3828 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3829 				pipe_ctx->stream_res.stream_enc,
3830 				custom_sdp_message,
3831 				sdp_message_size);
3832 	}
3833 }
3834 enum dc_status dcn10_set_clock(struct dc *dc,
3835 			enum dc_clock_type clock_type,
3836 			uint32_t clk_khz,
3837 			uint32_t stepping)
3838 {
3839 	struct dc_state *context = dc->current_state;
3840 	struct dc_clock_config clock_cfg = {0};
3841 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3842 
3843 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3844 		return DC_FAIL_UNSUPPORTED_1;
3845 
3846 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3847 		context, clock_type, &clock_cfg);
3848 
3849 	if (clk_khz > clock_cfg.max_clock_khz)
3850 		return DC_FAIL_CLK_EXCEED_MAX;
3851 
3852 	if (clk_khz < clock_cfg.min_clock_khz)
3853 		return DC_FAIL_CLK_BELOW_MIN;
3854 
3855 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3856 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3857 
3858 	/*update internal request clock for update clock use*/
3859 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3860 		current_clocks->dispclk_khz = clk_khz;
3861 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3862 		current_clocks->dppclk_khz = clk_khz;
3863 	else
3864 		return DC_ERROR_UNEXPECTED;
3865 
3866 	if (dc->clk_mgr->funcs->update_clocks)
3867 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3868 				context, true);
3869 	return DC_OK;
3870 
3871 }
3872 
3873 void dcn10_get_clock(struct dc *dc,
3874 			enum dc_clock_type clock_type,
3875 			struct dc_clock_config *clock_cfg)
3876 {
3877 	struct dc_state *context = dc->current_state;
3878 
3879 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3880 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3881 
3882 }
3883 
3884 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3885 {
3886 	struct resource_pool *pool = dc->res_pool;
3887 	int i;
3888 
3889 	for (i = 0; i < pool->pipe_count; i++) {
3890 		struct hubp *hubp = pool->hubps[i];
3891 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3892 
3893 		hubp->funcs->hubp_read_state(hubp);
3894 
3895 		if (!s->blank_en)
3896 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3897 	}
3898 }
3899