1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 #include "dc_trace.h"
56 #include "dce/dmub_outbox.h"
57 
58 #define DC_LOGGER_INIT(logger)
59 
60 #define CTX \
61 	hws->ctx
62 #define REG(reg)\
63 	hws->regs->reg
64 
65 #undef FN
66 #define FN(reg_name, field_name) \
67 	hws->shifts->field_name, hws->masks->field_name
68 
69 /*print is 17 wide, first two characters are spaces*/
70 #define DTN_INFO_MICRO_SEC(ref_cycle) \
71 	print_microsec(dc_ctx, log_ctx, ref_cycle)
72 
73 #define GAMMA_HW_POINTS_NUM 256
74 
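/*
 * Convert a DCHUB reference clock cycle count to microseconds and print it
 * with three fractional digits.
 */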
void print_microsec(struct dc_context *dc_ctx,
76 	struct dc_log_buffer_ctx *log_ctx,
77 	uint32_t ref_cycle)
78 {
79 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
80 	static const unsigned int frac = 1000;
81 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
82 
83 	DTN_INFO("  %11d.%03d",
84 			us_x10 / frac,
85 			us_x10 % frac);
86 }
87 
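/*
 * Lock or unlock the timing generator of every enabled top pipe that has a
 * stream and a plane, so register updates across pipes take effect atomically.
 */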
void dcn10_lock_all_pipes(struct dc *dc,
89 	struct dc_state *context,
90 	bool lock)
91 {
92 	struct pipe_ctx *pipe_ctx;
93 	struct timing_generator *tg;
94 	int i;
95 
96 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
97 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
98 		tg = pipe_ctx->stream_res.tg;
99 
100 		/*
101 		 * Only lock the top pipe's tg to prevent redundant
102 		 * (un)locking. Also skip if pipe is disabled.
103 		 */
104 		if (pipe_ctx->top_pipe ||
105 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
106 		    !tg->funcs->is_tg_enabled(tg))
107 			continue;
108 
109 		if (lock)
110 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
111 		else
112 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
113 	}
114 }
115 
static void log_mpc_crc(struct dc *dc,
117 	struct dc_log_buffer_ctx *log_ctx)
118 {
119 	struct dc_context *dc_ctx = dc->ctx;
120 	struct dce_hwseq *hws = dc->hwseq;
121 
122 	if (REG(MPC_CRC_RESULT_GB))
123 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
124 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
125 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
126 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
127 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
128 }
129 
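/*
 * Log the four HUBBUB watermark sets (urgent, self-refresh enter/exit and
 * DRAM clock change watermarks), converted to microseconds.
 */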
void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
131 {
132 	struct dc_context *dc_ctx = dc->ctx;
133 	struct dcn_hubbub_wm wm;
134 	int i;
135 
136 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
137 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
138 
139 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
140 			"         sr_enter          sr_exit  dram_clk_change\n");
141 
142 	for (i = 0; i < 4; i++) {
143 		struct dcn_hubbub_wm_set *s;
144 
145 		s = &wm.sets[i];
146 		DTN_INFO("WM_Set[%d]:", s->wm_set);
147 		DTN_INFO_MICRO_SEC(s->data_urgent);
148 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
149 		DTN_INFO_MICRO_SEC(s->sr_enter);
150 		DTN_INFO_MICRO_SEC(s->sr_exit);
151 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
152 		DTN_INFO("\n");
153 	}
154 
155 	DTN_INFO("\n");
156 }
157 
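/*
 * Log per-HUBP surface, RQ, DLG and TTU register state for every pipe that
 * is not blanked.
 */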
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
159 {
160 	struct dc_context *dc_ctx = dc->ctx;
161 	struct resource_pool *pool = dc->res_pool;
162 	int i;
163 
164 	DTN_INFO(
165 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
166 	for (i = 0; i < pool->pipe_count; i++) {
167 		struct hubp *hubp = pool->hubps[i];
168 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
169 
170 		hubp->funcs->hubp_read_state(hubp);
171 
172 		if (!s->blank_en) {
173 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
174 					hubp->inst,
175 					s->pixel_format,
176 					s->inuse_addr_hi,
177 					s->viewport_width,
178 					s->viewport_height,
179 					s->rotation_angle,
180 					s->h_mirror_en,
181 					s->sw_mode,
182 					s->dcc_en,
183 					s->blank_en,
184 					s->clock_en,
185 					s->ttu_disable,
186 					s->underflow_status);
187 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
188 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
189 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
190 			DTN_INFO("\n");
191 		}
192 	}
193 
194 	DTN_INFO("\n=========RQ========\n");
195 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
196 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
197 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
198 	for (i = 0; i < pool->pipe_count; i++) {
199 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
200 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
201 
202 		if (!s->blank_en)
203 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
204 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
205 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
206 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
207 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
208 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
209 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
210 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
211 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
212 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
213 	}
214 
215 	DTN_INFO("========DLG========\n");
216 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
217 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
218 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
219 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
220 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
221 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
222 			"  x_rp_dlay  x_rr_sfl\n");
223 	for (i = 0; i < pool->pipe_count; i++) {
224 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
225 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
226 
227 		if (!s->blank_en)
228 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
230 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
231 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
232 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
233 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
234 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
235 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
236 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
237 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
238 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
239 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
240 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
241 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
242 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
243 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
244 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
245 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
246 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
247 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
248 				dlg_regs->xfc_reg_remote_surface_flip_latency);
249 	}
250 
251 	DTN_INFO("========TTU========\n");
252 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
253 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
254 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
255 	for (i = 0; i < pool->pipe_count; i++) {
256 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
257 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
258 
259 		if (!s->blank_en)
260 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
261 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
262 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
263 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
264 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
265 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
266 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
267 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
268 	}
269 	DTN_INFO("\n");
270 }
271 
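/*
 * Top-level debug dump: HUBBUB, HUBP, DPP, MPCC, OTG, DSC, stream/link
 * encoder state and the calculated clocks.
 */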
void dcn10_log_hw_state(struct dc *dc,
273 	struct dc_log_buffer_ctx *log_ctx)
274 {
275 	struct dc_context *dc_ctx = dc->ctx;
276 	struct resource_pool *pool = dc->res_pool;
277 	int i;
278 
279 	DTN_INFO_BEGIN();
280 
281 	dcn10_log_hubbub_state(dc, log_ctx);
282 
283 	dcn10_log_hubp_states(dc, log_ctx);
284 
285 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
286 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
287 			"C31 C32   C33 C34\n");
288 	for (i = 0; i < pool->pipe_count; i++) {
289 		struct dpp *dpp = pool->dpps[i];
290 		struct dcn_dpp_state s = {0};
291 
292 		dpp->funcs->dpp_read_state(dpp, &s);
293 
294 		if (!s.is_enabled)
295 			continue;
296 
297 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
298 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
299 				dpp->inst,
300 				s.igam_input_format,
301 				(s.igam_lut_mode == 0) ? "BypassFixed" :
302 					((s.igam_lut_mode == 1) ? "BypassFloat" :
303 					((s.igam_lut_mode == 2) ? "RAM" :
304 					((s.igam_lut_mode == 3) ? "RAM" :
305 								 "Unknown"))),
306 				(s.dgam_lut_mode == 0) ? "Bypass" :
307 					((s.dgam_lut_mode == 1) ? "sRGB" :
308 					((s.dgam_lut_mode == 2) ? "Ycc" :
309 					((s.dgam_lut_mode == 3) ? "RAM" :
310 					((s.dgam_lut_mode == 4) ? "RAM" :
311 								 "Unknown")))),
312 				(s.rgam_lut_mode == 0) ? "Bypass" :
313 					((s.rgam_lut_mode == 1) ? "sRGB" :
314 					((s.rgam_lut_mode == 2) ? "Ycc" :
315 					((s.rgam_lut_mode == 3) ? "RAM" :
316 					((s.rgam_lut_mode == 4) ? "RAM" :
317 								 "Unknown")))),
318 				s.gamut_remap_mode,
319 				s.gamut_remap_c11_c12,
320 				s.gamut_remap_c13_c14,
321 				s.gamut_remap_c21_c22,
322 				s.gamut_remap_c23_c24,
323 				s.gamut_remap_c31_c32,
324 				s.gamut_remap_c33_c34);
325 		DTN_INFO("\n");
326 	}
327 	DTN_INFO("\n");
328 
329 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
330 	for (i = 0; i < pool->pipe_count; i++) {
331 		struct mpcc_state s = {0};
332 
333 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
334 		if (s.opp_id != 0xf)
335 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
336 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
337 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
338 				s.idle);
339 	}
340 	DTN_INFO("\n");
341 
342 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
343 
344 	for (i = 0; i < pool->timing_generator_count; i++) {
345 		struct timing_generator *tg = pool->timing_generators[i];
346 		struct dcn_otg_state s = {0};
347 		/* Read shared OTG state registers for all DCNx */
348 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
349 
350 		/*
351 		 * For DCN2 and greater, a register on the OPP is used to
352 		 * determine if the CRTC is blanked instead of the OTG. So use
353 		 * dpg_is_blanked() if exists, otherwise fallback on otg.
354 		 *
355 		 * TODO: Implement DCN-specific read_otg_state hooks.
356 		 */
357 		if (pool->opps[i]->funcs->dpg_is_blanked)
358 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
359 		else
360 			s.blank_enabled = tg->funcs->is_blanked(tg);
361 
362 		//only print if OTG master is enabled
363 		if ((s.otg_enabled & 1) == 0)
364 			continue;
365 
366 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
367 				tg->inst,
368 				s.v_blank_start,
369 				s.v_blank_end,
370 				s.v_sync_a_start,
371 				s.v_sync_a_end,
372 				s.v_sync_a_pol,
373 				s.v_total_max,
374 				s.v_total_min,
375 				s.v_total_max_sel,
376 				s.v_total_min_sel,
377 				s.h_blank_start,
378 				s.h_blank_end,
379 				s.h_sync_a_start,
380 				s.h_sync_a_end,
381 				s.h_sync_a_pol,
382 				s.h_total,
383 				s.v_total,
384 				s.underflow_occurred_status,
385 				s.blank_enabled);
386 
387 		// Clear underflow for debug purposes
388 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
389 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
390 		// it from here without affecting the original intent.
391 		tg->funcs->clear_optc_underflow(tg);
392 	}
393 	DTN_INFO("\n");
394 
395 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
396 	// TODO: Update golden log header to reflect this name change
397 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
398 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
399 		struct display_stream_compressor *dsc = pool->dscs[i];
400 		struct dcn_dsc_state s = {0};
401 
402 		dsc->funcs->dsc_read_state(dsc, &s);
403 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
404 		dsc->inst,
405 			s.dsc_clock_en,
406 			s.dsc_slice_width,
407 			s.dsc_bits_per_pixel);
408 		DTN_INFO("\n");
409 	}
410 	DTN_INFO("\n");
411 
412 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
413 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
414 	for (i = 0; i < pool->stream_enc_count; i++) {
415 		struct stream_encoder *enc = pool->stream_enc[i];
416 		struct enc_state s = {0};
417 
418 		if (enc->funcs->enc_read_state) {
419 			enc->funcs->enc_read_state(enc, &s);
420 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
421 				enc->id,
422 				s.dsc_mode,
423 				s.sec_gsp_pps_line_num,
424 				s.vbid6_line_reference,
425 				s.vbid6_line_num,
426 				s.sec_gsp_pps_enable,
427 				s.sec_stream_enable);
428 			DTN_INFO("\n");
429 		}
430 	}
431 	DTN_INFO("\n");
432 
433 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
434 	for (i = 0; i < dc->link_count; i++) {
435 		struct link_encoder *lenc = dc->links[i]->link_enc;
436 
437 		struct link_enc_state s = {0};
438 
439 		if (lenc->funcs->read_state) {
440 			lenc->funcs->read_state(lenc, &s);
441 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
442 				i,
443 				s.dphy_fec_en,
444 				s.dphy_fec_ready_shadow,
445 				s.dphy_fec_active_status,
446 				s.dp_link_training_complete);
447 			DTN_INFO("\n");
448 		}
449 	}
450 	DTN_INFO("\n");
451 
452 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
453 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
454 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
455 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
456 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
457 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
458 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
459 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
460 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
461 
462 	log_mpc_crc(dc, log_ctx);
463 
464 	DTN_INFO_END();
465 }
466 
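/*
 * Check the OTG and HUBP underflow status; clear whichever is set and
 * return true if an underflow was detected.
 */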
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
468 {
469 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
470 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
471 
472 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
473 		tg->funcs->clear_optc_underflow(tg);
474 		return true;
475 	}
476 
477 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
478 		hubp->funcs->hubp_clear_underflow(hubp);
479 		return true;
480 	}
481 	return false;
482 }
483 
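/*
 * Allow or forbid power gating of the front-end domains. When gating is
 * disabled the domains are forced on: even domains are DCHUBP0-3, odd
 * domains are DPP0-3.
 */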
void dcn10_enable_power_gating_plane(
485 	struct dce_hwseq *hws,
486 	bool enable)
487 {
488 	bool force_on = true; /* disable power gating */
489 
490 	if (enable)
491 		force_on = false;
492 
493 	/* DCHUBP0/1/2/3 */
494 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
495 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
496 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
497 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
498 
499 	/* DPP0/1/2/3 */
500 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
501 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
502 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
503 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
504 }
505 
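/*
 * Take the controller out of VGA mode. If any of the four VGA engines is
 * enabled, disable them and trigger a VGA test render so the DCHUBP timing
 * gets updated correctly (see the HW engineer's notes below).
 */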
void dcn10_disable_vga(
507 	struct dce_hwseq *hws)
508 {
509 	unsigned int in_vga1_mode = 0;
510 	unsigned int in_vga2_mode = 0;
511 	unsigned int in_vga3_mode = 0;
512 	unsigned int in_vga4_mode = 0;
513 
514 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
515 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
516 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
517 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
518 
519 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
520 			in_vga3_mode == 0 && in_vga4_mode == 0)
521 		return;
522 
523 	REG_WRITE(D1VGA_CONTROL, 0);
524 	REG_WRITE(D2VGA_CONTROL, 0);
525 	REG_WRITE(D3VGA_CONTROL, 0);
526 	REG_WRITE(D4VGA_CONTROL, 0);
527 
528 	/* HW Engineer's Notes:
529 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
530 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
531 	 *
532 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
533 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
534 	 */
535 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
536 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
537 }
538 
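/*
 * Power gate or ungate one DPP domain and wait for its PGFSM to report the
 * requested power state.
 */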
void dcn10_dpp_pg_control(
540 		struct dce_hwseq *hws,
541 		unsigned int dpp_inst,
542 		bool power_on)
543 {
544 	uint32_t power_gate = power_on ? 0 : 1;
545 	uint32_t pwr_status = power_on ? 0 : 2;
546 
547 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
548 		return;
549 	if (REG(DOMAIN1_PG_CONFIG) == 0)
550 		return;
551 
552 	switch (dpp_inst) {
553 	case 0: /* DPP0 */
554 		REG_UPDATE(DOMAIN1_PG_CONFIG,
555 				DOMAIN1_POWER_GATE, power_gate);
556 
557 		REG_WAIT(DOMAIN1_PG_STATUS,
558 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
559 				1, 1000);
560 		break;
561 	case 1: /* DPP1 */
562 		REG_UPDATE(DOMAIN3_PG_CONFIG,
563 				DOMAIN3_POWER_GATE, power_gate);
564 
565 		REG_WAIT(DOMAIN3_PG_STATUS,
566 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
567 				1, 1000);
568 		break;
569 	case 2: /* DPP2 */
570 		REG_UPDATE(DOMAIN5_PG_CONFIG,
571 				DOMAIN5_POWER_GATE, power_gate);
572 
573 		REG_WAIT(DOMAIN5_PG_STATUS,
574 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
575 				1, 1000);
576 		break;
577 	case 3: /* DPP3 */
578 		REG_UPDATE(DOMAIN7_PG_CONFIG,
579 				DOMAIN7_POWER_GATE, power_gate);
580 
581 		REG_WAIT(DOMAIN7_PG_STATUS,
582 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
583 				1, 1000);
584 		break;
585 	default:
586 		BREAK_TO_DEBUGGER();
587 		break;
588 	}
589 }
590 
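/*
 * Power gate or ungate one DCHUBP domain and wait for its PGFSM to report
 * the requested power state.
 */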
void dcn10_hubp_pg_control(
592 		struct dce_hwseq *hws,
593 		unsigned int hubp_inst,
594 		bool power_on)
595 {
596 	uint32_t power_gate = power_on ? 0 : 1;
597 	uint32_t pwr_status = power_on ? 0 : 2;
598 
599 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
600 		return;
601 	if (REG(DOMAIN0_PG_CONFIG) == 0)
602 		return;
603 
604 	switch (hubp_inst) {
605 	case 0: /* DCHUBP0 */
606 		REG_UPDATE(DOMAIN0_PG_CONFIG,
607 				DOMAIN0_POWER_GATE, power_gate);
608 
609 		REG_WAIT(DOMAIN0_PG_STATUS,
610 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
611 				1, 1000);
612 		break;
613 	case 1: /* DCHUBP1 */
614 		REG_UPDATE(DOMAIN2_PG_CONFIG,
615 				DOMAIN2_POWER_GATE, power_gate);
616 
617 		REG_WAIT(DOMAIN2_PG_STATUS,
618 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
619 				1, 1000);
620 		break;
621 	case 2: /* DCHUBP2 */
622 		REG_UPDATE(DOMAIN4_PG_CONFIG,
623 				DOMAIN4_POWER_GATE, power_gate);
624 
625 		REG_WAIT(DOMAIN4_PG_STATUS,
626 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
627 				1, 1000);
628 		break;
629 	case 3: /* DCHUBP3 */
630 		REG_UPDATE(DOMAIN6_PG_CONFIG,
631 				DOMAIN6_POWER_GATE, power_gate);
632 
633 		REG_WAIT(DOMAIN6_PG_STATUS,
634 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
635 				1, 1000);
636 		break;
637 	default:
638 		BREAK_TO_DEBUGGER();
639 		break;
640 	}
641 }
642 
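/* Ungate the DPP and HUBP power domains used by the given front-end pipe. */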
static void power_on_plane(
644 	struct dce_hwseq *hws,
645 	int plane_id)
646 {
647 	DC_LOGGER_INIT(hws->ctx->logger);
648 	if (REG(DC_IP_REQUEST_CNTL)) {
649 		REG_SET(DC_IP_REQUEST_CNTL, 0,
650 				IP_REQUEST_EN, 1);
651 
652 		if (hws->funcs.dpp_pg_control)
653 			hws->funcs.dpp_pg_control(hws, plane_id, true);
654 
655 		if (hws->funcs.hubp_pg_control)
656 			hws->funcs.hubp_pg_control(hws, plane_id, true);
657 
658 		REG_SET(DC_IP_REQUEST_CNTL, 0,
659 				IP_REQUEST_EN, 0);
660 		DC_LOG_DEBUG(
661 				"Un-gated front end for pipe %d\n", plane_id);
662 	}
663 }
664 
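/* Revert the DEGVIDCN10_253 workaround: blank HUBP0 and power gate it again. */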
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
666 {
667 	struct dce_hwseq *hws = dc->hwseq;
668 	struct hubp *hubp = dc->res_pool->hubps[0];
669 
670 	if (!hws->wa_state.DEGVIDCN10_253_applied)
671 		return;
672 
673 	hubp->funcs->set_blank(hubp, true);
674 
675 	REG_SET(DC_IP_REQUEST_CNTL, 0,
676 			IP_REQUEST_EN, 1);
677 
678 	hws->funcs.hubp_pg_control(hws, 0, false);
679 	REG_SET(DC_IP_REQUEST_CNTL, 0,
680 			IP_REQUEST_EN, 0);
681 
682 	hws->wa_state.DEGVIDCN10_253_applied = false;
683 }
684 
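/*
 * DEGVIDCN10_253 workaround: once every HUBP is power gated, ungate and
 * unblank HUBP0 so the hardware can still enter stutter/self-refresh.
 */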
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
686 {
687 	struct dce_hwseq *hws = dc->hwseq;
688 	struct hubp *hubp = dc->res_pool->hubps[0];
689 	int i;
690 
691 	if (dc->debug.disable_stutter)
692 		return;
693 
694 	if (!hws->wa.DEGVIDCN10_253)
695 		return;
696 
697 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
698 		if (!dc->res_pool->hubps[i]->power_gated)
699 			return;
700 	}
701 
702 	/* all pipe power gated, apply work around to enable stutter. */
703 
704 	REG_SET(DC_IP_REQUEST_CNTL, 0,
705 			IP_REQUEST_EN, 1);
706 
707 	hws->funcs.hubp_pg_control(hws, 0, true);
708 	REG_SET(DC_IP_REQUEST_CNTL, 0,
709 			IP_REQUEST_EN, 0);
710 
711 	hubp->funcs->set_hubp_blank_en(hubp, false);
712 	hws->wa_state.DEGVIDCN10_253_applied = true;
713 }
714 
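/*
 * VBIOS golden init for DCN: run the power-gating command table for the
 * controller and each pipe, then restore the self-refresh allow state that
 * the command table may have changed (S0i3 workaround).
 */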
void dcn10_bios_golden_init(struct dc *dc)
716 {
717 	struct dce_hwseq *hws = dc->hwseq;
718 	struct dc_bios *bp = dc->ctx->dc_bios;
719 	int i;
720 	bool allow_self_fresh_force_enable = true;
721 
722 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
723 		return;
724 
725 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
726 		allow_self_fresh_force_enable =
727 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
728 
729 
730 	/* WA for making DF sleep when idle after resume from S0i3.
731 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
732 	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
733 	 * before calling command table and it changed to 1 after,
734 	 * it should be set back to 0.
735 	 */
736 
737 	/* initialize dcn global */
738 	bp->funcs->enable_disp_power_gating(bp,
739 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
740 
741 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
742 		/* initialize dcn per pipe */
743 		bp->funcs->enable_disp_power_gating(bp,
744 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
745 	}
746 
747 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
748 		if (allow_self_fresh_force_enable == false &&
749 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
750 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
751 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
752 
753 }
754 
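/*
 * Workaround for a false OPTC underflow raised while MPCC disconnects are
 * pending: wait for the disconnects on this stream's pipes and clear the
 * underflow status if it only appeared during the wait.
 */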
static void false_optc_underflow_wa(
756 		struct dc *dc,
757 		const struct dc_stream_state *stream,
758 		struct timing_generator *tg)
759 {
760 	int i;
761 	bool underflow;
762 
763 	if (!dc->hwseq->wa.false_optc_underflow)
764 		return;
765 
766 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
767 
768 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
769 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
770 
771 		if (old_pipe_ctx->stream != stream)
772 			continue;
773 
774 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
775 	}
776 
777 	if (tg->funcs->set_blank_data_double_buffer)
778 		tg->funcs->set_blank_data_double_buffer(tg, true);
779 
780 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
781 		tg->funcs->clear_optc_underflow(tg);
782 }
783 
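/*
 * Program the OTG for a stream: pixel clock, timing, blank color, then
 * enable the CRTC. Only the top pipe programs the shared back end.
 */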
enum dc_status dcn10_enable_stream_timing(
785 		struct pipe_ctx *pipe_ctx,
786 		struct dc_state *context,
787 		struct dc *dc)
788 {
789 	struct dc_stream_state *stream = pipe_ctx->stream;
790 	enum dc_color_space color_space;
791 	struct tg_color black_color = {0};
792 
	/* The calling loop programs pipe 0 (the parent pipe) first and sets up
	 * the back end for it. Child pipes share the back end with pipe 0, so
	 * no further programming is needed for them.
	 */
797 	if (pipe_ctx->top_pipe != NULL)
798 		return DC_OK;
799 
800 	/* TODO check if timing_changed, disable stream if timing changed */
801 
	/* The HW programming guide assumes the display was already disabled by
	 * the unplug sequence and that the OTG is stopped.
	 */
805 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
806 
807 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
808 			pipe_ctx->clock_source,
809 			&pipe_ctx->stream_res.pix_clk_params,
810 			&pipe_ctx->pll_settings)) {
811 		BREAK_TO_DEBUGGER();
812 		return DC_ERROR_UNEXPECTED;
813 	}
814 
815 	pipe_ctx->stream_res.tg->funcs->program_timing(
816 			pipe_ctx->stream_res.tg,
817 			&stream->timing,
818 			pipe_ctx->pipe_dlg_param.vready_offset,
819 			pipe_ctx->pipe_dlg_param.vstartup_start,
820 			pipe_ctx->pipe_dlg_param.vupdate_offset,
821 			pipe_ctx->pipe_dlg_param.vupdate_width,
822 			pipe_ctx->stream->signal,
823 			true);
824 
825 #if 0 /* move to after enable_crtc */
826 	/* TODO: OPP FMT, ABM. etc. should be done here. */
827 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
828 
829 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
830 
831 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
832 				pipe_ctx->stream_res.opp,
833 				&stream->bit_depth_params,
834 				&stream->clamping);
835 #endif
836 	/* program otg blank color */
837 	color_space = stream->output_color_space;
838 	color_space_to_black_color(dc, color_space, &black_color);
839 
	/*
	 * The way 4:2:0 is packed, two channels carry the Y component and one
	 * channel alternates between Cb and Cr, so both channels need the
	 * pixel value for Y.
	 */
845 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
846 		black_color.color_r_cr = black_color.color_g_y;
847 
848 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
849 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
850 				pipe_ctx->stream_res.tg,
851 				&black_color);
852 
853 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
854 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
855 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
856 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
857 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
858 	}
859 
	/* VTG is within DCHUB command block. DCFCLK is always on */
861 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
862 		BREAK_TO_DEBUGGER();
863 		return DC_ERROR_UNEXPECTED;
864 	}
865 
866 	/* TODO program crtc source select for non-virtual signal*/
867 	/* TODO program FMT */
868 	/* TODO setup link_enc */
869 	/* TODO set stream attributes */
870 	/* TODO program audio */
871 	/* TODO enable stream if timing changed */
872 	/* TODO unblank stream if DP */
873 
874 	return DC_OK;
875 }
876 
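/*
 * Tear down the back end of a pipe: disable the stream (or at least its
 * audio), release the audio endpoint, and for the top pipe disable the CRTC
 * and its OPTC clock.
 */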
static void dcn10_reset_back_end_for_pipe(
878 		struct dc *dc,
879 		struct pipe_ctx *pipe_ctx,
880 		struct dc_state *context)
881 {
882 	int i;
883 	struct dc_link *link;
884 	DC_LOGGER_INIT(dc->ctx->logger);
885 	if (pipe_ctx->stream_res.stream_enc == NULL) {
886 		pipe_ctx->stream = NULL;
887 		return;
888 	}
889 
890 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
891 		link = pipe_ctx->stream->link;
		/* DPMS may have already disabled the stream, or the dpms_off
		 * status may be wrong because of the fastboot feature: when the
		 * system resumes from S4 with only a second screen, dpms_off is
		 * true even though VBIOS lit up the eDP, so check the link
		 * status as well.
		 */
898 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
899 			core_link_disable_stream(pipe_ctx);
900 		else if (pipe_ctx->stream_res.audio)
901 			dc->hwss.disable_audio_stream(pipe_ctx);
902 
903 		if (pipe_ctx->stream_res.audio) {
904 			/*disable az_endpoint*/
905 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
906 
			/* free audio */
			if (dc->caps.dynamic_audio == true) {
				/* Audio endpoints are arbitrated dynamically:
				 * free the resource and reset is_audio_acquired.
				 */
911 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
912 						pipe_ctx->stream_res.audio, false);
913 				pipe_ctx->stream_res.audio = NULL;
914 			}
915 		}
916 	}
917 
	/* The calling loop resets the parent pipe (pipe 0) last. The back end
	 * is shared by all pipes and is disabled only when the parent pipe is
	 * disabled.
	 */
922 	if (pipe_ctx->top_pipe == NULL) {
923 
924 		if (pipe_ctx->stream_res.abm)
925 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
926 
927 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
928 
929 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
930 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
931 			pipe_ctx->stream_res.tg->funcs->set_drr(
932 					pipe_ctx->stream_res.tg, NULL);
933 	}
934 
935 	for (i = 0; i < dc->res_pool->pipe_count; i++)
936 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
937 			break;
938 
939 	if (i == dc->res_pool->pipe_count)
940 		return;
941 
942 	pipe_ctx->stream = NULL;
943 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
944 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
945 }
946 
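/*
 * Forced underflow recovery: blank every HUBP, soft-reset HUBBUB, pulse
 * HUBP_DISABLE, then release the reset and unblank (see the register
 * sequence below). Returns true if recovery was attempted.
 */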
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
948 {
	struct hubp *hubp;
950 	unsigned int i;
	bool need_recover = false;
952 
953 	if (!dc->debug.recovery_enabled)
954 		return false;
955 
956 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
957 		struct pipe_ctx *pipe_ctx =
958 			&dc->current_state->res_ctx.pipe_ctx[i];
959 		if (pipe_ctx != NULL) {
960 			hubp = pipe_ctx->plane_res.hubp;
961 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
962 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
963 					/* one pipe underflow, we will reset all the pipes*/
964 					need_recover = true;
965 				}
966 			}
967 		}
968 	}
969 	if (!need_recover)
970 		return false;
971 	/*
972 	DCHUBP_CNTL:HUBP_BLANK_EN=1
973 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
974 	DCHUBP_CNTL:HUBP_DISABLE=1
975 	DCHUBP_CNTL:HUBP_DISABLE=0
976 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
977 	DCSURF_PRIMARY_SURFACE_ADDRESS
978 	DCHUBP_CNTL:HUBP_BLANK_EN=0
979 	*/
980 
981 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
982 		struct pipe_ctx *pipe_ctx =
983 			&dc->current_state->res_ctx.pipe_ctx[i];
984 		if (pipe_ctx != NULL) {
985 			hubp = pipe_ctx->plane_res.hubp;
986 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
987 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
988 				hubp->funcs->set_hubp_blank_en(hubp, true);
989 		}
990 	}
991 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
992 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
993 
994 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
995 		struct pipe_ctx *pipe_ctx =
996 			&dc->current_state->res_ctx.pipe_ctx[i];
997 		if (pipe_ctx != NULL) {
998 			hubp = pipe_ctx->plane_res.hubp;
999 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1000 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1001 				hubp->funcs->hubp_disable_control(hubp, true);
1002 		}
1003 	}
1004 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1005 		struct pipe_ctx *pipe_ctx =
1006 			&dc->current_state->res_ctx.pipe_ctx[i];
1007 		if (pipe_ctx != NULL) {
1008 			hubp = pipe_ctx->plane_res.hubp;
1009 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1010 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, false);
1012 		}
1013 	}
1014 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1015 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1016 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1017 		struct pipe_ctx *pipe_ctx =
1018 			&dc->current_state->res_ctx.pipe_ctx[i];
1019 		if (pipe_ctx != NULL) {
1020 			hubp = pipe_ctx->plane_res.hubp;
1021 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1022 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, false);
1024 		}
1025 	}
1026 	return true;
1027 
1028 }
1029 
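/*
 * Verify that HUBBUB still allows p-state changes; if not, optionally dump
 * the HW state, break to the debugger and attempt forced recovery.
 */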
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1031 {
1032 	static bool should_log_hw_state; /* prevent hw state log by default */
1033 
1034 	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1035 		int i = 0;
1036 
1037 		if (should_log_hw_state)
1038 			dcn10_log_hw_state(dc, NULL);
1039 
1040 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1041 		BREAK_TO_DEBUGGER();
1042 		if (dcn10_hw_wa_force_recovery(dc)) {
1043 		/*check again*/
1044 			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1045 				BREAK_TO_DEBUGGER();
1046 		}
1047 	}
1048 }
1049 
/* trigger HW to start disconnecting the plane from the stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1052 {
1053 	struct dce_hwseq *hws = dc->hwseq;
1054 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1055 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1056 	struct mpc *mpc = dc->res_pool->mpc;
1057 	struct mpc_tree *mpc_tree_params;
1058 	struct mpcc *mpcc_to_remove = NULL;
1059 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1060 
1061 	mpc_tree_params = &(opp->mpc_tree_params);
1062 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1063 
1064 	/*Already reset*/
1065 	if (mpcc_to_remove == NULL)
1066 		return;
1067 
1068 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1069 	if (opp != NULL)
1070 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1071 
1072 	dc->optimized_required = true;
1073 
1074 	if (hubp->funcs->hubp_disconnect)
1075 		hubp->funcs->hubp_disconnect(hubp);
1076 
1077 	if (dc->debug.sanity_checks)
1078 		hws->funcs.verify_allow_pstate_change_high(dc);
1079 }
1080 
void dcn10_plane_atomic_power_down(struct dc *dc,
1082 		struct dpp *dpp,
1083 		struct hubp *hubp)
1084 {
1085 	struct dce_hwseq *hws = dc->hwseq;
1086 	DC_LOGGER_INIT(dc->ctx->logger);
1087 
1088 	if (REG(DC_IP_REQUEST_CNTL)) {
1089 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1090 				IP_REQUEST_EN, 1);
1091 
1092 		if (hws->funcs.dpp_pg_control)
1093 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1094 
1095 		if (hws->funcs.hubp_pg_control)
1096 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1097 
1098 		dpp->funcs->dpp_reset(dpp);
1099 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1100 				IP_REQUEST_EN, 0);
1101 		DC_LOG_DEBUG(
1102 				"Power gated front end %d\n", hubp->inst);
1103 	}
1104 }
1105 
/* Disable HW used by the plane.
 * Note: cannot disable until the disconnect is complete.
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1110 {
1111 	struct dce_hwseq *hws = dc->hwseq;
1112 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1113 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1114 	int opp_id = hubp->opp_id;
1115 
1116 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1117 
1118 	hubp->funcs->hubp_clk_cntl(hubp, false);
1119 
1120 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1121 
1122 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1123 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1124 				pipe_ctx->stream_res.opp,
1125 				false);
1126 
1127 	hubp->power_gated = true;
1128 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1129 
1130 	hws->funcs.plane_atomic_power_down(dc,
1131 			pipe_ctx->plane_res.dpp,
1132 			pipe_ctx->plane_res.hubp);
1133 
1134 	pipe_ctx->stream = NULL;
1135 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1136 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1137 	pipe_ctx->top_pipe = NULL;
1138 	pipe_ctx->bottom_pipe = NULL;
1139 	pipe_ctx->plane_state = NULL;
1140 }
1141 
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1143 {
1144 	struct dce_hwseq *hws = dc->hwseq;
1145 	DC_LOGGER_INIT(dc->ctx->logger);
1146 
1147 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1148 		return;
1149 
1150 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1151 
1152 	apply_DEGVIDCN10_253_wa(dc);
1153 
1154 	DC_LOG_DC("Power down front end %d\n",
1155 					pipe_ctx->pipe_idx);
1156 }
1157 
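/*
 * Bring every pipe into a known state at init: blank enabled OTGs, reset
 * the MPC muxes and disconnect/disable the front ends, except for pipes
 * that carry a seamless-boot stream.
 */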
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1159 {
1160 	int i;
1161 	struct dce_hwseq *hws = dc->hwseq;
1162 	bool can_apply_seamless_boot = false;
1163 
1164 	for (i = 0; i < context->stream_count; i++) {
1165 		if (context->streams[i]->apply_seamless_boot_optimization) {
1166 			can_apply_seamless_boot = true;
1167 			break;
1168 		}
1169 	}
1170 
1171 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1172 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1173 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1174 
		/* We assume pipe_ctx does not map irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL the pipe will be
		 * used, so don't disable it.
		 */
1179 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1180 			continue;
1181 
1182 		/* Blank controller using driver code instead of
1183 		 * command table.
1184 		 */
1185 		if (tg->funcs->is_tg_enabled(tg)) {
1186 			if (hws->funcs.init_blank != NULL) {
1187 				hws->funcs.init_blank(dc, tg);
1188 				tg->funcs->lock(tg);
1189 			} else {
1190 				tg->funcs->lock(tg);
1191 				tg->funcs->set_blank(tg, true);
1192 				hwss_wait_for_blank_complete(tg);
1193 			}
1194 		}
1195 	}
1196 
1197 	/* num_opp will be equal to number of mpcc */
1198 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1199 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1200 
1201 		/* Cannot reset the MPC mux if seamless boot */
1202 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1203 			continue;
1204 
1205 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1206 				dc->res_pool->mpc, i);
1207 	}
1208 
1209 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1210 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1211 		struct hubp *hubp = dc->res_pool->hubps[i];
1212 		struct dpp *dpp = dc->res_pool->dpps[i];
1213 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1214 
		/* We assume pipe_ctx does not map irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL the pipe will be
		 * used, so don't disable it.
		 */
1219 		if (can_apply_seamless_boot &&
1220 			pipe_ctx->stream != NULL &&
1221 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1222 				pipe_ctx->stream_res.tg)) {
1223 			// Enable double buffering for OTG_BLANK no matter if
1224 			// seamless boot is enabled or not to suppress global sync
1225 			// signals when OTG blanked. This is to prevent pipe from
1226 			// requesting data while in PSR.
1227 			tg->funcs->tg_init(tg);
1228 			hubp->power_gated = true;
1229 			continue;
1230 		}
1231 
1232 		/* Disable on the current state so the new one isn't cleared. */
1233 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1234 
1235 		dpp->funcs->dpp_reset(dpp);
1236 
1237 		pipe_ctx->stream_res.tg = tg;
1238 		pipe_ctx->pipe_idx = i;
1239 
1240 		pipe_ctx->plane_res.hubp = hubp;
1241 		pipe_ctx->plane_res.dpp = dpp;
1242 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1243 		hubp->mpcc_id = dpp->inst;
1244 		hubp->opp_id = OPP_ID_INVALID;
1245 		hubp->power_gated = false;
1246 
1247 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1248 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1249 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1250 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1251 
1252 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1253 
1254 		if (tg->funcs->is_tg_enabled(tg))
1255 			tg->funcs->unlock(tg);
1256 
1257 		dc->hwss.disable_plane(dc, pipe_ctx);
1258 
1259 		pipe_ctx->stream_res.tg = NULL;
1260 		pipe_ctx->plane_res.hubp = NULL;
1261 
1262 		tg->funcs->tg_init(tg);
1263 	}
1264 }
1265 
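/*
 * One-time hardware init: clocks, DCCG, VBIOS golden init, reference clock
 * setup, link encoders, optional power-down of lit DP displays, pipe init,
 * audio, backlight/ABM/DMCU and clock gating.
 */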
void dcn10_init_hw(struct dc *dc)
1267 {
1268 	int i, j;
1269 	struct abm *abm = dc->res_pool->abm;
1270 	struct dmcu *dmcu = dc->res_pool->dmcu;
1271 	struct dce_hwseq *hws = dc->hwseq;
1272 	struct dc_bios *dcb = dc->ctx->dc_bios;
1273 	struct resource_pool *res_pool = dc->res_pool;
1274 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1275 	bool   is_optimized_init_done = false;
1276 
1277 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1278 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1279 
1280 	// Initialize the dccg
1281 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1282 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1283 
1284 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1285 
1286 		REG_WRITE(REFCLK_CNTL, 0);
1287 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1288 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1289 
1290 		if (!dc->debug.disable_clock_gate) {
1291 			/* enable all DCN clock gating */
1292 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1293 
1294 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1295 
1296 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1297 		}
1298 
1299 		//Enable ability to power gate / don't force power on permanently
1300 		if (hws->funcs.enable_power_gating_plane)
1301 			hws->funcs.enable_power_gating_plane(hws, true);
1302 
1303 		return;
1304 	}
1305 
1306 	if (!dcb->funcs->is_accelerated_mode(dcb))
1307 		hws->funcs.disable_vga(dc->hwseq);
1308 
1309 	hws->funcs.bios_golden_init(dc);
1310 
1311 	if (dc->ctx->dc_bios->fw_info_valid) {
1312 		res_pool->ref_clocks.xtalin_clock_inKhz =
1313 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1314 
1315 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1316 			if (res_pool->dccg && res_pool->hubbub) {
1317 
1318 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1319 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1320 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1321 
1322 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1323 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1324 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1325 			} else {
1326 				// Not all ASICs have DCCG sw component
1327 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1328 						res_pool->ref_clocks.xtalin_clock_inKhz;
1329 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1330 						res_pool->ref_clocks.xtalin_clock_inKhz;
1331 			}
1332 		}
1333 	} else
1334 		ASSERT_CRITICAL(false);
1335 
1336 	for (i = 0; i < dc->link_count; i++) {
1337 		/* Power up AND update implementation according to the
1338 		 * required signal (which may be different from the
1339 		 * default signal on connector).
1340 		 */
1341 		struct dc_link *link = dc->links[i];
1342 
1343 		if (!is_optimized_init_done)
1344 			link->link_enc->funcs->hw_init(link->link_enc);
1345 
1346 		/* Check for enabled DIG to identify enabled display */
1347 		if (link->link_enc->funcs->is_dig_enabled &&
1348 			link->link_enc->funcs->is_dig_enabled(link->link_enc))
1349 			link->link_status.link_active = true;
1350 	}
1351 
1352 	/* Power gate DSCs */
1353 	if (!is_optimized_init_done) {
1354 		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1355 			if (hws->funcs.dsc_pg_control != NULL)
1356 				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1357 	}
1358 
1359 	/* Enable outbox notification feature of dmub */
1360 	if (dc->debug.enable_dmub_aux_for_legacy_ddc)
1361 		dmub_enable_outbox_notification(dc);
1362 
1363 	/* we want to turn off all dp displays before doing detection */
1364 	if (dc->config.power_down_display_on_boot) {
1365 		uint8_t dpcd_power_state = '\0';
1366 		enum dc_status status = DC_ERROR_UNEXPECTED;
1367 
1368 		for (i = 0; i < dc->link_count; i++) {
1369 			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1370 				continue;
1371 
1372 			/*
1373 			 * If any of the displays are lit up turn them off.
1374 			 * The reason is that some MST hubs cannot be turned off
1375 			 * completely until we tell them to do so.
1376 			 * If not turned off, then displays connected to MST hub
1377 			 * won't light up.
1378 			 */
1379 			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1380 							&dpcd_power_state, sizeof(dpcd_power_state));
1381 			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1382 				/* blank dp stream before power off receiver*/
1383 				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1384 					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
1385 
1386 					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1387 						if (fe == dc->res_pool->stream_enc[j]->id) {
1388 							dc->res_pool->stream_enc[j]->funcs->dp_blank(
1389 										dc->res_pool->stream_enc[j]);
1390 							break;
1391 						}
1392 					}
1393 				}
1394 				dp_receiver_power_ctrl(dc->links[i], false);
1395 			}
1396 		}
1397 	}
1398 
1399 	/* If taking control over from VBIOS, we may want to optimize our first
1400 	 * mode set, so we need to skip powering down pipes until we know which
1401 	 * pipes we want to use.
1402 	 * Otherwise, if taking control is not possible, we need to power
1403 	 * everything down.
1404 	 */
1405 	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1406 		if (!is_optimized_init_done) {
1407 			hws->funcs.init_pipes(dc, dc->current_state);
1408 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1409 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1410 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1411 		}
1412 	}
1413 
1414 	if (!is_optimized_init_done) {
1415 
1416 		for (i = 0; i < res_pool->audio_count; i++) {
1417 			struct audio *audio = res_pool->audios[i];
1418 
1419 			audio->funcs->hw_init(audio);
1420 		}
1421 
1422 		for (i = 0; i < dc->link_count; i++) {
1423 			struct dc_link *link = dc->links[i];
1424 
1425 			if (link->panel_cntl)
1426 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1427 		}
1428 
1429 		if (abm != NULL)
1430 			abm->funcs->abm_init(abm, backlight);
1431 
1432 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1433 			dmcu->funcs->dmcu_init(dmcu);
1434 	}
1435 
1436 	if (abm != NULL && dmcu != NULL)
1437 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1438 
	/* Power up AFMT HDMI memory. TODO: may move to output enable/disable to save power */
1440 	if (!is_optimized_init_done)
1441 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1442 
1443 	if (!dc->debug.disable_clock_gate) {
1444 		/* enable all DCN clock gating */
1445 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1446 
1447 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1448 
1449 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1450 	}
1451 	if (hws->funcs.enable_power_gating_plane)
1452 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1453 
1454 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1455 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1456 }
1457 
1458 /* In headless boot cases, DIG may be turned
1459  * on which causes HW/SW discrepancies.
1460  * To avoid this, power down hardware on boot
1461  * if DIG is turned on
1462  */
void dcn10_power_down_on_boot(struct dc *dc)
1464 {
1465 	struct dc_link *edp_links[MAX_NUM_EDP];
1466 	struct dc_link *edp_link;
1467 	int edp_num;
1468 	int i = 0;
1469 
1470 	get_edp_links(dc, edp_links, &edp_num);
1471 
1472 	if (edp_num) {
1473 		for (i = 0; i < edp_num; i++) {
1474 			edp_link = edp_links[i];
1475 			if (edp_link->link_enc->funcs->is_dig_enabled &&
1476 					edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1477 					dc->hwseq->funcs.edp_backlight_control &&
1478 					dc->hwss.power_down &&
1479 					dc->hwss.edp_power_control) {
1480 				dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1481 				dc->hwss.power_down(dc);
1482 				dc->hwss.edp_power_control(edp_link, false);
1483 			}
1484 		}
1485 	} else {
1486 		for (i = 0; i < dc->link_count; i++) {
1487 			struct dc_link *link = dc->links[i];
1488 
1489 			if (link->link_enc->funcs->is_dig_enabled &&
1490 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1491 					dc->hwss.power_down) {
1492 				dc->hwss.power_down(dc);
1493 				break;
1494 			}
1495 
1496 		}
1497 	}
1498 
1499 	/*
1500 	 * Call update_clocks with empty context
1501 	 * to send DISPLAY_OFF
1502 	 * Otherwise DISPLAY_OFF may not be asserted
1503 	 */
1504 	if (dc->clk_mgr->funcs->set_low_power_state)
1505 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1506 }
1507 
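/*
 * Reset the back end of every top pipe whose stream was removed or needs to
 * be reprogrammed in the new state, then power down its old clock source.
 */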
void dcn10_reset_hw_ctx_wrap(
1509 		struct dc *dc,
1510 		struct dc_state *context)
1511 {
1512 	int i;
1513 	struct dce_hwseq *hws = dc->hwseq;
1514 
1515 	/* Reset Back End*/
1516 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1517 		struct pipe_ctx *pipe_ctx_old =
1518 			&dc->current_state->res_ctx.pipe_ctx[i];
1519 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1520 
1521 		if (!pipe_ctx_old->stream)
1522 			continue;
1523 
1524 		if (pipe_ctx_old->top_pipe)
1525 			continue;
1526 
1527 		if (!pipe_ctx->stream ||
1528 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1529 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1530 
1531 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1532 			if (hws->funcs.enable_stream_gating)
1533 				hws->funcs.enable_stream_gating(dc, pipe_ctx);
1534 			if (old_clk)
1535 				old_clk->funcs->cs_power_down(old_clk);
1536 		}
1537 	}
1538 }
1539 
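/*
 * For side-by-side or top-and-bottom stereo on the secondary split pipe,
 * temporarily swap in the right-eye address and return true so the caller
 * can restore the left-eye address after the flip is programmed.
 */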
static bool patch_address_for_sbs_tb_stereo(
1541 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1542 {
1543 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1544 	bool sec_split = pipe_ctx->top_pipe &&
1545 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1546 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1547 		(pipe_ctx->stream->timing.timing_3d_format ==
1548 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1549 		 pipe_ctx->stream->timing.timing_3d_format ==
1550 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1551 		*addr = plane_state->address.grph_stereo.left_addr;
1552 		plane_state->address.grph_stereo.left_addr =
1553 		plane_state->address.grph_stereo.right_addr;
1554 		return true;
1555 	} else {
1556 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1557 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1558 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1559 			plane_state->address.grph_stereo.right_addr =
1560 			plane_state->address.grph_stereo.left_addr;
1561 			plane_state->address.grph_stereo.right_meta_addr =
1562 			plane_state->address.grph_stereo.left_meta_addr;
1563 		}
1564 	}
1565 	return false;
1566 }
1567 
1568 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1569 {
1570 	bool addr_patched = false;
1571 	PHYSICAL_ADDRESS_LOC addr;
1572 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1573 
1574 	if (plane_state == NULL)
1575 		return;
1576 
1577 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1578 
1579 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1580 			pipe_ctx->plane_res.hubp,
1581 			&plane_state->address,
1582 			plane_state->flip_immediate);
1583 
1584 	plane_state->status.requested_address = plane_state->address;
1585 
1586 	if (plane_state->flip_immediate)
1587 		plane_state->status.current_address = plane_state->address;
1588 
1589 	if (addr_patched)
1590 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1591 }
1592 
1593 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1594 			const struct dc_plane_state *plane_state)
1595 {
1596 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1597 	const struct dc_transfer_func *tf = NULL;
1598 	bool result = true;
1599 
1600 	if (dpp_base == NULL)
1601 		return false;
1602 
1603 	if (plane_state->in_transfer_func)
1604 		tf = plane_state->in_transfer_func;
1605 
1606 	if (plane_state->gamma_correction &&
1607 		!dpp_base->ctx->dc->debug.always_use_regamma
1608 		&& !plane_state->gamma_correction->is_identity
1609 			&& dce_use_lut(plane_state->format))
1610 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1611 
1612 	if (tf == NULL)
1613 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1614 	else if (tf->type == TF_TYPE_PREDEFINED) {
1615 		switch (tf->tf) {
1616 		case TRANSFER_FUNCTION_SRGB:
1617 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1618 			break;
1619 		case TRANSFER_FUNCTION_BT709:
1620 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1621 			break;
1622 		case TRANSFER_FUNCTION_LINEAR:
1623 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1624 			break;
1625 		case TRANSFER_FUNCTION_PQ:
1626 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1627 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1628 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1629 			result = true;
1630 			break;
1631 		default:
1632 			result = false;
1633 			break;
1634 		}
1635 	} else if (tf->type == TF_TYPE_BYPASS) {
1636 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1637 	} else {
1638 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1639 					&dpp_base->degamma_params);
1640 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1641 				&dpp_base->degamma_params);
1642 		result = true;
1643 	}
1644 
1645 	return result;
1646 }
1647 
1648 #define MAX_NUM_HW_POINTS 0x200
1649 
1650 static void log_tf(struct dc_context *ctx,
1651 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1652 {
1653 	// DC_LOG_GAMMA is default logging of all hw points
1654 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1655 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1656 	int i = 0;
1657 
1658 	DC_LOGGER_INIT(ctx->logger);
1659 	DC_LOG_GAMMA("Gamma Correction TF");
1660 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1661 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1662 
1663 	for (i = 0; i < hw_points_num; i++) {
1664 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1665 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1666 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1667 	}
1668 
1669 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1670 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1671 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1672 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1673 	}
1674 }
1675 
1676 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1677 				const struct dc_stream_state *stream)
1678 {
1679 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1680 
1681 	if (dpp == NULL)
1682 		return false;
1683 
1684 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1685 
1686 	if (stream->out_transfer_func &&
1687 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1688 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1689 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1690 
1691 	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
1692 	 * full update.
1693 	 */
1694 	else if (cm_helper_translate_curve_to_hw_format(
1695 			stream->out_transfer_func,
1696 			&dpp->regamma_params, false)) {
1697 		dpp->funcs->dpp_program_regamma_pwl(
1698 				dpp,
1699 				&dpp->regamma_params, OPP_REGAMMA_USER);
1700 	} else
1701 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1702 
1703 	if (stream != NULL && stream->ctx != NULL &&
1704 			stream->out_transfer_func != NULL) {
1705 		log_tf(stream->ctx,
1706 				stream->out_transfer_func,
1707 				dpp->regamma_params.hw_points_num);
1708 	}
1709 
1710 	return true;
1711 }
1712 
1713 void dcn10_pipe_control_lock(
1714 	struct dc *dc,
1715 	struct pipe_ctx *pipe,
1716 	bool lock)
1717 {
1718 	struct dce_hwseq *hws = dc->hwseq;
1719 
1720 	/* Use the TG master update lock to lock everything on the TG;
1721 	 * therefore only the top pipe needs to lock.
1722 	 */
1723 	if (!pipe || pipe->top_pipe)
1724 		return;
1725 
1726 	if (dc->debug.sanity_checks)
1727 		hws->funcs.verify_allow_pstate_change_high(dc);
1728 
1729 	if (lock)
1730 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1731 	else
1732 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1733 
1734 	if (dc->debug.sanity_checks)
1735 		hws->funcs.verify_allow_pstate_change_high(dc);
1736 }
1737 
1738 /**
1739  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1740  *
1741  * Software keepout workaround to prevent cursor update locking from stalling
1742  * out cursor updates indefinitely, or old values from being retained in
1743  * the case where the viewport changes in the same frame as the cursor.
1744  *
1745  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1746  * too close to VUPDATE, then stall out until VUPDATE finishes.
1747  *
1748  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1749  *       to avoid the need for this workaround.
1750  */
1751 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1752 {
1753 	struct dc_stream_state *stream = pipe_ctx->stream;
1754 	struct crtc_position position;
1755 	uint32_t vupdate_start, vupdate_end;
1756 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1757 	unsigned int us_per_line, us_vupdate;
1758 
1759 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1760 		return;
1761 
1762 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1763 		return;
1764 
1765 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1766 				       &vupdate_end);
1767 
1768 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1769 	vpos = position.vertical_count;
1770 
1771 	/* Avoid wraparound calculation issues */
1772 	vupdate_start += stream->timing.v_total;
1773 	vupdate_end += stream->timing.v_total;
1774 	vpos += stream->timing.v_total;
1775 
1776 	if (vpos <= vupdate_start) {
1777 		/* VPOS is in VACTIVE or back porch. */
1778 		lines_to_vupdate = vupdate_start - vpos;
1779 	} else if (vpos > vupdate_end) {
1780 		/* VPOS is in the front porch. */
1781 		return;
1782 	} else {
1783 		/* VPOS is in VUPDATE. */
1784 		lines_to_vupdate = 0;
1785 	}
1786 
1787 	/* Calculate time until VUPDATE in microseconds. */
1788 	us_per_line =
1789 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1790 	us_to_vupdate = lines_to_vupdate * us_per_line;
1791 
1792 	/* 70 us is a conservative estimate of cursor update time */
1793 	if (us_to_vupdate > 70)
1794 		return;
1795 
1796 	/* Stall out until the cursor update completes. */
1797 	if (vupdate_end < vupdate_start)
1798 		vupdate_end += stream->timing.v_total;
1799 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1800 	udelay(us_to_vupdate + us_vupdate);
1801 }
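
/*
 * Worked example (illustrative, not from the original source), assuming a
 * CEA 1080p60 timing with h_total = 2200, v_total = 1125 and
 * pix_clk_100hz = 1485000 (148.5 MHz):
 *
 *	us_per_line      = 2200 * 10000 / 1485000          ~= 14 us
 *	lines_to_vupdate = 4   ->  us_to_vupdate ~= 56 us  (<= 70, so stall)
 *	VUPDATE width of 2 lines -> us_vupdate = (1 + 1) * 14 = 28 us
 *	udelay(56 + 28) ~= 84 us before the cursor lock is taken
 *
 * When us_to_vupdate exceeds 70 us, the cursor update is considered safe
 * and no delay is inserted.
 */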
1802 
1803 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1804 {
1805 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1806 	if (!pipe || pipe->top_pipe)
1807 		return;
1808 
1809 	/* Prevent cursor lock from stalling out cursor updates. */
1810 	if (lock)
1811 		delay_cursor_until_vupdate(dc, pipe);
1812 
1813 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1814 		union dmub_hw_lock_flags hw_locks = { 0 };
1815 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1816 
1817 		hw_locks.bits.lock_cursor = 1;
1818 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1819 
1820 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1821 					lock,
1822 					&hw_locks,
1823 					&inst_flags);
1824 	} else
1825 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1826 				pipe->stream_res.opp->inst, lock);
1827 }
1828 
1829 static bool wait_for_reset_trigger_to_occur(
1830 	struct dc_context *dc_ctx,
1831 	struct timing_generator *tg)
1832 {
1833 	bool rc = false;
1834 
1835 	/* To avoid an endless loop, we wait at most
1836 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1837 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1838 	int i;
1839 
1840 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1841 
1842 		if (!tg->funcs->is_counter_moving(tg)) {
1843 			DC_ERROR("TG counter is not moving!\n");
1844 			break;
1845 		}
1846 
1847 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1848 			rc = true;
1849 			/* usually occurs at i=1 */
1850 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1851 					i);
1852 			break;
1853 		}
1854 
1855 		/* Wait for one frame. */
1856 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1857 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1858 	}
1859 
1860 	if (!rc)
1861 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1862 
1863 	return rc;
1864 }
1865 
1866 uint64_t reduceSizeAndFraction(
1867 	uint64_t *numerator,
1868 	uint64_t *denominator,
1869 	bool checkUint32Bounary)
1870 {
1871 	int i;
1872 	bool ret = checkUint32Bounary == false;
1873 	uint64_t max_int32 = 0xffffffff;
1874 	uint64_t num, denom;
1875 	static const uint16_t prime_numbers[] = {
1876 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1877 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1878 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1879 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1880 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1881 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1882 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1883 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1884 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1885 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1886 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1887 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1888 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1889 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1890 		941, 947, 953, 967, 971, 977, 983, 991, 997};
1891 	int count = ARRAY_SIZE(prime_numbers);
1892 
1893 	num = *numerator;
1894 	denom = *denominator;
1895 	for (i = 0; i < count; i++) {
1896 		uint32_t num_remainder, denom_remainder;
1897 		uint64_t num_result, denom_result;
1898 		if (checkUint32Bounary &&
1899 			num <= max_int32 && denom <= max_int32) {
1900 			ret = true;
1901 			break;
1902 		}
1903 		do {
1904 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
1905 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
1906 			if (num_remainder == 0 && denom_remainder == 0) {
1907 				num = num_result;
1908 				denom = denom_result;
1909 			}
1910 		} while (num_remainder == 0 && denom_remainder == 0);
1911 	}
1912 	*numerator = num;
1913 	*denominator = denom;
1914 	return ret;
1915 }
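
/*
 * Usage sketch (illustrative): reduceSizeAndFraction() strips common prime
 * factors from a numerator/denominator pair, e.g.
 *
 *	uint64_t num = 148500000, den = 270000000;
 *	reduceSizeAndFraction(&num, &den, false);	// num = 11, den = 20
 *
 * With checkUint32Bounary == true the loop instead stops as soon as both
 * values fit in 32 bits and reports success; if the prime table is exhausted
 * before that happens, the caller treats the pair as not synchronizable
 * (see dcn10_align_pixel_clocks below).
 */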
1916 
1917 bool is_low_refresh_rate(struct pipe_ctx *pipe)
1918 {
1919 	uint32_t master_pipe_refresh_rate =
1920 		pipe->stream->timing.pix_clk_100hz * 100 /
1921 		pipe->stream->timing.h_total /
1922 		pipe->stream->timing.v_total;
1923 	return master_pipe_refresh_rate <= 30;
1924 }
1925 
1926 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
1927 {
1928 	uint32_t clock_divider = 1;
1929 	uint32_t numpipes = 1;
1930 
1931 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
1932 		clock_divider *= 2;
1933 
1934 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
1935 		clock_divider *= 2;
1936 
1937 	while (pipe->next_odm_pipe) {
1938 		pipe = pipe->next_odm_pipe;
1939 		numpipes++;
1940 	}
1941 	clock_divider *= numpipes;
1942 
1943 	return clock_divider;
1944 }
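
/*
 * Example (illustrative): a YCbCr 4:2:0 stream refreshing at or below 30 Hz,
 * split across two ODM pipes, with account_low_refresh_rate = true gives
 *
 *	divider = 2 (low refresh) * 2 (4:2:0) * 2 (ODM pipes) = 8
 *
 * is_low_refresh_rate() derives the refresh rate from the timing itself,
 * e.g. pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125 yields
 * 1485000 * 100 / 2200 / 1125 = 60 Hz.
 */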
1945 
1946 int dcn10_align_pixel_clocks(
1947 	struct dc *dc,
1948 	int group_size,
1949 	struct pipe_ctx *grouped_pipes[])
1950 {
1951 	struct dc_context *dc_ctx = dc->ctx;
1952 	int i, master = -1, embedded = -1;
1953 	struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
1954 	uint64_t phase[MAX_PIPES];
1955 	uint64_t modulo[MAX_PIPES];
1956 	unsigned int pclk;
1957 
1958 	uint32_t embedded_pix_clk_100hz;
1959 	uint16_t embedded_h_total;
1960 	uint16_t embedded_v_total;
1961 	bool clamshell_closed = false;
1962 	uint32_t dp_ref_clk_100hz =
1963 		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
1964 
1965 	if (dc->config.vblank_alignment_dto_params &&
1966 		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
1967 		clamshell_closed =
1968 			(dc->config.vblank_alignment_dto_params >> 63);
1969 		embedded_h_total =
1970 			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
1971 		embedded_v_total =
1972 			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
1973 		embedded_pix_clk_100hz =
1974 			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
1975 
1976 		for (i = 0; i < group_size; i++) {
1977 			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
1978 					grouped_pipes[i]->stream_res.tg,
1979 					&hw_crtc_timing[i]);
1980 			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1981 				dc->res_pool->dp_clock_source,
1982 				grouped_pipes[i]->stream_res.tg->inst,
1983 				&pclk);
1984 			hw_crtc_timing[i].pix_clk_100hz = pclk;
1985 			if (dc_is_embedded_signal(
1986 					grouped_pipes[i]->stream->signal)) {
1987 				embedded = i;
1988 				master = i;
1989 				phase[i] = embedded_pix_clk_100hz*100;
1990 				modulo[i] = dp_ref_clk_100hz*100;
1991 			} else {
1992 
1993 				phase[i] = (uint64_t)embedded_pix_clk_100hz*
1994 					hw_crtc_timing[i].h_total*
1995 					hw_crtc_timing[i].v_total;
1996 				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
1997 				modulo[i] = (uint64_t)dp_ref_clk_100hz*
1998 					embedded_h_total*
1999 					embedded_v_total;
2000 
2001 				if (reduceSizeAndFraction(&phase[i],
2002 						&modulo[i], true) == false) {
2003 					/*
2004 					 * This stops this timing from being
2005 					 * reported as synchronizable.
2006 					 */
2007 					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2008 					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2009 				}
2010 			}
2011 		}
2012 
2013 		for (i = 0; i < group_size; i++) {
2014 			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2015 				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2016 					dc->res_pool->dp_clock_source,
2017 					grouped_pipes[i]->stream_res.tg->inst,
2018 					phase[i], modulo[i]);
2019 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2020 					dc->res_pool->dp_clock_source,
2021 					grouped_pipes[i]->stream_res.tg->inst, &pclk);
2022 				grouped_pipes[i]->stream->timing.pix_clk_100hz =
2023 					pclk * get_clock_divider(grouped_pipes[i], false);
2024 				if (master == -1)
2025 					master = i;
2026 			}
2027 		}
2028 
2029 	}
2030 	return master;
2031 }
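
/*
 * Sketch of the math above (assuming the DP DTO output frequency is
 * dprefclk * phase / modulo):
 *
 *	dto_out[i] = embedded_pclk * (h_total[i] * v_total[i]) /
 *		     (embedded_h_total * embedded_v_total) / divider[i]
 *
 * After multiplying back by the pipe's clock divider, every synchronized
 * pipe ends up with the same frame rate as the embedded (eDP) panel, or
 * half of it for low-refresh (<= 30 Hz) streams because of the extra factor
 * of 2 that get_clock_divider() applies when phase[i] is computed.
 */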
2032 
2033 void dcn10_enable_vblanks_synchronization(
2034 	struct dc *dc,
2035 	int group_index,
2036 	int group_size,
2037 	struct pipe_ctx *grouped_pipes[])
2038 {
2039 	struct dc_context *dc_ctx = dc->ctx;
2040 	struct output_pixel_processor *opp;
2041 	struct timing_generator *tg;
2042 	int i, width, height, master;
2043 
2044 	for (i = 1; i < group_size; i++) {
2045 		opp = grouped_pipes[i]->stream_res.opp;
2046 		tg = grouped_pipes[i]->stream_res.tg;
2047 		tg->funcs->get_otg_active_size(tg, &width, &height);
2048 		if (opp->funcs->opp_program_dpg_dimensions)
2049 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2050 	}
2051 
2052 	for (i = 0; i < group_size; i++) {
2053 		if (grouped_pipes[i]->stream == NULL)
2054 			continue;
2055 		grouped_pipes[i]->stream->vblank_synchronized = false;
2056 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2057 	}
2058 
2059 	DC_SYNC_INFO("Aligning DP DTOs\n");
2060 
2061 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2062 
2063 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2064 
2065 	if (master >= 0) {
2066 		for (i = 0; i < group_size; i++) {
2067 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2068 				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2069 					grouped_pipes[master]->stream_res.tg,
2070 					grouped_pipes[i]->stream_res.tg,
2071 					grouped_pipes[master]->stream->timing.pix_clk_100hz,
2072 					grouped_pipes[i]->stream->timing.pix_clk_100hz,
2073 					get_clock_divider(grouped_pipes[master], false),
2074 					get_clock_divider(grouped_pipes[i], false));
2075 			grouped_pipes[i]->stream->vblank_synchronized = true;
2076 		}
2077 		grouped_pipes[master]->stream->vblank_synchronized = true;
2078 		DC_SYNC_INFO("Sync complete\n");
2079 	}
2080 
2081 	for (i = 1; i < group_size; i++) {
2082 		opp = grouped_pipes[i]->stream_res.opp;
2083 		tg = grouped_pipes[i]->stream_res.tg;
2084 		tg->funcs->get_otg_active_size(tg, &width, &height);
2085 		if (opp->funcs->opp_program_dpg_dimensions)
2086 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2087 	}
2088 }
2089 
2090 void dcn10_enable_timing_synchronization(
2091 	struct dc *dc,
2092 	int group_index,
2093 	int group_size,
2094 	struct pipe_ctx *grouped_pipes[])
2095 {
2096 	struct dc_context *dc_ctx = dc->ctx;
2097 	struct output_pixel_processor *opp;
2098 	struct timing_generator *tg;
2099 	int i, width, height;
2100 
2101 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
2102 
2103 	for (i = 1; i < group_size; i++) {
2104 		opp = grouped_pipes[i]->stream_res.opp;
2105 		tg = grouped_pipes[i]->stream_res.tg;
2106 		tg->funcs->get_otg_active_size(tg, &width, &height);
2107 		if (opp->funcs->opp_program_dpg_dimensions)
2108 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2109 	}
2110 
2111 	for (i = 0; i < group_size; i++) {
2112 		if (grouped_pipes[i]->stream == NULL)
2113 			continue;
2114 		grouped_pipes[i]->stream->vblank_synchronized = false;
2115 	}
2116 
2117 	for (i = 1; i < group_size; i++)
2118 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2119 				grouped_pipes[i]->stream_res.tg,
2120 				grouped_pipes[0]->stream_res.tg->inst);
2121 
2122 	DC_SYNC_INFO("Waiting for trigger\n");
2123 
2124 	/* Only need to check 1 pipe for the reset, as all the others are
2125 	 * synchronized. Look at the last pipe programmed to reset.
2126 	 */
2127 
2128 	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2129 	for (i = 1; i < group_size; i++)
2130 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2131 				grouped_pipes[i]->stream_res.tg);
2132 
2133 	for (i = 1; i < group_size; i++) {
2134 		opp = grouped_pipes[i]->stream_res.opp;
2135 		tg = grouped_pipes[i]->stream_res.tg;
2136 		tg->funcs->get_otg_active_size(tg, &width, &height);
2137 		if (opp->funcs->opp_program_dpg_dimensions)
2138 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2139 	}
2140 
2141 	DC_SYNC_INFO("Sync complete\n");
2142 }
2143 
2144 void dcn10_enable_per_frame_crtc_position_reset(
2145 	struct dc *dc,
2146 	int group_size,
2147 	struct pipe_ctx *grouped_pipes[])
2148 {
2149 	struct dc_context *dc_ctx = dc->ctx;
2150 	int i;
2151 
2152 	DC_SYNC_INFO("Setting up\n");
2153 	for (i = 0; i < group_size; i++)
2154 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2155 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2156 					grouped_pipes[i]->stream_res.tg,
2157 					0,
2158 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2159 
2160 	DC_SYNC_INFO("Waiting for trigger\n");
2161 
2162 	for (i = 0; i < group_size; i++)
2163 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2164 
2165 	DC_SYNC_INFO("Multi-display sync is complete\n");
2166 }
2167 
2168 /*static void print_rq_dlg_ttu(
2169 		struct dc *dc,
2170 		struct pipe_ctx *pipe_ctx)
2171 {
2172 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2173 			"\n============== DML TTU Output parameters [%d] ==============\n"
2174 			"qos_level_low_wm: %d, \n"
2175 			"qos_level_high_wm: %d, \n"
2176 			"min_ttu_vblank: %d, \n"
2177 			"qos_level_flip: %d, \n"
2178 			"refcyc_per_req_delivery_l: %d, \n"
2179 			"qos_level_fixed_l: %d, \n"
2180 			"qos_ramp_disable_l: %d, \n"
2181 			"refcyc_per_req_delivery_pre_l: %d, \n"
2182 			"refcyc_per_req_delivery_c: %d, \n"
2183 			"qos_level_fixed_c: %d, \n"
2184 			"qos_ramp_disable_c: %d, \n"
2185 			"refcyc_per_req_delivery_pre_c: %d\n"
2186 			"=============================================================\n",
2187 			pipe_ctx->pipe_idx,
2188 			pipe_ctx->ttu_regs.qos_level_low_wm,
2189 			pipe_ctx->ttu_regs.qos_level_high_wm,
2190 			pipe_ctx->ttu_regs.min_ttu_vblank,
2191 			pipe_ctx->ttu_regs.qos_level_flip,
2192 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
2193 			pipe_ctx->ttu_regs.qos_level_fixed_l,
2194 			pipe_ctx->ttu_regs.qos_ramp_disable_l,
2195 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
2196 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
2197 			pipe_ctx->ttu_regs.qos_level_fixed_c,
2198 			pipe_ctx->ttu_regs.qos_ramp_disable_c,
2199 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
2200 			);
2201 
2202 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2203 			"\n============== DML DLG Output parameters [%d] ==============\n"
2204 			"refcyc_h_blank_end: %d, \n"
2205 			"dlg_vblank_end: %d, \n"
2206 			"min_dst_y_next_start: %d, \n"
2207 			"refcyc_per_htotal: %d, \n"
2208 			"refcyc_x_after_scaler: %d, \n"
2209 			"dst_y_after_scaler: %d, \n"
2210 			"dst_y_prefetch: %d, \n"
2211 			"dst_y_per_vm_vblank: %d, \n"
2212 			"dst_y_per_row_vblank: %d, \n"
2213 			"ref_freq_to_pix_freq: %d, \n"
2214 			"vratio_prefetch: %d, \n"
2215 			"refcyc_per_pte_group_vblank_l: %d, \n"
2216 			"refcyc_per_meta_chunk_vblank_l: %d, \n"
2217 			"dst_y_per_pte_row_nom_l: %d, \n"
2218 			"refcyc_per_pte_group_nom_l: %d, \n",
2219 			pipe_ctx->pipe_idx,
2220 			pipe_ctx->dlg_regs.refcyc_h_blank_end,
2221 			pipe_ctx->dlg_regs.dlg_vblank_end,
2222 			pipe_ctx->dlg_regs.min_dst_y_next_start,
2223 			pipe_ctx->dlg_regs.refcyc_per_htotal,
2224 			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
2225 			pipe_ctx->dlg_regs.dst_y_after_scaler,
2226 			pipe_ctx->dlg_regs.dst_y_prefetch,
2227 			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
2228 			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
2229 			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
2230 			pipe_ctx->dlg_regs.vratio_prefetch,
2231 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
2232 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
2233 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
2234 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
2235 			);
2236 
2237 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2238 			"\ndst_y_per_meta_row_nom_l: %d, \n"
2239 			"refcyc_per_meta_chunk_nom_l: %d, \n"
2240 			"refcyc_per_line_delivery_pre_l: %d, \n"
2241 			"refcyc_per_line_delivery_l: %d, \n"
2242 			"vratio_prefetch_c: %d, \n"
2243 			"refcyc_per_pte_group_vblank_c: %d, \n"
2244 			"refcyc_per_meta_chunk_vblank_c: %d, \n"
2245 			"dst_y_per_pte_row_nom_c: %d, \n"
2246 			"refcyc_per_pte_group_nom_c: %d, \n"
2247 			"dst_y_per_meta_row_nom_c: %d, \n"
2248 			"refcyc_per_meta_chunk_nom_c: %d, \n"
2249 			"refcyc_per_line_delivery_pre_c: %d, \n"
2250 			"refcyc_per_line_delivery_c: %d \n"
2251 			"========================================================\n",
2252 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
2253 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
2254 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
2255 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
2256 			pipe_ctx->dlg_regs.vratio_prefetch_c,
2257 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
2258 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
2259 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
2260 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
2261 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
2262 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
2263 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
2264 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
2265 			);
2266 
2267 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2268 			"\n============== DML RQ Output parameters [%d] ==============\n"
2269 			"chunk_size: %d \n"
2270 			"min_chunk_size: %d \n"
2271 			"meta_chunk_size: %d \n"
2272 			"min_meta_chunk_size: %d \n"
2273 			"dpte_group_size: %d \n"
2274 			"mpte_group_size: %d \n"
2275 			"swath_height: %d \n"
2276 			"pte_row_height_linear: %d \n"
2277 			"========================================================\n",
2278 			pipe_ctx->pipe_idx,
2279 			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
2280 			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
2281 			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
2282 			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
2283 			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
2284 			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
2285 			pipe_ctx->rq_regs.rq_regs_l.swath_height,
2286 			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
2287 			);
2288 }
2289 */
2290 
2291 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2292 		struct vm_system_aperture_param *apt,
2293 		struct dce_hwseq *hws)
2294 {
2295 	PHYSICAL_ADDRESS_LOC physical_page_number;
2296 	uint32_t logical_addr_low;
2297 	uint32_t logical_addr_high;
2298 
2299 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2300 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2301 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2302 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2303 
2304 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2305 			LOGICAL_ADDR, &logical_addr_low);
2306 
2307 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2308 			LOGICAL_ADDR, &logical_addr_high);
2309 
2310 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2311 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2312 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2313 }
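
/*
 * The shifts above turn register units into byte addresses: the default page
 * number is in 4 KiB pages (<< 12) and the LOW/HIGH logical addresses are in
 * 256 KiB units (<< 18). For example (illustrative values), a physical page
 * number of 0x100000 yields sys_default = 0x1_0000_0000 (4 GiB), and a
 * LOGICAL_ADDR of 0x4 yields sys_low = 0x100000 (1 MiB).
 */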
2314 
2315 /* Temporary read settings; in the future the values will come from KMD directly. */
2316 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2317 		struct vm_context0_param *vm0,
2318 		struct dce_hwseq *hws)
2319 {
2320 	PHYSICAL_ADDRESS_LOC fb_base;
2321 	PHYSICAL_ADDRESS_LOC fb_offset;
2322 	uint32_t fb_base_value;
2323 	uint32_t fb_offset_value;
2324 
2325 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2326 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2327 
2328 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2329 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2330 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2331 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2332 
2333 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2334 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2335 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2336 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2337 
2338 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2339 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2340 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2341 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2342 
2343 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2344 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2345 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2346 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2347 
2348 	/*
2349 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2350 	 * Therefore we need to do
2351 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2352 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2353 	 */
2354 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2355 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2356 	vm0->pte_base.quad_part += fb_base.quad_part;
2357 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2358 }
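
/*
 * Illustrative example of the UMA translation above: DCHUBBUB_SDPIF_FB_BASE
 * and DCHUBBUB_SDPIF_FB_OFFSET are in 16 MiB units (<< 24), so with
 * fb_base_value = 0x80 and fb_offset_value = 0x00 the page table base is
 * shifted up by 0x80000000 (2 GiB):
 *
 *	vm0.pte_base += (0x80ULL << 24) - (0x00ULL << 24);
 *
 * i.e. the VM_CONTEXT0 base address is rebased from UMA space into the
 * address space DCN expects.
 */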
2359 
2360 
2361 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2362 {
2363 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2364 	struct vm_system_aperture_param apt = { {{ 0 } } };
2365 	struct vm_context0_param vm0 = { { { 0 } } };
2366 
2367 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2368 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2369 
2370 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2371 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2372 }
2373 
2374 static void dcn10_enable_plane(
2375 	struct dc *dc,
2376 	struct pipe_ctx *pipe_ctx,
2377 	struct dc_state *context)
2378 {
2379 	struct dce_hwseq *hws = dc->hwseq;
2380 
2381 	if (dc->debug.sanity_checks) {
2382 		hws->funcs.verify_allow_pstate_change_high(dc);
2383 	}
2384 
2385 	undo_DEGVIDCN10_253_wa(dc);
2386 
2387 	power_on_plane(dc->hwseq,
2388 		pipe_ctx->plane_res.hubp->inst);
2389 
2390 	/* enable DCFCLK for the current DCHUB */
2391 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2392 
2393 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2394 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2395 			pipe_ctx->stream_res.opp,
2396 			true);
2397 
2398 /* TODO: enable/disable in dm as per update type.
2399 	if (plane_state) {
2400 		DC_LOG_DC(dc->ctx->logger,
2401 				"Pipe:%d 0x%x: addr hi:0x%x, "
2402 				"addr low:0x%x, "
2403 				"src: %d, %d, %d,"
2404 				" %d; dst: %d, %d, %d, %d;\n",
2405 				pipe_ctx->pipe_idx,
2406 				plane_state,
2407 				plane_state->address.grph.addr.high_part,
2408 				plane_state->address.grph.addr.low_part,
2409 				plane_state->src_rect.x,
2410 				plane_state->src_rect.y,
2411 				plane_state->src_rect.width,
2412 				plane_state->src_rect.height,
2413 				plane_state->dst_rect.x,
2414 				plane_state->dst_rect.y,
2415 				plane_state->dst_rect.width,
2416 				plane_state->dst_rect.height);
2417 
2418 		DC_LOG_DC(dc->ctx->logger,
2419 				"Pipe %d: width, height, x, y         format:%d\n"
2420 				"viewport:%d, %d, %d, %d\n"
2421 				"recout:  %d, %d, %d, %d\n",
2422 				pipe_ctx->pipe_idx,
2423 				plane_state->format,
2424 				pipe_ctx->plane_res.scl_data.viewport.width,
2425 				pipe_ctx->plane_res.scl_data.viewport.height,
2426 				pipe_ctx->plane_res.scl_data.viewport.x,
2427 				pipe_ctx->plane_res.scl_data.viewport.y,
2428 				pipe_ctx->plane_res.scl_data.recout.width,
2429 				pipe_ctx->plane_res.scl_data.recout.height,
2430 				pipe_ctx->plane_res.scl_data.recout.x,
2431 				pipe_ctx->plane_res.scl_data.recout.y);
2432 		print_rq_dlg_ttu(dc, pipe_ctx);
2433 	}
2434 */
2435 	if (dc->config.gpu_vm_support)
2436 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2437 
2438 	if (dc->debug.sanity_checks) {
2439 		hws->funcs.verify_allow_pstate_change_high(dc);
2440 	}
2441 
2442 	if (!pipe_ctx->top_pipe
2443 		&& pipe_ctx->plane_state
2444 		&& pipe_ctx->plane_state->flip_int_enabled
2445 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2446 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2447 
2448 }
2449 
2450 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2451 {
2452 	int i = 0;
2453 	struct dpp_grph_csc_adjustment adjust;
2454 	memset(&adjust, 0, sizeof(adjust));
2455 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2456 
2457 
2458 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2459 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2460 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2461 			adjust.temperature_matrix[i] =
2462 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2463 	} else if (pipe_ctx->plane_state &&
2464 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2465 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2466 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2467 			adjust.temperature_matrix[i] =
2468 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2469 	}
2470 
2471 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2472 }
2473 
2474 
2475 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2476 {
2477 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2478 		if (pipe_ctx->top_pipe) {
2479 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2480 
2481 			while (top->top_pipe)
2482 				top = top->top_pipe; // Traverse to top pipe_ctx
2483 			if (top->plane_state && top->plane_state->layer_index == 0)
2484 				return true; // Front MPO plane not hidden
2485 		}
2486 	}
2487 	return false;
2488 }
2489 
2490 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2491 {
2492 	// Override rear plane RGB bias to fix MPO brightness
2493 	uint16_t rgb_bias = matrix[3];
2494 
2495 	matrix[3] = 0;
2496 	matrix[7] = 0;
2497 	matrix[11] = 0;
2498 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2499 	matrix[3] = rgb_bias;
2500 	matrix[7] = rgb_bias;
2501 	matrix[11] = rgb_bias;
2502 }
2503 
2504 void dcn10_program_output_csc(struct dc *dc,
2505 		struct pipe_ctx *pipe_ctx,
2506 		enum dc_color_space colorspace,
2507 		uint16_t *matrix,
2508 		int opp_id)
2509 {
2510 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2511 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2512 
2513 			/* MPO is broken with RGB colorspaces when OCSC matrix
2514 			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2515 			 * Blending adds offsets from front + rear to rear plane
2516 			 *
2517 			 * Fix is to set RGB bias to 0 on rear plane, top plane
2518 			 * black value pixels add offset instead of rear + front
2519 			 */
2520 
2521 			int16_t rgb_bias = matrix[3];
2522 			// matrix[3/7/11] are all the same offset value
2523 
2524 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2525 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2526 			} else {
2527 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2528 			}
2529 		}
2530 	} else {
2531 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2532 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2533 	}
2534 }
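
/*
 * Numeric sketch of the MPO bias fix (illustrative): with an output CSC bias
 * of +64 programmed on both planes, blending a black rear pixel under the
 * front plane would accumulate 64 (front) + 64 (rear) = 128 of unwanted
 * offset, because the OCSC runs before the MPC blend on DCN1. Zeroing
 * matrix[3]/[7]/[11] on the rear plane keeps the total at +64, matching
 * single-plane output, and the saved rgb_bias is written back so the
 * caller's matrix is left unchanged.
 */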
2535 
2536 void dcn10_get_surface_visual_confirm_color(
2537 		const struct pipe_ctx *pipe_ctx,
2538 		struct tg_color *color)
2539 {
2540 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2541 
2542 	switch (pipe_ctx->plane_res.scl_data.format) {
2543 	case PIXEL_FORMAT_ARGB8888:
2544 		/* set border color to red */
2545 		color->color_r_cr = color_value;
2546 		break;
2547 
2548 	case PIXEL_FORMAT_ARGB2101010:
2549 		/* set border color to blue */
2550 		color->color_b_cb = color_value;
2551 		break;
2552 	case PIXEL_FORMAT_420BPP8:
2553 		/* set border color to green */
2554 		color->color_g_y = color_value;
2555 		break;
2556 	case PIXEL_FORMAT_420BPP10:
2557 		/* set border color to yellow */
2558 		color->color_g_y = color_value;
2559 		color->color_r_cr = color_value;
2560 		break;
2561 	case PIXEL_FORMAT_FP16:
2562 		/* set border color to white */
2563 		color->color_r_cr = color_value;
2564 		color->color_b_cb = color_value;
2565 		color->color_g_y = color_value;
2566 		break;
2567 	default:
2568 		break;
2569 	}
2570 }
2571 
2572 void dcn10_get_hdr_visual_confirm_color(
2573 		struct pipe_ctx *pipe_ctx,
2574 		struct tg_color *color)
2575 {
2576 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2577 
2578 	// Determine the overscan color based on the top-most (desktop) plane's context
2579 	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2580 
2581 	while (top_pipe_ctx->top_pipe != NULL)
2582 		top_pipe_ctx = top_pipe_ctx->top_pipe;
2583 
2584 	switch (top_pipe_ctx->plane_res.scl_data.format) {
2585 	case PIXEL_FORMAT_ARGB2101010:
2586 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2587 			/* HDR10, ARGB2101010 - set border color to red */
2588 			color->color_r_cr = color_value;
2589 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2590 			/* FreeSync 2 ARGB2101010 - set border color to pink */
2591 			color->color_r_cr = color_value;
2592 			color->color_b_cb = color_value;
2593 		}
2594 		break;
2595 	case PIXEL_FORMAT_FP16:
2596 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2597 			/* HDR10, FP16 - set border color to blue */
2598 			color->color_b_cb = color_value;
2599 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2600 			/* FreeSync 2 HDR - set border color to green */
2601 			color->color_g_y = color_value;
2602 		}
2603 		break;
2604 	default:
2605 		/* SDR - set border color to Gray */
2606 		color->color_r_cr = color_value/2;
2607 		color->color_b_cb = color_value/2;
2608 		color->color_g_y = color_value/2;
2609 		break;
2610 	}
2611 }
2612 
2613 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2614 {
2615 	struct dc_bias_and_scale bns_params = {0};
2616 
2617 	// program the input csc
2618 	dpp->funcs->dpp_setup(dpp,
2619 			plane_state->format,
2620 			EXPANSION_MODE_ZERO,
2621 			plane_state->input_csc_color_matrix,
2622 			plane_state->color_space,
2623 			NULL);
2624 
2625 	//set scale and bias registers
2626 	build_prescale_params(&bns_params, plane_state);
2627 	if (dpp->funcs->dpp_program_bias_and_scale)
2628 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2629 }
2630 
2631 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2632 {
2633 	struct dce_hwseq *hws = dc->hwseq;
2634 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2635 	struct mpcc_blnd_cfg blnd_cfg = {{0}};
2636 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2637 	int mpcc_id;
2638 	struct mpcc *new_mpcc;
2639 	struct mpc *mpc = dc->res_pool->mpc;
2640 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2641 
2642 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2643 		hws->funcs.get_hdr_visual_confirm_color(
2644 				pipe_ctx, &blnd_cfg.black_color);
2645 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2646 		hws->funcs.get_surface_visual_confirm_color(
2647 				pipe_ctx, &blnd_cfg.black_color);
2648 	} else {
2649 		color_space_to_black_color(
2650 				dc, pipe_ctx->stream->output_color_space,
2651 				&blnd_cfg.black_color);
2652 	}
2653 
2654 	if (per_pixel_alpha)
2655 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2656 	else
2657 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2658 
2659 	blnd_cfg.overlap_only = false;
2660 	blnd_cfg.global_gain = 0xff;
2661 
2662 	if (pipe_ctx->plane_state->global_alpha)
2663 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2664 	else
2665 		blnd_cfg.global_alpha = 0xff;
2666 
2667 	/* DCN1.0 has output CM before MPC which seems to screw with
2668 	 * pre-multiplied alpha.
2669 	 */
2670 	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2671 			pipe_ctx->stream->output_color_space)
2672 					&& per_pixel_alpha;
2673 
2674 
2675 	/*
2676 	 * TODO: remove hack
2677 	 * Note: currently there is a bug in init_hw such that
2678 	 * on resume from hibernate, BIOS sets up MPCC0, and
2679 	 * we do mpcc_remove but the mpcc cannot go to idle
2680 		 * after remove. This causes us to pick mpcc1 here,
2681 		 * which causes a pstate hang for a yet unknown reason.
2682 	 */
2683 	mpcc_id = hubp->inst;
2684 
2685 	/* If there is no full update, we don't need to touch the MPC tree. */
2686 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2687 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2688 		return;
2689 	}
2690 
2691 	/* check if this MPCC is already being used */
2692 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2693 	/* remove MPCC if being used */
2694 	if (new_mpcc != NULL)
2695 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2696 	else
2697 		if (dc->debug.sanity_checks)
2698 			mpc->funcs->assert_mpcc_idle_before_connect(
2699 					dc->res_pool->mpc, mpcc_id);
2700 
2701 	/* Call MPC to insert new plane */
2702 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2703 			mpc_tree_params,
2704 			&blnd_cfg,
2705 			NULL,
2706 			NULL,
2707 			hubp->inst,
2708 			mpcc_id);
2709 
2710 	ASSERT(new_mpcc != NULL);
2711 
2712 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2713 	hubp->mpcc_id = mpcc_id;
2714 }
2715 
2716 static void update_scaler(struct pipe_ctx *pipe_ctx)
2717 {
2718 	bool per_pixel_alpha =
2719 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2720 
2721 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2722 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2723 	/* scaler configuration */
2724 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2725 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2726 }
2727 
2728 static void dcn10_update_dchubp_dpp(
2729 	struct dc *dc,
2730 	struct pipe_ctx *pipe_ctx,
2731 	struct dc_state *context)
2732 {
2733 	struct dce_hwseq *hws = dc->hwseq;
2734 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2735 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2736 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2737 	struct plane_size size = plane_state->plane_size;
2738 	unsigned int compat_level = 0;
2739 	bool should_divided_by_2 = false;
2740 
2741 	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2742 	/* If the requested max dppclk is lower than the current dispclk, there is
2743 	 * no need to divide by 2.
2744 	 */
2745 	if (plane_state->update_flags.bits.full_update) {
2746 
2747 		/* The newly calculated dispclk and dppclk are stored in
2748 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
2749 		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2750 		 * dcn_validate_bandwidth computes the new dispclk and dppclk.
2751 		 * dispclk is put in use after optimize_bandwidth, when
2752 		 * ramp_up_dispclk_with_dpp is called.
2753 		 * There are two places where dppclk is put in use. One is the
2754 		 * same location as dispclk. The other is within
2755 		 * update_dchubp_dpp, which happens between pre_bandwidth and
2756 		 * optimize_bandwidth.
2757 		 * A dppclk update within update_dchubp_dpp means the new dispclk
2758 		 * and dppclk values are not in use at the same time. When the
2759 		 * clocks are decreased, this may leave dppclk lower than the
2760 		 * previous configuration and get the pipe stuck.
2761 		 * For example, with eDP + external DP, change the DP resolution
2762 		 * from 1920x1080x144hz to 1280x960x60hz.
2763 		 * before change: dispclk = 337889 dppclk = 337889
2764 		 * after the mode change, dcn_validate_bandwidth calculates
2765 		 *                dispclk = 143122 dppclk = 143122
2766 		 * If update_dchubp_dpp is executed before dispclk is updated,
2767 		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
2768 		 * 168944. This causes a pipe pstate warning.
2769 		 * Solution: between pre_bandwidth and optimize_bandwidth, while
2770 		 * dispclk is going to be decreased, keep dppclk = dispclk.
2771 		 */
2772 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2773 				dc->clk_mgr->clks.dispclk_khz)
2774 			should_divided_by_2 = false;
2775 		else
2776 			should_divided_by_2 =
2777 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2778 					dc->clk_mgr->clks.dispclk_khz / 2;
2779 
2780 		dpp->funcs->dpp_dppclk_control(
2781 				dpp,
2782 				should_divided_by_2,
2783 				true);
2784 
2785 		if (dc->res_pool->dccg)
2786 			dc->res_pool->dccg->funcs->update_dpp_dto(
2787 					dc->res_pool->dccg,
2788 					dpp->inst,
2789 					pipe_ctx->plane_res.bw.dppclk_khz);
2790 		else
2791 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2792 						dc->clk_mgr->clks.dispclk_khz / 2 :
2793 							dc->clk_mgr->clks.dispclk_khz;
2794 	}
2795 
2796 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2797 	 * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
2798 	 * VTG has a 1:1 mapping with OTG. Each pipe's HUBP selects which VTG to use.
2799 	 */
2800 	if (plane_state->update_flags.bits.full_update) {
2801 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2802 
2803 		hubp->funcs->hubp_setup(
2804 			hubp,
2805 			&pipe_ctx->dlg_regs,
2806 			&pipe_ctx->ttu_regs,
2807 			&pipe_ctx->rq_regs,
2808 			&pipe_ctx->pipe_dlg_param);
2809 		hubp->funcs->hubp_setup_interdependent(
2810 			hubp,
2811 			&pipe_ctx->dlg_regs,
2812 			&pipe_ctx->ttu_regs);
2813 	}
2814 
2815 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2816 
2817 	if (plane_state->update_flags.bits.full_update ||
2818 		plane_state->update_flags.bits.bpp_change)
2819 		dcn10_update_dpp(dpp, plane_state);
2820 
2821 	if (plane_state->update_flags.bits.full_update ||
2822 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2823 		plane_state->update_flags.bits.global_alpha_change)
2824 		hws->funcs.update_mpcc(dc, pipe_ctx);
2825 
2826 	if (plane_state->update_flags.bits.full_update ||
2827 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2828 		plane_state->update_flags.bits.global_alpha_change ||
2829 		plane_state->update_flags.bits.scaling_change ||
2830 		plane_state->update_flags.bits.position_change) {
2831 		update_scaler(pipe_ctx);
2832 	}
2833 
2834 	if (plane_state->update_flags.bits.full_update ||
2835 		plane_state->update_flags.bits.scaling_change ||
2836 		plane_state->update_flags.bits.position_change) {
2837 		hubp->funcs->mem_program_viewport(
2838 			hubp,
2839 			&pipe_ctx->plane_res.scl_data.viewport,
2840 			&pipe_ctx->plane_res.scl_data.viewport_c);
2841 	}
2842 
2843 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2844 		dc->hwss.set_cursor_position(pipe_ctx);
2845 		dc->hwss.set_cursor_attribute(pipe_ctx);
2846 
2847 		if (dc->hwss.set_cursor_sdr_white_level)
2848 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2849 	}
2850 
2851 	if (plane_state->update_flags.bits.full_update) {
2852 		/*gamut remap*/
2853 		dc->hwss.program_gamut_remap(pipe_ctx);
2854 
2855 		dc->hwss.program_output_csc(dc,
2856 				pipe_ctx,
2857 				pipe_ctx->stream->output_color_space,
2858 				pipe_ctx->stream->csc_color_matrix.matrix,
2859 				pipe_ctx->stream_res.opp->inst);
2860 	}
2861 
2862 	if (plane_state->update_flags.bits.full_update ||
2863 		plane_state->update_flags.bits.pixel_format_change ||
2864 		plane_state->update_flags.bits.horizontal_mirror_change ||
2865 		plane_state->update_flags.bits.rotation_change ||
2866 		plane_state->update_flags.bits.swizzle_change ||
2867 		plane_state->update_flags.bits.dcc_change ||
2868 		plane_state->update_flags.bits.bpp_change ||
2869 		plane_state->update_flags.bits.scaling_change ||
2870 		plane_state->update_flags.bits.plane_size_change) {
2871 		hubp->funcs->hubp_program_surface_config(
2872 			hubp,
2873 			plane_state->format,
2874 			&plane_state->tiling_info,
2875 			&size,
2876 			plane_state->rotation,
2877 			&plane_state->dcc,
2878 			plane_state->horizontal_mirror,
2879 			compat_level);
2880 	}
2881 
2882 	hubp->power_gated = false;
2883 
2884 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2885 
2886 	if (is_pipe_tree_visible(pipe_ctx))
2887 		hubp->funcs->set_blank(hubp, false);
2888 }
2889 
2890 void dcn10_blank_pixel_data(
2891 		struct dc *dc,
2892 		struct pipe_ctx *pipe_ctx,
2893 		bool blank)
2894 {
2895 	enum dc_color_space color_space;
2896 	struct tg_color black_color = {0};
2897 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2898 	struct dc_stream_state *stream = pipe_ctx->stream;
2899 
2900 	/* program otg blank color */
2901 	color_space = stream->output_color_space;
2902 	color_space_to_black_color(dc, color_space, &black_color);
2903 
2904 	/*
2905 	 * The way 4:2:0 is packed, 2 channels carry the Y component and 1 channel
2906 	 * alternates between Cb and Cr, so both channels need the pixel
2907 	 * value for Y.
2908 	 */
2909 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2910 		black_color.color_r_cr = black_color.color_g_y;
2911 
2912 
2913 	if (stream_res->tg->funcs->set_blank_color)
2914 		stream_res->tg->funcs->set_blank_color(
2915 				stream_res->tg,
2916 				&black_color);
2917 
2918 	if (!blank) {
2919 		if (stream_res->tg->funcs->set_blank)
2920 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2921 		if (stream_res->abm) {
2922 			dc->hwss.set_pipe(pipe_ctx);
2923 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2924 		}
2925 	} else if (blank) {
2926 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2927 		if (stream_res->tg->funcs->set_blank) {
2928 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2929 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2930 		}
2931 	}
2932 }
2933 
2934 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2935 {
2936 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2937 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2938 	struct custom_float_format fmt;
2939 
2940 	fmt.exponenta_bits = 6;
2941 	fmt.mantissa_bits = 12;
2942 	fmt.sign = true;
2943 
2944 
2945 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2946 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2947 
2948 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2949 			pipe_ctx->plane_res.dpp, hw_mult);
2950 }
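
/*
 * Why 0x1f000 is 1.0 (illustrative derivation): with 6 exponent bits and 12
 * mantissa bits the exponent bias is 2^(6-1) - 1 = 31 = 0x1f, so 1.0 packs
 * as sign = 0, exponent = 31, mantissa = 0, i.e. (0x1f << 12) | 0 = 0x1f000,
 * which is the default used above when hdr_mult is zero. Any other
 * multiplier is converted with convert_to_custom_float_format() before
 * being handed to the DPP.
 */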
2951 
2952 void dcn10_program_pipe(
2953 		struct dc *dc,
2954 		struct pipe_ctx *pipe_ctx,
2955 		struct dc_state *context)
2956 {
2957 	struct dce_hwseq *hws = dc->hwseq;
2958 
2959 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2960 		dcn10_enable_plane(dc, pipe_ctx, context);
2961 
2962 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2963 
2964 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2965 
2966 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2967 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2968 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2969 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2970 
2971 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2972 	 * so only do gamma programming on a full update.
2973 	 * TODO: This can be further optimized/cleaned up.
2974 	 * Always call this for now, since it does a memcmp inside before
2975 	 * doing the heavy calculation and programming.
2976 	 */
2977 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2978 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2979 }
2980 
2981 static void dcn10_program_all_pipe_in_tree(
2982 		struct dc *dc,
2983 		struct pipe_ctx *pipe_ctx,
2984 		struct dc_state *context)
2985 {
2986 	struct dce_hwseq *hws = dc->hwseq;
2987 
2988 	if (pipe_ctx->top_pipe == NULL) {
2989 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2990 
2991 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2992 				pipe_ctx->stream_res.tg,
2993 				pipe_ctx->pipe_dlg_param.vready_offset,
2994 				pipe_ctx->pipe_dlg_param.vstartup_start,
2995 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2996 				pipe_ctx->pipe_dlg_param.vupdate_width);
2997 
2998 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2999 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
3000 
3001 		if (hws->funcs.setup_vupdate_interrupt)
3002 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
3003 
3004 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
3005 	}
3006 
3007 	if (pipe_ctx->plane_state != NULL)
3008 		hws->funcs.program_pipe(dc, pipe_ctx, context);
3009 
3010 	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
3011 		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
3012 }
3013 
3014 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
3015 		struct dc *dc,
3016 		struct dc_state *context,
3017 		const struct dc_stream_state *stream)
3018 {
3019 	int i;
3020 
3021 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3022 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3023 		struct pipe_ctx *old_pipe_ctx =
3024 				&dc->current_state->res_ctx.pipe_ctx[i];
3025 
3026 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
3027 			continue;
3028 
3029 		if (pipe_ctx->stream != stream)
3030 			continue;
3031 
3032 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
3033 			return pipe_ctx;
3034 	}
3035 	return NULL;
3036 }
3037 
3038 void dcn10_wait_for_pending_cleared(struct dc *dc,
3039 		struct dc_state *context)
3040 {
3041 		struct pipe_ctx *pipe_ctx;
3042 		struct timing_generator *tg;
3043 		int i;
3044 
3045 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3046 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
3047 			tg = pipe_ctx->stream_res.tg;
3048 
3049 			/*
3050 			 * Only wait for the top pipe's TG pending bit.
3051 			 * Also skip if the pipe is disabled.
3052 			 */
3053 			if (pipe_ctx->top_pipe ||
3054 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
3055 			    !tg->funcs->is_tg_enabled(tg))
3056 				continue;
3057 
3058 			/*
3059 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3060 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
3061 			 * seems to not trigger the update right away, and if we
3062 			 * lock again before VUPDATE then we don't get a separated
3063 			 * operation.
3064 			 */
3065 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3066 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3067 		}
3068 }
3069 
3070 void dcn10_apply_ctx_for_surface(
3071 		struct dc *dc,
3072 		const struct dc_stream_state *stream,
3073 		int num_planes,
3074 		struct dc_state *context)
3075 {
3076 	struct dce_hwseq *hws = dc->hwseq;
3077 	int i;
3078 	struct timing_generator *tg;
3079 	uint32_t underflow_check_delay_us;
3080 	bool interdependent_update = false;
3081 	struct pipe_ctx *top_pipe_to_program =
3082 			dcn10_find_top_pipe_for_stream(dc, context, stream);
3083 	DC_LOGGER_INIT(dc->ctx->logger);
3084 
3085 	// Clear pipe_ctx flag
3086 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3087 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3088 		pipe_ctx->update_flags.raw = 0;
3089 	}
3090 
3091 	if (!top_pipe_to_program)
3092 		return;
3093 
3094 	tg = top_pipe_to_program->stream_res.tg;
3095 
3096 	interdependent_update = top_pipe_to_program->plane_state &&
3097 		top_pipe_to_program->plane_state->update_flags.bits.full_update;
3098 
3099 	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
3100 
3101 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
3102 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
3103 
3104 	if (underflow_check_delay_us != 0xFFFFFFFF)
3105 		udelay(underflow_check_delay_us);
3106 
3107 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
3108 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
3109 
3110 	if (num_planes == 0) {
3111 		/* OTG blank before removing all front ends */
3112 		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
3113 	}
3114 
3115 	/* Disconnect unused mpcc */
3116 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3117 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3118 		struct pipe_ctx *old_pipe_ctx =
3119 				&dc->current_state->res_ctx.pipe_ctx[i];
3120 
3121 		if ((!pipe_ctx->plane_state ||
3122 		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
3123 		    old_pipe_ctx->plane_state &&
3124 		    old_pipe_ctx->stream_res.tg == tg) {
3125 
3126 			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
3127 			pipe_ctx->update_flags.bits.disable = 1;
3128 
3129 			DC_LOG_DC("Reset mpcc for pipe %d\n",
3130 					old_pipe_ctx->pipe_idx);
3131 		}
3132 	}
3133 
3134 	if (num_planes > 0)
3135 		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
3136 
3137 	/* Program secondary blending tree and writeback pipes */
3138 	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
3139 		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
3140 	if (interdependent_update)
3141 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3142 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3143 			/* Skip inactive pipes and ones already updated */
3144 			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
3145 			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
3146 				continue;
3147 
3148 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
3149 				pipe_ctx->plane_res.hubp,
3150 				&pipe_ctx->dlg_regs,
3151 				&pipe_ctx->ttu_regs);
3152 		}
3153 }
3154 
3155 void dcn10_post_unlock_program_front_end(
3156 		struct dc *dc,
3157 		struct dc_state *context)
3158 {
3159 	int i;
3160 
3161 	DC_LOGGER_INIT(dc->ctx->logger);
3162 
3163 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3164 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3165 
3166 		if (!pipe_ctx->top_pipe &&
3167 			!pipe_ctx->prev_odm_pipe &&
3168 			pipe_ctx->stream) {
3169 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3170 
3171 			if (context->stream_status[i].plane_count == 0)
3172 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3173 		}
3174 	}
3175 
3176 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3177 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3178 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3179 
3180 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3181 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3182 			dc->hwss.optimize_bandwidth(dc, context);
3183 			break;
3184 		}
3185 
3186 	if (dc->hwseq->wa.DEGVIDCN10_254)
3187 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3188 }
3189 
3190 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3191 {
3192 	uint8_t i;
3193 
3194 	for (i = 0; i < context->stream_count; i++) {
3195 		if (context->streams[i]->timing.timing_3d_format
3196 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3197 			/*
3198 			 * Disable stutter
3199 			 */
3200 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3201 			break;
3202 		}
3203 	}
3204 }
3205 
3206 void dcn10_prepare_bandwidth(
3207 		struct dc *dc,
3208 		struct dc_state *context)
3209 {
3210 	struct dce_hwseq *hws = dc->hwseq;
3211 	struct hubbub *hubbub = dc->res_pool->hubbub;
3212 
3213 	if (dc->debug.sanity_checks)
3214 		hws->funcs.verify_allow_pstate_change_high(dc);
3215 
3216 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3217 		if (context->stream_count == 0)
3218 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3219 
3220 		dc->clk_mgr->funcs->update_clocks(
3221 				dc->clk_mgr,
3222 				context,
3223 				false);
3224 	}
3225 
3226 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3227 			&context->bw_ctx.bw.dcn.watermarks,
3228 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3229 			true);
3230 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3231 
3232 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
3233 		dcn_bw_notify_pplib_of_wm_ranges(dc);
3234 
3235 	if (dc->debug.sanity_checks)
3236 		hws->funcs.verify_allow_pstate_change_high(dc);
3237 }
3238 
3239 void dcn10_optimize_bandwidth(
3240 		struct dc *dc,
3241 		struct dc_state *context)
3242 {
3243 	struct dce_hwseq *hws = dc->hwseq;
3244 	struct hubbub *hubbub = dc->res_pool->hubbub;
3245 
3246 	if (dc->debug.sanity_checks)
3247 		hws->funcs.verify_allow_pstate_change_high(dc);
3248 
3249 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3250 		if (context->stream_count == 0)
3251 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3252 
3253 		dc->clk_mgr->funcs->update_clocks(
3254 				dc->clk_mgr,
3255 				context,
3256 				true);
3257 	}
3258 
3259 	hubbub->funcs->program_watermarks(hubbub,
3260 			&context->bw_ctx.bw.dcn.watermarks,
3261 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3262 			true);
3263 
3264 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3265 
3266 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
3267 		dcn_bw_notify_pplib_of_wm_ranges(dc);
3268 
3269 	if (dc->debug.sanity_checks)
3270 		hws->funcs.verify_allow_pstate_change_high(dc);
3271 }
3272 
3273 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3274 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3275 {
3276 	int i = 0;
3277 	struct drr_params params = {0};
3278 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3279 	unsigned int event_triggers = 0x800;
3280 	// Note: DRR trigger events are generated regardless of whether the frame count is met.
3281 	unsigned int num_frames = 2;
3282 
3283 	params.vertical_total_max = adjust.v_total_max;
3284 	params.vertical_total_min = adjust.v_total_min;
3285 	params.vertical_total_mid = adjust.v_total_mid;
3286 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3287 	/* TODO: If multiple pipes are to be supported, GSL programming
3288 	 * is needed. Static screen triggers may be programmed differently
3289 	 * as well.
3290 	 */
3291 	for (i = 0; i < num_pipes; i++) {
3292 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3293 			pipe_ctx[i]->stream_res.tg, &params);
3294 		if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3295 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3296 					pipe_ctx[i]->stream_res.tg,
3297 					event_triggers, num_frames);
3298 	}
3299 }
3300 
3301 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3302 		int num_pipes,
3303 		struct crtc_position *position)
3304 {
3305 	int i = 0;
3306 
3307 	/* TODO: handle pipes > 1
3308 	 */
3309 	for (i = 0; i < num_pipes; i++)
3310 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3311 }
3312 
3313 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3314 		int num_pipes, const struct dc_static_screen_params *params)
3315 {
3316 	unsigned int i;
3317 	unsigned int triggers = 0;
3318 
3319 	if (params->triggers.surface_update)
3320 		triggers |= 0x80;
3321 	if (params->triggers.cursor_update)
3322 		triggers |= 0x2;
3323 	if (params->triggers.force_trigger)
3324 		triggers |= 0x1;
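	/*
	 * Illustrative note: the triggers value is a bitmask, so e.g. a
	 * surface update combined with a force trigger yields
	 * 0x80 | 0x1 = 0x81.
	 */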
3325 
3326 	for (i = 0; i < num_pipes; i++)
3327 		pipe_ctx[i]->stream_res.tg->funcs->
3328 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3329 					triggers, params->num_frames);
3330 }
3331 
3332 static void dcn10_config_stereo_parameters(
3333 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3334 {
3335 	enum view_3d_format view_format = stream->view_format;
3336 	enum dc_timing_3d_format timing_3d_format =\
3337 			stream->timing.timing_3d_format;
3338 	bool non_stereo_timing = false;
3339 
3340 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3341 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3342 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3343 		non_stereo_timing = true;
3344 
3345 	if (non_stereo_timing == false &&
3346 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3347 
3348 		flags->PROGRAM_STEREO         = 1;
3349 		flags->PROGRAM_POLARITY       = 1;
3350 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3351 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3352 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3353 			enum display_dongle_type dongle = \
3354 					stream->link->ddc->dongle_type;
3355 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3356 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3357 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3358 				flags->DISABLE_STEREO_DP_SYNC = 1;
3359 		}
3360 		flags->RIGHT_EYE_POLARITY =\
3361 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3362 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3363 			flags->FRAME_PACKED = 1;
3364 	}
3365 
3366 	return;
3367 }
3368 
3369 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3370 {
3371 	struct crtc_stereo_flags flags = { 0 };
3372 	struct dc_stream_state *stream = pipe_ctx->stream;
3373 
3374 	dcn10_config_stereo_parameters(stream, &flags);
3375 
3376 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3377 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3378 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3379 	} else {
3380 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3381 	}
3382 
3383 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3384 		pipe_ctx->stream_res.opp,
3385 		flags.PROGRAM_STEREO == 1,
3386 		&stream->timing);
3387 
3388 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3389 		pipe_ctx->stream_res.tg,
3390 		&stream->timing,
3391 		&flags);
3392 
3393 	return;
3394 }
3395 
3396 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3397 {
3398 	int i;
3399 
3400 	for (i = 0; i < res_pool->pipe_count; i++) {
3401 		if (res_pool->hubps[i]->inst == mpcc_inst)
3402 			return res_pool->hubps[i];
3403 	}
3404 	ASSERT(false);
3405 	return NULL;
3406 }
3407 
3408 void dcn10_wait_for_mpcc_disconnect(
3409 		struct dc *dc,
3410 		struct resource_pool *res_pool,
3411 		struct pipe_ctx *pipe_ctx)
3412 {
3413 	struct dce_hwseq *hws = dc->hwseq;
3414 	int mpcc_inst;
3415 
3416 	if (dc->debug.sanity_checks) {
3417 		hws->funcs.verify_allow_pstate_change_high(dc);
3418 	}
3419 
3420 	if (!pipe_ctx->stream_res.opp)
3421 		return;
3422 
3423 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3424 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3425 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3426 
3427 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3428 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3429 			hubp->funcs->set_blank(hubp, true);
3430 		}
3431 	}
3432 
3433 	if (dc->debug.sanity_checks) {
3434 		hws->funcs.verify_allow_pstate_change_high(dc);
3435 	}
3436 
3437 }
3438 
3439 bool dcn10_dummy_display_power_gating(
3440 	struct dc *dc,
3441 	uint8_t controller_id,
3442 	struct dc_bios *dcb,
3443 	enum pipe_gating_control power_gating)
3444 {
3445 	return true;
3446 }
3447 
3448 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3449 {
3450 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3451 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3452 	bool flip_pending;
3453 	struct dc *dc;
3454 
3455 	if (plane_state == NULL)
3456 		return;
3457 	dc = plane_state->ctx->dc;
3458 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3459 					pipe_ctx->plane_res.hubp);
3460 
3461 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3462 
3463 	if (!flip_pending)
3464 		plane_state->status.current_address = plane_state->status.requested_address;
3465 
3466 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3467 			tg->funcs->is_stereo_left_eye) {
3468 		plane_state->status.is_right_eye =
3469 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3470 	}
3471 
3472 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3473 		struct dce_hwseq *hwseq = dc->hwseq;
3474 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3475 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3476 
3477 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3478 			struct hubbub *hubbub = dc->res_pool->hubbub;
3479 
3480 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3481 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3482 		}
3483 	}
3484 }
3485 
3486 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3487 {
3488 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3489 
3490 	/* In DCN, this programming sequence is owned by the hubbub */
3491 	hubbub->funcs->update_dchub(hubbub, dh_data);
3492 }
3493 
3494 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3495 {
3496 	struct pipe_ctx *test_pipe;
3497 	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
3498 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3499 
3500 	/**
3501 	 * Disable the cursor if there's another pipe above this with a
3502 	 * plane that contains this pipe's viewport to prevent double cursor
3503 	 * and incorrect scaling artifacts.
3504 	 */
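	/*
	 * Illustrative example (hypothetical numbers): if this pipe's recout
	 * is (100, 100) 200x200 and a pipe above has a recout of
	 * (0, 0) 1920x1080, the first rectangle is fully contained, so the
	 * function returns true and the cursor on this pipe is disabled.
	 */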
3505 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3506 	     test_pipe = test_pipe->top_pipe) {
3507 		if (!test_pipe->plane_state || !test_pipe->plane_state->visible)
3508 			continue;
3509 
3510 		r2 = &test_pipe->plane_res.scl_data.recout;
3511 		r2_r = r2->x + r2->width;
3512 		r2_b = r2->y + r2->height;
3513 
3514 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3515 			return true;
3516 	}
3517 
3518 	return false;
3519 }
3520 
3521 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3522 {
3523 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3524 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3525 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3526 	struct dc_cursor_mi_param param = {
3527 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3528 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3529 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3530 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3531 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3532 		.rotation = pipe_ctx->plane_state->rotation,
3533 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3534 	};
3535 	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3536 		(pipe_ctx->bottom_pipe != NULL);
3537 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3538 		(pipe_ctx->prev_odm_pipe != NULL);
3539 
3540 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3541 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3542 	int x_pos = pos_cpy.x;
3543 	int y_pos = pos_cpy.y;
3544 
3545 	/**
3546 	 * The DC cursor is in stream space; the HW cursor is in plane space
3547 	 * and is drawn as part of the framebuffer.
3548 	 *
3549 	 * Cursor position can't be negative, but hotspot can be used to
3550 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3551 	 * than the cursor size.
3552 	 */
3553 
3554 	/**
3555 	 * Translate cursor from stream space to plane space.
3556 	 *
3557 	 * If the cursor is scaled then we need to scale the position
3558 	 * to be in approximately the correct place. We can't do anything
3559 	 * about the actual size being incorrect; that's a limitation of
3560 	 * the hardware.
3561 	 */
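	/*
	 * Illustrative example (hypothetical numbers): a 960-wide source
	 * scaled to a 1920-wide destination at x_plane = 0 maps a
	 * stream-space cursor x of 1000 to plane space as
	 * (1000 - 0) * 960 / 1920 = 500.
	 */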
3562 	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3563 			pipe_ctx->plane_state->dst_rect.width;
3564 	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3565 			pipe_ctx->plane_state->dst_rect.height;
3566 
3567 	/**
3568 	 * If the cursor's source viewport is clipped then we need to
3569 	 * translate the cursor to appear in the correct position on
3570 	 * the screen.
3571 	 *
3572 	 * This translation isn't affected by scaling so it needs to be
3573 	 * done *after* we adjust the position for the scale factor.
3574 	 *
3575 	 * This is opt-in for now since there are still some use cases,
3576 	 * such as tiled displays, that might enable the cursor on both
3577 	 * streams while expecting DC to clip it.
3578 	 */
3579 	if (pos_cpy.translate_by_source) {
3580 		x_pos += pipe_ctx->plane_state->src_rect.x;
3581 		y_pos += pipe_ctx->plane_state->src_rect.y;
3582 	}
3583 
3584 	/**
3585 	 * If the position is negative then we need to add to the hotspot
3586 	 * to shift the cursor outside the plane.
3587 	 */
3588 
3589 	if (x_pos < 0) {
3590 		pos_cpy.x_hotspot -= x_pos;
3591 		x_pos = 0;
3592 	}
3593 
3594 	if (y_pos < 0) {
3595 		pos_cpy.y_hotspot -= y_pos;
3596 		y_pos = 0;
3597 	}
3598 
3599 	pos_cpy.x = (uint32_t)x_pos;
3600 	pos_cpy.y = (uint32_t)y_pos;
3601 
3602 	if (pipe_ctx->plane_state->address.type
3603 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3604 		pos_cpy.enable = false;
3605 
3606 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3607 		pos_cpy.enable = false;
3608 
3609 	// Swap axis and mirror horizontally
3610 	if (param.rotation == ROTATION_ANGLE_90) {
3611 		uint32_t temp_x = pos_cpy.x;
3612 
3613 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3614 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3615 		pos_cpy.y = temp_x;
3616 	}
3617 	// Swap axis and mirror vertically
3618 	else if (param.rotation == ROTATION_ANGLE_270) {
3619 		uint32_t temp_y = pos_cpy.y;
3620 		int viewport_height =
3621 			pipe_ctx->plane_res.scl_data.viewport.height;
3622 		int viewport_y =
3623 			pipe_ctx->plane_res.scl_data.viewport.y;
3624 
3625 		/**
3626 		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height
3627 		 * For pipe split cases:
3628 		 * - apply offset of viewport.y to normalize pos_cpy.x
3629 		 * - calculate the pos_cpy.y as before
3630 		 * - shift pos_cpy.y back by same offset to get final value
3631 		 * - since we iterate through both pipes, use the lower
3632 		 *   viewport.y for offset
3633 		 * For non pipe split cases, use the same calculation for
3634 		 *  pos_cpy.y as the 180 degree rotation case below,
3635 		 *  but use pos_cpy.x as our input because we are rotating
3636 		 *  270 degrees
3637 		 */
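		/*
		 * Illustrative example for the non-split path below
		 * (hypothetical numbers): with viewport_y = 0 and
		 * viewport_height = 1080, a pre-rotation pos_cpy.x of 100
		 * gives pos_cpy.y = 2 * 0 + 1080 - 100 = 980, and pos_cpy.x
		 * is then taken from the saved temp_y.
		 */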
3638 		if (pipe_split_on || odm_combine_on) {
3639 			int pos_cpy_x_offset;
3640 			int other_pipe_viewport_y;
3641 
3642 			if (pipe_split_on) {
3643 				if (pipe_ctx->bottom_pipe) {
3644 					other_pipe_viewport_y =
3645 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3646 				} else {
3647 					other_pipe_viewport_y =
3648 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3649 				}
3650 			} else {
3651 				if (pipe_ctx->next_odm_pipe) {
3652 					other_pipe_viewport_y =
3653 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3654 				} else {
3655 					other_pipe_viewport_y =
3656 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3657 				}
3658 			}
3659 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3660 				other_pipe_viewport_y : viewport_y;
3661 			pos_cpy.x -= pos_cpy_x_offset;
3662 			if (pos_cpy.x > viewport_height) {
3663 				pos_cpy.x = pos_cpy.x - viewport_height;
3664 				pos_cpy.y = viewport_height - pos_cpy.x;
3665 			} else {
3666 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3667 			}
3668 			pos_cpy.y += pos_cpy_x_offset;
3669 		} else {
3670 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3671 		}
3672 		pos_cpy.x = temp_y;
3673 	}
3674 	// Mirror horizontally and vertically
3675 	else if (param.rotation == ROTATION_ANGLE_180) {
3676 		int viewport_width =
3677 			pipe_ctx->plane_res.scl_data.viewport.width;
3678 		int viewport_x =
3679 			pipe_ctx->plane_res.scl_data.viewport.x;
3680 
3681 		if (pipe_split_on || odm_combine_on) {
3682 			if (pos_cpy.x >= viewport_width + viewport_x) {
3683 				pos_cpy.x = 2 * viewport_width
3684 						- pos_cpy.x + 2 * viewport_x;
3685 			} else {
3686 				uint32_t temp_x = pos_cpy.x;
3687 
3688 				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3689 				if (temp_x >= viewport_x +
3690 					(int)hubp->curs_attr.width || pos_cpy.x
3691 					<= (int)hubp->curs_attr.width +
3692 					pipe_ctx->plane_state->src_rect.x) {
3693 					pos_cpy.x = temp_x + viewport_width;
3694 				}
3695 			}
3696 		} else {
3697 			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3698 		}
3699 
3700 		/**
3701 		 * Display groups that are 1xnY have pos_cpy.y > viewport.height
3702 		 * Calculation:
3703 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3704 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3705 		 * Simplify it as:
3706 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3707 		 */
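		/*
		 * Illustrative example (hypothetical numbers): with
		 * viewport.y = 0, viewport.height = 1080 and an incoming
		 * pos_cpy.y of 100, the mirrored position becomes
		 * 2 * 0 + 1080 - 100 = 980.
		 */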
3708 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3709 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3710 	}
3711 
3712 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3713 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3714 }
3715 
3716 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3717 {
3718 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3719 
3720 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3721 			pipe_ctx->plane_res.hubp, attributes);
3722 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3723 		pipe_ctx->plane_res.dpp, attributes);
3724 }
3725 
3726 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3727 {
3728 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3729 	struct fixed31_32 multiplier;
3730 	struct dpp_cursor_attributes opt_attr = { 0 };
3731 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3732 	struct custom_float_format fmt;
3733 
3734 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3735 		return;
3736 
3737 	fmt.exponenta_bits = 5;
3738 	fmt.mantissa_bits = 10;
3739 	fmt.sign = true;
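	/*
	 * Illustrative note (a sketch, not from the original source): the
	 * 1-sign/5-exponent/10-mantissa format above matches the half-precision
	 * layout, so 1.0 encodes to the 0x3c00 default. For example, an
	 * sdr_white_level of 160 nits against the 80 nit SDR reference gives a
	 * 2.0 multiplier, which encodes to 0x4000.
	 */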
3740 
3741 	if (sdr_white_level > 80) {
3742 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3743 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3744 	}
3745 
3746 	opt_attr.scale = hw_scale;
3747 	opt_attr.bias = 0;
3748 
3749 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3750 			pipe_ctx->plane_res.dpp, &opt_attr);
3751 }
3752 
3753 /*
3754  * apply_front_porch_workaround  TODO: is this still needed on FPGA?
3755  *
3756  * This is a workaround for a bug that has existed since R5xx and has not been
3757  * fixed: keep the front porch at minimum 2 for interlaced mode, 1 for progressive.
3758  */
3759 static void apply_front_porch_workaround(
3760 	struct dc_crtc_timing *timing)
3761 {
3762 	if (timing->flags.INTERLACE == 1) {
3763 		if (timing->v_front_porch < 2)
3764 			timing->v_front_porch = 2;
3765 	} else {
3766 		if (timing->v_front_porch < 1)
3767 			timing->v_front_porch = 1;
3768 	}
3769 }
3770 
3771 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3772 {
3773 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3774 	struct dc_crtc_timing patched_crtc_timing;
3775 	int vesa_sync_start;
3776 	int asic_blank_end;
3777 	int interlace_factor;
3778 	int vertical_line_start;
3779 
3780 	patched_crtc_timing = *dc_crtc_timing;
3781 	apply_front_porch_workaround(&patched_crtc_timing);
3782 
3783 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3784 
3785 	vesa_sync_start = patched_crtc_timing.v_addressable +
3786 			patched_crtc_timing.v_border_bottom +
3787 			patched_crtc_timing.v_front_porch;
3788 
3789 	asic_blank_end = (patched_crtc_timing.v_total -
3790 			vesa_sync_start -
3791 			patched_crtc_timing.v_border_top)
3792 			* interlace_factor;
3793 
3794 	vertical_line_start = asic_blank_end -
3795 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3796 
3797 	return vertical_line_start;
3798 }
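
/*
 * Illustrative example for dcn10_get_vupdate_offset_from_vsync (hypothetical
 * numbers): v_total = 1125, v_addressable = 1080, v_front_porch = 4, no
 * borders and progressive timing give asic_blank_end = 1125 - 1084 = 41;
 * with vstartup_start = 30 the returned offset is 41 - 30 + 1 = 12 lines.
 */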
3799 
3800 void dcn10_calc_vupdate_position(
3801 		struct dc *dc,
3802 		struct pipe_ctx *pipe_ctx,
3803 		uint32_t *start_line,
3804 		uint32_t *end_line)
3805 {
3806 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3807 	int vline_int_offset_from_vupdate =
3808 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3809 	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3810 	int start_position;
3811 
3812 	if (vline_int_offset_from_vupdate > 0)
3813 		vline_int_offset_from_vupdate--;
3814 	else if (vline_int_offset_from_vupdate < 0)
3815 		vline_int_offset_from_vupdate++;
3816 
3817 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3818 
3819 	if (start_position >= 0)
3820 		*start_line = start_position;
3821 	else
3822 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3823 
3824 	*end_line = *start_line + 2;
3825 
3826 	if (*end_line >= dc_crtc_timing->v_total)
3827 		*end_line = 2;
3828 }
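
/*
 * Illustrative example for dcn10_calc_vupdate_position (hypothetical numbers):
 * with a vupdate offset of 12 lines and lines_offset = 5 (reduced to 4 above),
 * the window is start_line = 16, end_line = 18. A negative start_position of
 * -3 with v_total = 1125 instead wraps to start_line = 1121, end_line = 1123.
 */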
3829 
3830 static void dcn10_cal_vline_position(
3831 		struct dc *dc,
3832 		struct pipe_ctx *pipe_ctx,
3833 		enum vline_select vline,
3834 		uint32_t *start_line,
3835 		uint32_t *end_line)
3836 {
3837 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3838 
3839 	if (vline == VLINE0)
3840 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3841 	else if (vline == VLINE1)
3842 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3843 
3844 	switch (ref_point) {
3845 	case START_V_UPDATE:
3846 		dcn10_calc_vupdate_position(
3847 				dc,
3848 				pipe_ctx,
3849 				start_line,
3850 				end_line);
3851 		break;
3852 	case START_V_SYNC:
3853 		// Supposed to do nothing because vsync is 0
3854 		break;
3855 	default:
3856 		ASSERT(0);
3857 		break;
3858 	}
3859 }
3860 
3861 void dcn10_setup_periodic_interrupt(
3862 		struct dc *dc,
3863 		struct pipe_ctx *pipe_ctx,
3864 		enum vline_select vline)
3865 {
3866 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3867 
3868 	if (vline == VLINE0) {
3869 		uint32_t start_line = 0;
3870 		uint32_t end_line = 0;
3871 
3872 		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3873 
3874 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3875 
3876 	} else if (vline == VLINE1) {
3877 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3878 				tg,
3879 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3880 	}
3881 }
3882 
3883 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3884 {
3885 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3886 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3887 
3888 	if (start_line < 0) {
3889 		ASSERT(0);
3890 		start_line = 0;
3891 	}
3892 
3893 	if (tg->funcs->setup_vertical_interrupt2)
3894 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3895 }
3896 
3897 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3898 		struct dc_link_settings *link_settings)
3899 {
3900 	struct encoder_unblank_param params = { { 0 } };
3901 	struct dc_stream_state *stream = pipe_ctx->stream;
3902 	struct dc_link *link = stream->link;
3903 	struct dce_hwseq *hws = link->dc->hwseq;
3904 
3905 	/* only the parameters below are used by unblank */
3906 	params.timing = pipe_ctx->stream->timing;
3907 
3908 	params.link_settings.link_rate = link_settings->link_rate;
3909 
3910 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3911 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3912 			params.timing.pix_clk_100hz /= 2;
3913 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3914 	}
3915 
3916 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3917 		hws->funcs.edp_backlight_control(link, true);
3918 	}
3919 }
3920 
3921 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3922 				const uint8_t *custom_sdp_message,
3923 				unsigned int sdp_message_size)
3924 {
3925 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3926 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3927 				pipe_ctx->stream_res.stream_enc,
3928 				custom_sdp_message,
3929 				sdp_message_size);
3930 	}
3931 }
3932 enum dc_status dcn10_set_clock(struct dc *dc,
3933 			enum dc_clock_type clock_type,
3934 			uint32_t clk_khz,
3935 			uint32_t stepping)
3936 {
3937 	struct dc_state *context = dc->current_state;
3938 	struct dc_clock_config clock_cfg = {0};
3939 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3940 
3941 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3942 		return DC_FAIL_UNSUPPORTED_1;
3943 
3944 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3945 			context, clock_type, &clock_cfg);
3947 
3948 	if (clk_khz > clock_cfg.max_clock_khz)
3949 		return DC_FAIL_CLK_EXCEED_MAX;
3950 
3951 	if (clk_khz < clock_cfg.min_clock_khz)
3952 		return DC_FAIL_CLK_BELOW_MIN;
3953 
3954 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3955 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3956 
3957 	/* update the internally requested clock for update_clocks to use */
3958 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3959 		current_clocks->dispclk_khz = clk_khz;
3960 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3961 		current_clocks->dppclk_khz = clk_khz;
3962 	else
3963 		return DC_ERROR_UNEXPECTED;
3964 
3965 	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3966 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3967 				context, true);
3968 	return DC_OK;
3969 
3970 }
3971 
3972 void dcn10_get_clock(struct dc *dc,
3973 			enum dc_clock_type clock_type,
3974 			struct dc_clock_config *clock_cfg)
3975 {
3976 	struct dc_state *context = dc->current_state;
3977 
3978 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3979 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3980 
3981 }
3982 
3983 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3984 {
3985 	struct resource_pool *pool = dc->res_pool;
3986 	int i;
3987 
3988 	for (i = 0; i < pool->pipe_count; i++) {
3989 		struct hubp *hubp = pool->hubps[i];
3990 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3991 
3992 		hubp->funcs->hubp_read_state(hubp);
3993 
3994 		if (!s->blank_en)
3995 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3996 	}
3997 }
3998