/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

// header file of functions being implemented
#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#include "dml/dcn32/dcn32_fpu.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"

static bool is_dual_plane(enum surface_pixel_format format)
{
	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}

uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool ignore_cursor_buf)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
	uint32_t cursor_mall_size_bytes = 0;

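	/* Scale the pixel count to a byte count based on the cursor color
	 * format (e.g. 4 bytes per pixel for the 32-bit color formats, 8 for
	 * the 64-bit FP formats).
	 */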
	switch (pipe_ctx->stream->cursor_attributes.color_format) {
	case CURSOR_MODE_MONO:
		cursor_size /= 2;
		break;
	case CURSOR_MODE_COLOR_1BIT_AND:
	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
	case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
		cursor_size *= 4;
		break;

	case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
	case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
		cursor_size *= 8;
		break;
	}

	/* only count if cursor is enabled, and if an additional allocation is
	 * needed outside of the DCN cursor buffer
	 */
	if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
			cursor_size > 16384)) {
		/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
		 * Note: add 1 mblk in case of cursor misalignment
		 */
		cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
				DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
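		/* Worked example (assuming a 64 KiB MALL block, i.e.
		 * DCN3_2_MALL_MBLK_SIZE_BYTES == 65536): a 100000 byte cursor
		 * rounds up to 2 mblks, plus 1 for misalignment, so
		 * 3 * 65536 = 196608 bytes are reserved in MALL.
		 */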
	}

	return cursor_mall_size_bytes;
}

/**
 * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP
 *
 * Takes the total allocation (in bytes) required for the phantom viewport, as
 * calculated by DML, and converts it to a number of MALL cache ways.
 *
 * @dc: current dc state
 * @context: new dc state
 *
 * Return: number of ways required for SubVP
 */
uint32_t dcn32_helper_calculate_num_ways_for_subvp(
		struct dc *dc,
		struct dc_state *context)
{
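	/* Illustration only (the way size is ASIC-specific and resolved by the
	 * res_pool callback below): if one MALL way covered 2 MiB, a 5 MiB
	 * phantom viewport allocation would require 3 ways.
	 */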
	if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
		if (dc->debug.force_subvp_num_ways) {
			return dc->debug.force_subvp_num_ways;
		} else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
			return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
		} else {
			return 0;
		}
	} else {
		return 0;
	}
}

void dcn32_merge_pipes_for_subvp(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	/* merge pipes if necessary */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		// For now merge all pipes for SubVP since pipe split case isn't supported yet

		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
		if (pipe->prev_odm_pipe) {
			/*split off odm pipe*/
			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
			if (pipe->next_odm_pipe)
				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;

			pipe->bottom_pipe = NULL;
			pipe->next_odm_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			pipe->top_pipe = NULL;
			pipe->prev_odm_pipe = NULL;
			if (pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
			struct pipe_ctx *top_pipe = pipe->top_pipe;
			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;

			top_pipe->bottom_pipe = bottom_pipe;
			if (bottom_pipe)
				bottom_pipe->top_pipe = top_pipe;

			pipe->top_pipe = NULL;
			pipe->bottom_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		}
	}
}

bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (!pipe->plane_state)
			return false;
	}
	return true;
}

bool dcn32_subvp_in_use(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
			return true;
	}
	return false;
}

bool dcn32_mpo_in_use(struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->stream_status[i].plane_count > 1)
			return true;
	}
	return false;
}


bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
			return true;
	}
	return false;
}

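/*
 * A timing is treated as "center" timing when the stream's vertical active
 * differs from the stream (or plane) source/destination heights, i.e. the
 * image is scaled or centered within the addressable area rather than
 * filling it natively.
 */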
bool dcn32_is_center_timing(struct pipe_ctx *pipe)
{
	bool is_center_timing = false;

	if (pipe->stream) {
		if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
				pipe->stream->timing.v_addressable != pipe->stream->src.height) {
			is_center_timing = true;
		}

		if (pipe->plane_state) {
			if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
					pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
				is_center_timing = true;
			}
		}
	}

	return is_center_timing;
}

bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
{
	bool psr_capable = false;

	if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
		psr_capable = true;
	}
	return psr_capable;
}

static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint8_t pipe_segments[])
{
	uint32_t i;
	uint8_t fhd_count = 0;
	uint8_t subvp_high_refresh_count = 0;
	uint8_t stream_count = 0;

	// Do not override if a stream has multiple planes
	for (i = 0; i < context->stream_count; i++) {
		if (context->stream_status[i].plane_count > 1)
			return;

		if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
			stream_count++;
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
			if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {

				if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
					fhd_count++;
				}
				subvp_high_refresh_count++;
			}
		}
	}

	if (stream_count == 2 && subvp_high_refresh_count == 2 && fhd_count == 1) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
				if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
					if (pipe_segments[i] > 4)
						pipe_segments[i] = 4;
				}
			}
		}
	}
}

/**
 * dcn32_determine_det_override(): Determine DET allocation for each pipe
 *
 * This function determines how much DET to allocate for each pipe. The total
 * number of DET segments is split equally among the streams, and the segments
 * assigned to each stream are then split equally among that stream's planes.
 *
 * If a plane is driven by more than one pipe (i.e. pipe split), the DET
 * segments for that plane are split among the pipes driving it.
 *
 * High level algorithm:
 * 1. Split total DET among number of streams
 * 2. For each stream, split DET among the planes
 * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
 *    among those pipes.
 * 4. Assign the DET override to the DML pipes.
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @pipes: Array of DML pipes
 *
 * Return: void
 */
void dcn32_determine_det_override(struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes)
{
	uint32_t i, j, k;
	uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
	uint8_t pipe_counted[MAX_PIPES] = {0};
	uint8_t pipe_cnt = 0;
	struct dc_plane_state *current_plane = NULL;
	uint8_t stream_count = 0;

	for (i = 0; i < context->stream_count; i++) {
		/* Don't count SubVP streams for DET allocation */
		if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
			stream_count++;
	}

	if (stream_count > 0) {
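		/* Split the total DET pool evenly across non-phantom streams.
		 * The 18 below is the total number of DET segments (assuming
		 * DCN3_2_DET_SEG_SIZE-sized segments, e.g. 18 * 64 KB = 1152 KB
		 * of DET). Example: 2 streams -> 9 segments each; a stream with
		 * 2 planes then gets 4 segments per plane, and a 2:1 pipe split
		 * halves that again to 2 segments per pipe.
		 */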
		stream_segments = 18 / stream_count;
		for (i = 0; i < context->stream_count; i++) {
			if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
				continue;

			if (context->stream_status[i].plane_count > 0)
				plane_segments = stream_segments / context->stream_status[i].plane_count;
			else
				plane_segments = stream_segments;
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				pipe_plane_count = 0;
				if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
						pipe_counted[j] != 1) {
					/* Note: pipe_plane_count indicates the number of pipes to be used for a
					 * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
					 * pipe_plane_count = 2 means 2:1 split, etc.
					 */
					pipe_plane_count++;
					pipe_counted[j] = 1;
					current_plane = context->res_ctx.pipe_ctx[j].plane_state;
					for (k = 0; k < dc->res_pool->pipe_count; k++) {
						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
							pipe_plane_count++;
							pipe_counted[k] = 1;
						}
					}

					pipe_segments[j] = plane_segments / pipe_plane_count;
					for (k = 0; k < dc->res_pool->pipe_count; k++) {
						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
							pipe_segments[k] = plane_segments / pipe_plane_count;
						}
					}
				}
			}
		}

		override_det_for_subvp(dc, context, pipe_segments);
		for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;
			pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
			pipe_cnt++;
		}
	} else {
		for (i = 0; i < dc->res_pool->pipe_count; i++)
			pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
	}
}

void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *pipe = NULL;
	bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {

		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		pipe = &res_ctx->pipe_ctx[i];
		pipe_cnt++;
	}

	/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
	 * the DET available for each pipe). Use the DET override input to maintain our driver
	 * policy.
	 */
	if (pipe_cnt == 1) {
		pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
		if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			if (!is_dual_plane(pipe->plane_state->format)) {
				pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
				pipes[0].pipe.src.unbounded_req_mode = true;
				if (pipe->plane_state->src_rect.width >= 5120 &&
					pipe->plane_state->src_rect.height >= 2880)
					pipes[0].pipe.src.det_size_override = 320; // 5K or higher
			}
		}
	} else
		dcn32_determine_det_override(dc, context, pipes);
}

#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must match the value in FW)
/*
 * Scaling factor for v_blank stretch calculations considering timing in
 * micro-seconds and pixel clock in 100hz.
 * Note: the parentheses are necessary to ensure the correct order of
 * operation where V_SCALE is used.
 */
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
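/*
 * Unit sketch: pix_clk_100hz / h_total is lines-per-second divided by 100, and
 * dividing that by (10000 / blank_time_us) folds in the microseconds-to-seconds
 * conversion (100 * 10000 = 1e6), yielding the number of lines that fit in the
 * stretched blank interval.
 */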

static int get_frame_rate_at_max_stretch_100hz(
		struct dc_stream_state *fpo_candidate_stream,
		uint32_t fpo_vactive_margin_us)
{
	struct dc_crtc_timing *timing = NULL;
	uint32_t sec_per_100_lines;
	uint32_t max_v_blank;
	uint32_t curr_v_blank;
	uint32_t v_stretch_max;
	uint32_t stretched_frame_pix_cnt;
	uint32_t scaled_stretched_frame_pix_cnt;
	uint32_t scaled_refresh_rate;
	uint32_t v_scale;

	if (fpo_candidate_stream == NULL)
		return 0;

	/* compute the lowest refresh rate reachable by stretching v_blank */
	timing = &fpo_candidate_stream->timing;
	if (timing == NULL)
		return 0;

	v_scale = 10000 / (MAX_STRETCHED_V_BLANK + fpo_vactive_margin_us);

	sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
	max_v_blank = sec_per_100_lines / v_scale + 1;
	curr_v_blank = timing->v_total - timing->v_addressable;
	v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
	stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
	scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
	scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;
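	/* Worked example (illustrative, standard 1080p120 CTA timing): h_total
	 * 2200, v_total 1125, pix_clk_100hz 2970000, no vactive margin.
	 * sec_per_100_lines = 1351, max_v_blank = 136 lines, curr_v_blank = 45,
	 * so v_stretch_max = 91 lines and the stretched frame refreshes at
	 * roughly 11124 in 0.01 Hz units, i.e. about 111 Hz.
	 */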

	return scaled_refresh_rate;

}

static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(
		struct dc_stream_state *fpo_candidate_stream, uint32_t fpo_vactive_margin_us, int current_refresh_rate)
{
	int refresh_rate_max_stretch_100hz;
	int min_refresh_100hz;

	if (fpo_candidate_stream == NULL)
		return false;

	refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(fpo_candidate_stream, fpo_vactive_margin_us);
	min_refresh_100hz = fpo_candidate_stream->timing.min_refresh_in_uhz / 10000;

	if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
		return false;

	if (fpo_candidate_stream->ctx->dc->config.enable_fpo_flicker_detection == 1 &&
			!dc_stream_is_refresh_rate_range_flickerless(fpo_candidate_stream, (refresh_rate_max_stretch_100hz / 100), current_refresh_rate, false))
		return false;

	return true;
}

static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
{
	int refresh_rate = 0;
	int h_v_total = 0;
	struct dc_crtc_timing *timing = NULL;

	if (fpo_candidate_stream == NULL)
		return 0;

	/* compute the nominal refresh rate of the candidate stream */
	timing = &fpo_candidate_stream->timing;
	if (timing == NULL)
		return 0;

	h_v_total = timing->h_total * timing->v_total;
	if (h_v_total == 0)
		return 0;

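	/* Illustration: a 3840x2160@60 CTA timing (h_total 4400, v_total 2250,
	 * pix_clk_100hz 5940000) gives 594000000 / 9900000 = 60; the +1 below
	 * makes the integer division round up, so this mode reports 61 Hz.
	 */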
	refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
	return refresh_rate;
}

/**
 * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can
 *								    support FPO
 *
 * @dc: current dc state
 * @context: new dc state
 *
 * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
 */
struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
{
	int refresh_rate = 0;
	const int minimum_refreshrate_supported = 120;
	struct dc_stream_state *fpo_candidate_stream = NULL;
	bool is_fpo_vactive = false;
	uint32_t fpo_vactive_margin_us = 0;
	struct dc_stream_status *fpo_stream_status = NULL;

	if (context == NULL)
		return NULL;

	if (dc->debug.disable_fams)
		return NULL;

	if (!dc->caps.dmub_caps.mclk_sw)
		return NULL;

	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
		return NULL;

	/* For FPO we can support up to 2 display configs if:
	 * - first display uses FPO
	 * - second display switches in VACTIVE
	 */
	if (context->stream_count > 2)
		return NULL;
	else if (context->stream_count == 2) {
		DC_FP_START();
		dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
		DC_FP_END();
		if (fpo_candidate_stream)
			fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
		DC_FP_START();
		is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, fpo_candidate_stream, dc->debug.fpo_vactive_min_active_margin_us);
		DC_FP_END();
		if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
			return NULL;
	} else {
		fpo_candidate_stream = context->streams[0];
		if (fpo_candidate_stream)
			fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
	}

	/* In DCN32/321, FPO uses per-pipe P-State force.
	 * If there are no planes, HUBP is power gated and
	 * therefore programming UCLK_PSTATE_FORCE does
	 * nothing (P-State will always be asserted naturally
	 * on a pipe that has HUBP power gated). Therefore we
	 * only want to enable FPO if the FPO pipe has both
	 * a stream and a plane.
	 */
	if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
		return NULL;

	if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
		return NULL;

	refresh_rate = get_refresh_rate(fpo_candidate_stream);
	if (refresh_rate < minimum_refreshrate_supported)
		return NULL;

	fpo_vactive_margin_us = is_fpo_vactive ? dc->debug.fpo_vactive_margin_us : 0; // For now hardcode the FPO + Vactive stretch margin to be 2000us
	if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us, refresh_rate))
		return NULL;

	if (!fpo_candidate_stream->allow_freesync)
		return NULL;

	if (fpo_candidate_stream->vrr_active_variable &&
	((dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE) ||
	(context->stream_count > 1 && !(dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_ENABLE))))
		return NULL;

	return fpo_candidate_stream;
}

bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height)
{
	bool is_native_scaling = false;

	if (pipe->stream->timing.h_addressable == width &&
			pipe->stream->timing.v_addressable == height &&
			pipe->plane_state->src_rect.width == width &&
			pipe->plane_state->src_rect.height == height &&
			pipe->plane_state->dst_rect.width == width &&
			pipe->plane_state->dst_rect.height == height)
		is_native_scaling = true;

	return is_native_scaling;
}

/**
 * disallow_subvp_in_active_plus_blank() - Determine disallowed SubVP + DRR/VBLANK configs
 *
 * @pipe: subvp pipe to be used for the subvp + drr/vblank config
 *
 * Since SubVP is being enabled on more configs (such as 1080p60), we want
 * to explicitly block any configs that we don't want to enable. We do not
 * want to enable any 1080p60 (SubVP) + DRR / VBLANK configs since these
 * are already covered by FPO.
 *
 * Return: True if disallowed, false otherwise
 */
static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe)
{
	bool disallow = false;

	if (resource_is_pipe_type(pipe, OPP_HEAD) &&
			resource_is_pipe_type(pipe, DPP_PIPE)) {
		if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920)
			disallow = true;
	}
	return disallow;
}

/**
 * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 *
 * SubVP + DRR is admissible under the following conditions:
 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
 * - One display is SubVP
 * - Other display must have Freesync enabled
 * - The potential DRR display must not be PSR capable
 *
 * Return: True if admissible, false otherwise
 */
bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
{
	bool result = false;
	uint32_t i;
	uint8_t subvp_count = 0;
	uint8_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	bool drr_psr_capable = false;
	uint64_t refresh_rate = 0;
	bool subvp_disallow = false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
				resource_is_pipe_type(pipe, DPP_PIPE)) {
			if (pipe_mall_type == SUBVP_MAIN) {
				subvp_count++;

				subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
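				/* Refresh rate (Hz) of the SubVP display, rounded up:
				 * pixel clock divided by the total pixel count per frame.
				 */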
				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
					pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
			}
			if (pipe_mall_type == SUBVP_NONE) {
				non_subvp_pipes++;
				drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
				if (pipe->stream->ignore_msa_timing_param &&
						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
					drr_pipe_found = true;
				}
			}
		}
	}

	if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
		((uint32_t)refresh_rate < 120))
		result = true;

	return result;
}

/**
 * dcn32_subvp_vblank_admissable() - Determine if SubVP + Vblank config is admissible
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @vlevel: Voltage level calculated by DML
 *
 * SubVP + Vblank is admissible under the following conditions:
 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
 * - One display is SubVP
 * - Other display must not have Freesync capability
 * - DML must have output DRAM clock change support as SubVP + Vblank
 * - The potential vblank display must not be PSR capable
 *
 * Return: True if admissible, false otherwise
 */
bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel)
{
	bool result = false;
	uint32_t i;
	uint8_t subvp_count = 0;
	uint8_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	bool vblank_psr_capable = false;
	uint64_t refresh_rate = 0;
	bool subvp_disallow = false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
				resource_is_pipe_type(pipe, DPP_PIPE)) {
			if (pipe_mall_type == SUBVP_MAIN) {
				subvp_count++;

				subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
					pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
			}
			if (pipe_mall_type == SUBVP_NONE) {
				non_subvp_pipes++;
				vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
				if (pipe->stream->ignore_msa_timing_param &&
						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
					drr_pipe_found = true;
				}
			}
		}
	}

	if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
		((uint32_t)refresh_rate < 120) && !subvp_disallow &&
		vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
		result = true;

	return result;
}

void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context,
		display_e2e_pipe_params_st *pipes)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *pipe = NULL;

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		int odm_slice_count = 0;

		if (!res_ctx->pipe_ctx[i].stream)
			continue;
		pipe = &res_ctx->pipe_ctx[i];
		odm_slice_count = resource_get_odm_slice_count(pipe);

		if (odm_slice_count == 1)
			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
		else if (odm_slice_count == 2)
			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
		else if (odm_slice_count == 4)
			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;

		pipe_cnt++;
	}
}

void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
{
	if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
		context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
}