xref: /dragonfly/sys/dev/drm/amd/display/dc/core/dc.c (revision 7d3e9a5b)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"
#define DC_LOGGER \
	dc->ctx->logger


/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

/**
 *****************************************************************************
 *  Function: dc_stream_adjust_vmin_vmax
 *
 *  @brief
 *     Looks up the pipe contexts of the given streams and updates the
 *     vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
 *     Rate), a power-saving feature that reduces the panel refresh rate
 *     while the screen content is static.
 *
 *  @param [in] dc: dc reference
 *  @param [in] streams: dc stream states to adjust; only streams[0] is
 *     currently used
 *  @param [in] num_streams: number of entries in streams
 *  @param [in] vmin: new vertical_total_min value
 *  @param [in] vmax: new vertical_total_max value
 *****************************************************************************
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}
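
/*
 * Illustrative usage sketch (not part of the driver): a DM layer clamping
 * the refresh rate of a single active stream via DRR. The vertical-total
 * values below are hypothetical; real callers must take them from the
 * panel's validated DRR range.
 *
 *	struct dc_stream_state *streams[1] = { stream };
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, streams, 1, 2250, 2750))
 *		DC_LOG_WARNING("no enabled pipe matched the stream\n");
 */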

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc: Configure CRC capture for the given stream.
 * @dc: DC object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: false if the stream is not found or CRC capture is not supported;
 * otherwise the result of the timing generator's configure_crc call.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc: Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the R/Cr channel is stored here.
 * @g_y: CRC value for the G/Y channel is stored here.
 * @b_cb: CRC value for the B/Cb channel is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
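
/*
 * Illustrative usage sketch (not part of the driver): enable continuous
 * CRC capture, then read one sample back. In practice the read should
 * happen after at least one completed frame; the variable names here are
 * hypothetical.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, true, true) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		dm_output_to_console("CRC: %u %u %u\n", r_cr, g_y, b_cb);
 */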

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->status.link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

void dc_link_set_drive_settings(struct dc *dc,
				struct link_training_settings *lt_settings,
				const struct dc_link *link)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] == link)
			break;
	}

	if (i >= dc->link_count)
		ASSERT_CRITICAL(false);

	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}

void dc_link_perform_link_training(struct dc *dc,
				   struct dc_link_settings *link_setting,
				   bool skip_video_pattern)
{
	int i;

	for (i = 0; i < dc->link_count; i++)
		dc_link_dp_perform_link_training(
			dc->links[i],
			link_setting,
			skip_video_pattern);
}

void dc_link_set_preferred_link_settings(struct dc *dc,
					 struct dc_link_settings *link_setting,
					 struct dc_link *link)
{
	struct dc_link_settings store_settings = *link_setting;
	struct dc_stream_state *link_stream =
		link->dc->current_state->res_ctx.pipe_ctx[0].stream;

	link->preferred_link_setting = store_settings;
	if (link_stream)
		decide_link_settings(link_stream, &store_settings);

	if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
		(store_settings.link_rate != LINK_RATE_UNKNOWN))
		dp_retrain_link_dp_test(link, &store_settings, false);
}

void dc_link_enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

void dc_link_disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}

void dc_link_set_test_pattern(struct dc_link *link,
			      enum dp_test_pattern test_pattern,
			      const struct link_training_settings *p_link_settings,
			      const unsigned char *p_custom_pattern,
			      unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

static void destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
}

static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc->ctx = dc_ctx;

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	/* Create logger */

	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:

	destruct(dc);
	return false;
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->config = init_params->flags;

	DC_LOG_DC("Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
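
/*
 * Illustrative lifecycle sketch (not part of the driver): how a display
 * manager typically brings the core up and tears it down. Only the
 * init_data fields consumed by construct() above are shown; asic_id and
 * the driver/cgs handles are assumed to come from the base driver.
 *
 *	struct dc_init_data init_data = { 0 };
 *	struct dc *dc;
 *
 *	init_data.asic_id = asic_id;
 *	init_data.driver = driver_context;
 *	init_data.cgs_device = cgs_device;
 *	init_data.num_virtual_links = 0;
 *
 *	dc = dc_create(&init_data);
 *	if (!dc)
 *		return -ENOMEM;
 *	...
 *	dc_destroy(&dc);	// destructs the core and NULLs the pointer
 */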

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set; then search the remaining tgs for ones
		 * with the same timing and add them to the group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0 ; pipe && j < stream_count; j++)  {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] =  context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	dc->hwss.set_bandwidth(dc, context, false);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context*/
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++)  {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	/* pplib is notified if disp_num changed */
	dc->hwss.set_bandwidth(dc, context, true);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	if (is_flip_pending_in_pipes(dc, context))
		return true;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	dc->hwss.set_bandwidth(dc, context, true);
	return true;
}

struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
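
/*
 * Illustrative refcount sketch (not part of the driver): dc_state follows
 * the usual kref pattern. dc_create_state() returns a state with refcount 1,
 * every extra holder pairs dc_retain_state() with dc_release_state(), and
 * the final release frees the context through dc_state_free().
 *
 *	struct dc_state *ctx = dc_create_state();	// refcount == 1
 *
 *	dc_retain_state(ctx);				// refcount == 2
 *	dc_release_state(ctx);				// refcount == 1
 *	dc_release_state(ctx);				// freed here
 */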

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space)
		update_flags->bits.color_space_change = 1;

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
		update_flags->bits.horizontal_mirror_change = 1;

	if (u->plane_info->rotation != u->surface->rotation)
		update_flags->bits.rotation_change = 1;

	if (u->plane_info->format != u->surface->format)
		update_flags->bits.pixel_format_change = 1;

	if (u->plane_info->stereo_format != u->surface->stereo_format)
		update_flags->bits.stereo_format_change = 1;

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
		update_flags->bits.per_pixel_alpha_change = 1;

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
			|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
		update_flags->bits.dcc_change = 1;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format))
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
	}

	if (update_flags->bits.rotation_change
			|| update_flags->bits.stereo_format_change
			|| update_flags->bits.pixel_format_change
			|| update_flags->bits.bpp_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.output_tf_change)
		return UPDATE_TYPE_FULL;

	return UPDATE_TYPE_MED;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a clock change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; // Reset all flags

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	if (u->surface->force_full_update) {
		update_flags->bits.full_update = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	return type;
}
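
/*
 * Illustrative sketch (not part of the driver): an address-only page-flip
 * classifies as a fast update, assuming the stream already has exactly one
 * plane and no stream_update is passed. The update array is hypothetical.
 *
 *	struct dc_surface_update updates[1] = { 0 };
 *
 *	updates[0].surface = plane_state;
 *	updates[0].flip_addr = &flip_addr;	// no plane/scaling info
 *
 *	if (dc_check_update_surfaces_for_stream(dc, updates, 1, NULL,
 *			dc_stream_get_status(stream)) == UPDATE_TYPE_FAST)
 *		; // address-only flip: no full context rebuild needed
 */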

static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;


static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (surface_count == 0) {
		/*
		 * When turning off the screen, there is no need to program the
		 * front end a second time; just apply the context and return.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Full fe update */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
				continue;

			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);

			if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
					// if otg funcs defined check if blanked before programming
					if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
								pipe_ctx->stream_res.abm, stream->abm_level);
				} else
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
			}

			if (stream_update && stream_update->periodic_fn_vsync_delta &&
					pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
						pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
						pipe_ctx->stream->periodic_fn_vsync_delta);
		}
	}

	if (update_type == UPDATE_TYPE_FULL)
		context_timing_trace(dc, &context->res_ctx);

	/* Lock the top pipe while updating plane addrs, since freesync requires
	 *  plane addr update event triggers to be synchronized.
	 *  top_pipe_to_program is expected to never be NULL
	 */
	if (update_type == UPDATE_TYPE_FAST) {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;

			if (stream_update->hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
}

void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: On flip we don't build the state, so it still has the
		 * old address, which is why we update the address here.
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/* update current_state */
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	/* use current_state to update watermarks etc. */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}
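
/*
 * Illustrative sketch (not part of the driver): committing an address-only
 * flip through the entry point above. A fast update passes the current
 * state straight through; a full update gets the copied context built in
 * the function body.
 *
 *	struct dc_surface_update srf = { 0 };
 *
 *	srf.surface = plane_state;
 *	srf.flip_addr = &flip_addr;
 *
 *	dc_commit_updates_for_stream(dc, &srf, 1, stream,
 *			NULL, &plane_state, dc->current_state);
 */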

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}
#endif

		break;
	default:
		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}
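
/*
 * Illustrative sketch (not part of the driver): submitting a one-payload
 * I2C write over a link's DDC line. The i2c_command/i2c_payload field and
 * enum names are assumed from dc_ddc_types.h; treat them as an assumption.
 *
 *	uint8_t data[2] = { 0x00, 0x01 };
 *	struct i2c_payload payload = {
 *		.write = true,
 *		.address = 0x37,	// hypothetical 7-bit address
 *		.length = sizeof(data),
 *		.data = data,
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = &payload,
 *		.number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT,
 *		.speed = 100,	// KHz
 *	};
 *
 *	dc_submit_i2c(dc, link_index, &cmd);
 */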

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat the device as having no EDID if parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
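
/*
 * Illustrative sketch (not part of the driver): registering a remote
 * (e.g. DP MST) sink from a raw EDID blob. init_data.link must point at
 * the link the sink hangs off; edid/edid_len are assumed to come from the
 * topology manager.
 *
 *	struct dc_sink_init_data init_data = { 0 };
 *	struct dc_sink *sink;
 *
 *	init_data.link = link;
 *	init_data.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
 *
 *	sink = dc_link_add_remote_sink(link, edid, edid_len, &init_data);
 *	if (!sink)
 *		return;		// EDID too large or allocation failed
 *	...
 *	dc_link_remove_remote_sink(link, sink);
 */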

void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink the array to remove the empty slot */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}