1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25 #include <linux/slab.h>
26 #include <linux/mm.h>
27
28 #include "dm_services.h"
29
30 #include "dc.h"
31
32 #include "core_status.h"
33 #include "core_types.h"
34 #include "hw_sequencer.h"
35 #include "dce/dce_hwseq.h"
36
37 #include "resource.h"
38
39 #include "clk_mgr.h"
40 #include "clock_source.h"
41 #include "dc_bios_types.h"
42
43 #include "bios_parser_interface.h"
44 #include "bios/bios_parser_helper.h"
45 #include "include/irq_service_interface.h"
46 #include "transform.h"
47 #include "dmcu.h"
48 #include "dpp.h"
49 #include "timing_generator.h"
50 #include "abm.h"
51 #include "virtual/virtual_link_encoder.h"
52 #include "hubp.h"
53
54 #include "link_hwss.h"
55 #include "link_encoder.h"
56 #include "link_enc_cfg.h"
57
58 #include "dc_link.h"
59 #include "dc_link_ddc.h"
60 #include "dm_helpers.h"
61 #include "mem_input.h"
62 #include "hubp.h"
63
64 #include "dc_link_dp.h"
65 #include "dc_dmub_srv.h"
66
67 #include "dsc.h"
68
69 #include "vm_helper.h"
70
71 #include "dce/dce_i2c.h"
72
73 #include "dmub/dmub_srv.h"
74
75 #include "i2caux_interface.h"
76 #include "dce/dmub_hw_lock_mgr.h"
77
78 #include "dc_trace.h"
79
80 #define CTX \
81 dc->ctx
82
83 #define DC_LOGGER \
84 dc->ctx->logger
85
86 static const char DC_BUILD_ID[] = "production-build";
87
88 /**
89 * DOC: Overview
90 *
91 * DC is the OS-agnostic component of the amdgpu DC driver.
92 *
93 * DC maintains and validates a set of structs representing the state of the
94 * driver and writes that state to AMD hardware.
95 *
96 * Main DC HW structs:
97 *
98 * struct dc - The central struct. One per driver. Created on driver load,
99 * destroyed on driver unload.
100 *
101 * struct dc_context - One per driver.
102 * Used as a backpointer by most other structs in dc.
103 *
104 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
105 * plugpoints). Created on driver load, destroyed on driver unload.
106 *
107 * struct dc_sink - One per display. Created on boot or hotplug.
108 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
109 * (the display directly attached). It may also have one or more remote
110 * sinks (in the Multi-Stream Transport case).
111 *
112 * struct resource_pool - One per driver. Represents the hw blocks not in the
113 * main pipeline. Not directly accessible by dm.
114 *
115 * Main dc state structs:
116 *
117 * These structs can be created and destroyed as needed. There is a full set of
118 * these structs in dc->current_state representing the currently programmed state.
119 *
120 * struct dc_state - The global DC state to track global state information,
121 * such as bandwidth values.
122 *
123 * struct dc_stream_state - Represents the hw configuration for the pipeline from
124 * a framebuffer to a display. Maps one-to-one with dc_sink.
125 *
126 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
127 * and may have more in the Multi-Plane Overlay case.
128 *
129 * struct resource_context - Represents the programmable state of everything in
130 * the resource_pool. Not directly accessible by dm.
131 *
132 * struct pipe_ctx - A member of struct resource_context. Represents the
133 * internal hardware pipeline components. Each dc_plane_state has either
134 * one or two (in the pipe-split case).
135 */
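/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * ownership described above can be walked from the central struct dc. Links
 * are created once at init, while streams and planes live in the state that
 * is currently committed.
 *
 *	static void example_walk_dc(struct dc *dc)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < dc->link_count; i++)
 *			DC_LOG_DC("link %d: signal %d", i,
 *				  dc->links[i]->connector_signal);
 *
 *		DC_LOG_DC("committed streams: %d",
 *			  dc->current_state->stream_count);
 *	}
 */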
136
137 /*******************************************************************************
138 * Private functions
139 ******************************************************************************/
140
141 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
142 {
143 if (new > *original)
144 *original = new;
145 }
146
147 static void destroy_links(struct dc *dc)
148 {
149 uint32_t i;
150
151 for (i = 0; i < dc->link_count; i++) {
152 if (NULL != dc->links[i])
153 link_destroy(&dc->links[i]);
154 }
155 }
156
157 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
158 {
159 int i;
160 uint32_t count = 0;
161
162 for (i = 0; i < num_links; i++) {
163 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
164 links[i]->is_internal_display)
165 count++;
166 }
167
168 return count;
169 }
170
171 static int get_seamless_boot_stream_count(struct dc_state *ctx)
172 {
173 uint8_t i;
174 uint8_t seamless_boot_stream_count = 0;
175
176 for (i = 0; i < ctx->stream_count; i++)
177 if (ctx->streams[i]->apply_seamless_boot_optimization)
178 seamless_boot_stream_count++;
179
180 return seamless_boot_stream_count;
181 }
182
183 static bool create_links(
184 struct dc *dc,
185 uint32_t num_virtual_links)
186 {
187 int i;
188 int connectors_num;
189 struct dc_bios *bios = dc->ctx->dc_bios;
190
191 dc->link_count = 0;
192
193 connectors_num = bios->funcs->get_connectors_number(bios);
194
195 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
196
197 if (connectors_num > ENUM_ID_COUNT) {
198 dm_error(
199 "DC: Number of connectors %d exceeds maximum of %d!\n",
200 connectors_num,
201 ENUM_ID_COUNT);
202 return false;
203 }
204
205 dm_output_to_console(
206 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
207 __func__,
208 connectors_num,
209 num_virtual_links);
210
211 for (i = 0; i < connectors_num; i++) {
212 struct link_init_data link_init_params = {0};
213 struct dc_link *link;
214
215 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
216
217 link_init_params.ctx = dc->ctx;
218 /* next BIOS object table connector */
219 link_init_params.connector_index = i;
220 link_init_params.link_index = dc->link_count;
221 link_init_params.dc = dc;
222 link = link_create(&link_init_params);
223
224 if (link) {
225 dc->links[dc->link_count] = link;
226 link->dc = dc;
227 ++dc->link_count;
228 }
229 }
230
231 DC_LOG_DC("BIOS object table - end");
232
233 for (i = 0; i < num_virtual_links; i++) {
234 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
235 struct encoder_init_data enc_init = {0};
236
237 if (link == NULL) {
238 BREAK_TO_DEBUGGER();
239 goto failed_alloc;
240 }
241
242 link->link_index = dc->link_count;
243 dc->links[dc->link_count] = link;
244 dc->link_count++;
245
246 link->ctx = dc->ctx;
247 link->dc = dc;
248 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
249 link->link_id.type = OBJECT_TYPE_CONNECTOR;
250 link->link_id.id = CONNECTOR_ID_VIRTUAL;
251 link->link_id.enum_id = ENUM_ID_1;
252 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
253
254 if (!link->link_enc) {
255 BREAK_TO_DEBUGGER();
256 goto failed_alloc;
257 }
258
259 link->link_status.dpcd_caps = &link->dpcd_caps;
260
261 enc_init.ctx = dc->ctx;
262 enc_init.channel = CHANNEL_ID_UNKNOWN;
263 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
264 enc_init.transmitter = TRANSMITTER_UNKNOWN;
265 enc_init.connector = link->link_id;
266 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
267 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
268 enc_init.encoder.enum_id = ENUM_ID_1;
269 virtual_link_encoder_construct(link->link_enc, &enc_init);
270 }
271
272 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
273
274 return true;
275
276 failed_alloc:
277 return false;
278 }
279
280 static struct dc_perf_trace *dc_perf_trace_create(void)
281 {
282 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
283 }
284
285 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
286 {
287 kfree(*perf_trace);
288 *perf_trace = NULL;
289 }
290
291 /**
292 * dc_stream_adjust_vmin_vmax - Adjust the DRR vertical total range for a stream
293 *
294 * Looks up the pipe context of dc_stream_state and updates the
295 * vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
296 * Rate), a power-saving feature that reduces the panel refresh rate while
297 * the screen content is static.
298 *
299 * @dc: dc reference
300 * @stream: Initial dc stream state
301 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
302 */
303 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
304 struct dc_stream_state *stream,
305 struct dc_crtc_timing_adjust *adjust)
306 {
307 int i = 0;
308 bool ret = false;
309
310 stream->adjust.v_total_max = adjust->v_total_max;
311 stream->adjust.v_total_mid = adjust->v_total_mid;
312 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
313 stream->adjust.v_total_min = adjust->v_total_min;
314
315 for (i = 0; i < MAX_PIPES; i++) {
316 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
317
318 if (pipe->stream == stream && pipe->stream_res.tg) {
319 dc->hwss.set_drr(&pipe,
320 1,
321 *adjust);
322
323 ret = true;
324 }
325 }
326 return ret;
327 }
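/*
 * Usage sketch (illustrative only, the values are hypothetical): a DM layer
 * that wants to engage DRR for a stream fills a dc_crtc_timing_adjust and
 * hands it to dc_stream_adjust_vmin_vmax(), which programs every pipe that
 * drives the stream.
 *
 *	struct dc_crtc_timing_adjust adjust = { 0 };
 *
 *	adjust.v_total_min = stream->timing.v_total;
 *	adjust.v_total_max = stream->timing.v_total + 200;
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */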
328
329 bool dc_stream_get_crtc_position(struct dc *dc,
330 struct dc_stream_state **streams, int num_streams,
331 unsigned int *v_pos, unsigned int *nom_v_pos)
332 {
333 /* TODO: Support multiple streams */
334 const struct dc_stream_state *stream = streams[0];
335 int i = 0;
336 bool ret = false;
337 struct crtc_position position;
338
339 for (i = 0; i < MAX_PIPES; i++) {
340 struct pipe_ctx *pipe =
341 &dc->current_state->res_ctx.pipe_ctx[i];
342
343 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
344 dc->hwss.get_position(&pipe, 1, &position);
345
346 *v_pos = position.vertical_count;
347 *nom_v_pos = position.nominal_vcount;
348 ret = true;
349 }
350 }
351 return ret;
352 }
353
354 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
355 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
356 struct crc_params *crc_window)
357 {
358 int i;
359 struct dmcu *dmcu = dc->res_pool->dmcu;
360 struct pipe_ctx *pipe;
361 struct crc_region tmp_win, *crc_win;
362 struct otg_phy_mux mapping_tmp, *mux_mapping;
363
364 /*crc window can't be null*/
365 if (!crc_window)
366 return false;
367
368 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
369 crc_win = &tmp_win;
370 mux_mapping = &mapping_tmp;
371 /*set crc window*/
372 tmp_win.x_start = crc_window->windowa_x_start;
373 tmp_win.y_start = crc_window->windowa_y_start;
374 tmp_win.x_end = crc_window->windowa_x_end;
375 tmp_win.y_end = crc_window->windowa_y_end;
376
377 for (i = 0; i < MAX_PIPES; i++) {
378 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
379 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
380 break;
381 }
382
383 /* Stream not found */
384 if (i == MAX_PIPES)
385 return false;
386
387
388 /*set mux routing info*/
389 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
390 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
391
392 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
393 } else {
394 DC_LOG_DC("dmcu is not initialized");
395 return false;
396 }
397
398 return true;
399 }
400
401 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
402 {
403 int i;
404 struct dmcu *dmcu = dc->res_pool->dmcu;
405 struct pipe_ctx *pipe;
406 struct otg_phy_mux mapping_tmp, *mux_mapping;
407
408 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
409 mux_mapping = &mapping_tmp;
410
411 for (i = 0; i < MAX_PIPES; i++) {
412 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
413 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
414 break;
415 }
416
417 /* Stream not found */
418 if (i == MAX_PIPES)
419 return false;
420
421
422 /*set mux routing info*/
423 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
424 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
425
426 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
427 } else {
428 DC_LOG_DC("dmcu is not initialized");
429 return false;
430 }
431
432 return true;
433 }
434 #endif
435
436 /**
437 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
438 * @dc: DC Object
439 * @stream: The stream to configure CRC on.
440 * @enable: Enable CRC if true, disable otherwise.
441 * @crc_window: CRC window (x/y start/end) information
442 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
443 * once.
444 *
445 * By default, only CRC0 is configured, and the entire frame is used to
446 * calculate the crc.
447 */
448 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
449 struct crc_params *crc_window, bool enable, bool continuous)
450 {
451 int i;
452 struct pipe_ctx *pipe;
453 struct crc_params param;
454 struct timing_generator *tg;
455
456 for (i = 0; i < MAX_PIPES; i++) {
457 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
458 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
459 break;
460 }
461 /* Stream not found */
462 if (i == MAX_PIPES)
463 return false;
464
465 /* By default, capture the full frame */
466 param.windowa_x_start = 0;
467 param.windowa_y_start = 0;
468 param.windowa_x_end = pipe->stream->timing.h_addressable;
469 param.windowa_y_end = pipe->stream->timing.v_addressable;
470 param.windowb_x_start = 0;
471 param.windowb_y_start = 0;
472 param.windowb_x_end = pipe->stream->timing.h_addressable;
473 param.windowb_y_end = pipe->stream->timing.v_addressable;
474
475 if (crc_window) {
476 param.windowa_x_start = crc_window->windowa_x_start;
477 param.windowa_y_start = crc_window->windowa_y_start;
478 param.windowa_x_end = crc_window->windowa_x_end;
479 param.windowa_y_end = crc_window->windowa_y_end;
480 param.windowb_x_start = crc_window->windowb_x_start;
481 param.windowb_y_start = crc_window->windowb_y_start;
482 param.windowb_x_end = crc_window->windowb_x_end;
483 param.windowb_y_end = crc_window->windowb_y_end;
484 }
485
486 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
487 param.odm_mode = pipe->next_odm_pipe ? 1:0;
488
489 /* Default to the union of both windows */
490 param.selection = UNION_WINDOW_A_B;
491 param.continuous_mode = continuous;
492 param.enable = enable;
493
494 tg = pipe->stream_res.tg;
495
496 /* Only call if supported */
497 if (tg->funcs->configure_crc)
498 return tg->funcs->configure_crc(tg, &param);
499 DC_LOG_WARNING("CRC capture not supported.");
500 return false;
501 }
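/*
 * Usage sketch (illustrative only): passing a NULL crc_window enables
 * full-frame capture on CRC0, and continuous = true re-arms the capture on
 * every frame.
 *
 *	if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *		DC_LOG_WARNING("CRC setup rejected for this stream");
 */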
502
503 /**
504 * dc_stream_get_crc() - Get CRC values for the given stream.
505 * @dc: DC object
506 * @stream: The DC stream state of the stream to get CRCs from.
507 * @r_cr: CRC value for the first of the 3 channels stored here.
508 * @g_y: CRC value for the second of the 3 channels stored here.
509 * @b_cb: CRC value for the third of the 3 channels stored here.
510 *
511 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
512 * Return false if stream is not found, or if CRCs are not enabled.
513 */
514 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
515 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
516 {
517 int i;
518 struct pipe_ctx *pipe;
519 struct timing_generator *tg;
520
521 for (i = 0; i < MAX_PIPES; i++) {
522 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
523 if (pipe->stream == stream)
524 break;
525 }
526 /* Stream not found */
527 if (i == MAX_PIPES)
528 return false;
529
530 tg = pipe->stream_res.tg;
531
532 if (tg->funcs->get_crc)
533 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
534 DC_LOG_WARNING("CRC capture not supported.");
535 return false;
536 }
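/*
 * Usage sketch (illustrative only): once dc_stream_configure_crc() has been
 * called for the stream, the per-channel CRCs can be read back after a frame
 * completes.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC %08x %08x %08x", r_cr, g_y, b_cb);
 */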
537
538 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
539 enum dc_dynamic_expansion option)
540 {
541 /* OPP FMT dyn expansion updates*/
542 int i = 0;
543 struct pipe_ctx *pipe_ctx;
544
545 for (i = 0; i < MAX_PIPES; i++) {
546 if (dc->current_state->res_ctx.pipe_ctx[i].stream
547 == stream) {
548 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
549 pipe_ctx->stream_res.opp->dyn_expansion = option;
550 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
551 pipe_ctx->stream_res.opp,
552 COLOR_SPACE_YCBCR601,
553 stream->timing.display_color_depth,
554 stream->signal);
555 }
556 }
557 }
558
559 void dc_stream_set_dither_option(struct dc_stream_state *stream,
560 enum dc_dither_option option)
561 {
562 struct bit_depth_reduction_params params;
563 struct dc_link *link = stream->link;
564 struct pipe_ctx *pipes = NULL;
565 int i;
566
567 for (i = 0; i < MAX_PIPES; i++) {
568 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
569 stream) {
570 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
571 break;
572 }
573 }
574
575 if (!pipes)
576 return;
577 if (option > DITHER_OPTION_MAX)
578 return;
579
580 stream->dither_option = option;
581
582 memset(&params, 0, sizeof(params));
583 resource_build_bit_depth_reduction_params(stream, &params);
584 stream->bit_depth_params = params;
585
586 if (pipes->plane_res.xfm &&
587 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
588 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
589 pipes->plane_res.xfm,
590 pipes->plane_res.scl_data.lb_params.depth,
591 &stream->bit_depth_params);
592 }
593
594 pipes->stream_res.opp->funcs->
595 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
596 }
597
598 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
599 {
600 int i = 0;
601 bool ret = false;
602 struct pipe_ctx *pipes;
603
604 for (i = 0; i < MAX_PIPES; i++) {
605 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
606 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
607 dc->hwss.program_gamut_remap(pipes);
608 ret = true;
609 }
610 }
611
612 return ret;
613 }
614
615 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
616 {
617 int i = 0;
618 bool ret = false;
619 struct pipe_ctx *pipes;
620
621 for (i = 0; i < MAX_PIPES; i++) {
622 if (dc->current_state->res_ctx.pipe_ctx[i].stream
623 == stream) {
624
625 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
626 dc->hwss.program_output_csc(dc,
627 pipes,
628 stream->output_color_space,
629 stream->csc_color_matrix.matrix,
630 pipes->stream_res.opp->inst);
631 ret = true;
632 }
633 }
634
635 return ret;
636 }
637
638 void dc_stream_set_static_screen_params(struct dc *dc,
639 struct dc_stream_state **streams,
640 int num_streams,
641 const struct dc_static_screen_params *params)
642 {
643 int i = 0;
644 int j = 0;
645 struct pipe_ctx *pipes_affected[MAX_PIPES];
646 int num_pipes_affected = 0;
647
648 for (i = 0; i < num_streams; i++) {
649 struct dc_stream_state *stream = streams[i];
650
651 for (j = 0; j < MAX_PIPES; j++) {
652 if (dc->current_state->res_ctx.pipe_ctx[j].stream
653 == stream) {
654 pipes_affected[num_pipes_affected++] =
655 &dc->current_state->res_ctx.pipe_ctx[j];
656 }
657 }
658 }
659
660 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
661 }
662
663 static void dc_destruct(struct dc *dc)
664 {
665 if (dc->current_state) {
666 dc_release_state(dc->current_state);
667 dc->current_state = NULL;
668 }
669
670 destroy_links(dc);
671
672 if (dc->clk_mgr) {
673 dc_destroy_clk_mgr(dc->clk_mgr);
674 dc->clk_mgr = NULL;
675 }
676
677 dc_destroy_resource_pool(dc);
678
679 if (dc->ctx->gpio_service)
680 dal_gpio_service_destroy(&dc->ctx->gpio_service);
681
682 if (dc->ctx->created_bios)
683 dal_bios_parser_destroy(&dc->ctx->dc_bios);
684
685 dc_perf_trace_destroy(&dc->ctx->perf_trace);
686
687 kfree(dc->ctx);
688 dc->ctx = NULL;
689
690 kfree(dc->bw_vbios);
691 dc->bw_vbios = NULL;
692
693 kfree(dc->bw_dceip);
694 dc->bw_dceip = NULL;
695
696 #ifdef CONFIG_DRM_AMD_DC_DCN
697 kfree(dc->dcn_soc);
698 dc->dcn_soc = NULL;
699
700 kfree(dc->dcn_ip);
701 dc->dcn_ip = NULL;
702
703 #endif
704 kfree(dc->vm_helper);
705 dc->vm_helper = NULL;
706
707 }
708
709 static bool dc_construct_ctx(struct dc *dc,
710 const struct dc_init_data *init_params)
711 {
712 struct dc_context *dc_ctx;
713 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
714
715 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
716 if (!dc_ctx)
717 return false;
718
719 dc_ctx->cgs_device = init_params->cgs_device;
720 dc_ctx->driver_context = init_params->driver;
721 dc_ctx->dc = dc;
722 dc_ctx->asic_id = init_params->asic_id;
723 dc_ctx->dc_sink_id_count = 0;
724 dc_ctx->dc_stream_id_count = 0;
725 dc_ctx->dce_environment = init_params->dce_environment;
726
727 /* Create logger */
728
729 dc_version = resource_parse_asic_id(init_params->asic_id);
730 dc_ctx->dce_version = dc_version;
731
732 dc_ctx->perf_trace = dc_perf_trace_create();
733 if (!dc_ctx->perf_trace) {
734 ASSERT_CRITICAL(false);
735 return false;
736 }
737
738 dc->ctx = dc_ctx;
739
740 return true;
741 }
742
743 static bool dc_construct(struct dc *dc,
744 const struct dc_init_data *init_params)
745 {
746 struct dc_context *dc_ctx;
747 struct bw_calcs_dceip *dc_dceip;
748 struct bw_calcs_vbios *dc_vbios;
749 #ifdef CONFIG_DRM_AMD_DC_DCN
750 struct dcn_soc_bounding_box *dcn_soc;
751 struct dcn_ip_params *dcn_ip;
752 #endif
753
754 dc->config = init_params->flags;
755
756 // Allocate memory for the vm_helper
757 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
758 if (!dc->vm_helper) {
759 dm_error("%s: failed to create dc->vm_helper\n", __func__);
760 goto fail;
761 }
762
763 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
764
765 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
766 if (!dc_dceip) {
767 dm_error("%s: failed to create dceip\n", __func__);
768 goto fail;
769 }
770
771 dc->bw_dceip = dc_dceip;
772
773 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
774 if (!dc_vbios) {
775 dm_error("%s: failed to create vbios\n", __func__);
776 goto fail;
777 }
778
779 dc->bw_vbios = dc_vbios;
780 #ifdef CONFIG_DRM_AMD_DC_DCN
781 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
782 if (!dcn_soc) {
783 dm_error("%s: failed to create dcn_soc\n", __func__);
784 goto fail;
785 }
786
787 dc->dcn_soc = dcn_soc;
788
789 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
790 if (!dcn_ip) {
791 dm_error("%s: failed to create dcn_ip\n", __func__);
792 goto fail;
793 }
794
795 dc->dcn_ip = dcn_ip;
796 #endif
797
798 if (!dc_construct_ctx(dc, init_params)) {
799 dm_error("%s: failed to create ctx\n", __func__);
800 goto fail;
801 }
802
803 dc_ctx = dc->ctx;
804
805 /* Resource should construct all asic specific resources.
806 * This should be the only place where we need to parse the asic id
807 */
808 if (init_params->vbios_override)
809 dc_ctx->dc_bios = init_params->vbios_override;
810 else {
811 /* Create BIOS parser */
812 struct bp_init_data bp_init_data;
813
814 bp_init_data.ctx = dc_ctx;
815 bp_init_data.bios = init_params->asic_id.atombios_base_address;
816
817 dc_ctx->dc_bios = dal_bios_parser_create(
818 &bp_init_data, dc_ctx->dce_version);
819
820 if (!dc_ctx->dc_bios) {
821 ASSERT_CRITICAL(false);
822 goto fail;
823 }
824
825 dc_ctx->created_bios = true;
826 }
827
828 dc->vendor_signature = init_params->vendor_signature;
829
830 /* Create GPIO service */
831 dc_ctx->gpio_service = dal_gpio_service_create(
832 dc_ctx->dce_version,
833 dc_ctx->dce_environment,
834 dc_ctx);
835
836 if (!dc_ctx->gpio_service) {
837 ASSERT_CRITICAL(false);
838 goto fail;
839 }
840
841 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
842 if (!dc->res_pool)
843 goto fail;
844
845 /* set i2c speed if not done by the respective dcnxxx__resource.c */
846 if (dc->caps.i2c_speed_in_khz_hdcp == 0)
847 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
848
849 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
850 if (!dc->clk_mgr)
851 goto fail;
852 #ifdef CONFIG_DRM_AMD_DC_DCN
853 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
854 #endif
855
856 if (dc->res_pool->funcs->update_bw_bounding_box)
857 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
858
859 /* Creation of current_state must occur after dc->dml
860 * is initialized in dc_create_resource_pool because
861 * on creation it copies the contents of dc->dml
862 */
863
864 dc->current_state = dc_create_state(dc);
865
866 if (!dc->current_state) {
867 dm_error("%s: failed to create validate ctx\n", __func__);
868 goto fail;
869 }
870
871 dc_resource_state_construct(dc, dc->current_state);
872
873 if (!create_links(dc, init_params->num_virtual_links))
874 goto fail;
875
876 /* Initialise DIG link encoder resource tracking variables. */
877 link_enc_cfg_init(dc, dc->current_state);
878
879 return true;
880
881 fail:
882 return false;
883 }
884
885 static void disable_all_writeback_pipes_for_stream(
886 const struct dc *dc,
887 struct dc_stream_state *stream,
888 struct dc_state *context)
889 {
890 int i;
891
892 for (i = 0; i < stream->num_wb_info; i++)
893 stream->writeback_info[i].wb_enabled = false;
894 }
895
896 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
897 struct dc_stream_state *stream, bool lock)
898 {
899 int i = 0;
900
901 /* Check whether the interdependent_update_lock hook is implemented; the fallback below covers the DCE110 case */
902 if (dc->hwss.interdependent_update_lock)
903 dc->hwss.interdependent_update_lock(dc, context, lock);
904 else {
905 for (i = 0; i < dc->res_pool->pipe_count; i++) {
906 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
907 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
908
909 // Copied conditions that were previously in dce110_apply_ctx_for_surface
910 if (stream == pipe_ctx->stream) {
911 if (!pipe_ctx->top_pipe &&
912 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
913 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
914 }
915 }
916 }
917 }
918
919 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
920 {
921 int i, j;
922 struct dc_state *dangling_context = dc_create_state(dc);
923 struct dc_state *current_ctx;
924
925 if (dangling_context == NULL)
926 return;
927
928 dc_resource_state_copy_construct(dc->current_state, dangling_context);
929
930 for (i = 0; i < dc->res_pool->pipe_count; i++) {
931 struct dc_stream_state *old_stream =
932 dc->current_state->res_ctx.pipe_ctx[i].stream;
933 bool should_disable = true;
934
935 for (j = 0; j < context->stream_count; j++) {
936 if (old_stream == context->streams[j]) {
937 should_disable = false;
938 break;
939 }
940 }
941 if (should_disable && old_stream) {
942 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
943 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
944
945 if (dc->hwss.apply_ctx_for_surface) {
946 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
947 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
948 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
949 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
950 }
951 if (dc->hwss.program_front_end_for_ctx) {
952 dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
953 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
954 dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
955 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
956 }
957 }
958 }
959
960 current_ctx = dc->current_state;
961 dc->current_state = dangling_context;
962 dc_release_state(current_ctx);
963 }
964
965 static void disable_vbios_mode_if_required(
966 struct dc *dc,
967 struct dc_state *context)
968 {
969 unsigned int i, j;
970
971 /* check if timing_changed, disable stream*/
972 for (i = 0; i < dc->res_pool->pipe_count; i++) {
973 struct dc_stream_state *stream = NULL;
974 struct dc_link *link = NULL;
975 struct pipe_ctx *pipe = NULL;
976
977 pipe = &context->res_ctx.pipe_ctx[i];
978 stream = pipe->stream;
979 if (stream == NULL)
980 continue;
981
982 // only looking for first odm pipe
983 if (pipe->prev_odm_pipe)
984 continue;
985
986 if (stream->link->local_sink &&
987 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
988 link = stream->link;
989 }
990
991 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
992 unsigned int enc_inst, tg_inst = 0;
993 unsigned int pix_clk_100hz;
994
995 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
996 if (enc_inst != ENGINE_ID_UNKNOWN) {
997 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
998 if (dc->res_pool->stream_enc[j]->id == enc_inst) {
999 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1000 dc->res_pool->stream_enc[j]);
1001 break;
1002 }
1003 }
1004
1005 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1006 dc->res_pool->dp_clock_source,
1007 tg_inst, &pix_clk_100hz);
1008
1009 if (link->link_status.link_active) {
1010 uint32_t requested_pix_clk_100hz =
1011 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1012
1013 if (pix_clk_100hz != requested_pix_clk_100hz) {
1014 core_link_disable_stream(pipe);
1015 pipe->stream->dpms_off = false;
1016 }
1017 }
1018 }
1019 }
1020 }
1021 }
1022
1023 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1024 {
1025 int i;
1026 PERF_TRACE();
1027 for (i = 0; i < MAX_PIPES; i++) {
1028 int count = 0;
1029 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1030
1031 if (!pipe->plane_state)
1032 continue;
1033
1034 /* Timeout 100 ms */
1035 while (count < 100000) {
1036 /* Must set to false to start with, due to OR in update function */
1037 pipe->plane_state->status.is_flip_pending = false;
1038 dc->hwss.update_pending_status(pipe);
1039 if (!pipe->plane_state->status.is_flip_pending)
1040 break;
1041 udelay(1);
1042 count++;
1043 }
1044 ASSERT(!pipe->plane_state->status.is_flip_pending);
1045 }
1046 PERF_TRACE();
1047 }
1048
1049 /*******************************************************************************
1050 * Public functions
1051 ******************************************************************************/
1052
1053 struct dc *dc_create(const struct dc_init_data *init_params)
1054 {
1055 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1056 unsigned int full_pipe_count;
1057
1058 if (!dc)
1059 return NULL;
1060
1061 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1062 if (!dc_construct_ctx(dc, init_params))
1063 goto destruct_dc;
1064 } else {
1065 if (!dc_construct(dc, init_params))
1066 goto destruct_dc;
1067
1068 full_pipe_count = dc->res_pool->pipe_count;
1069 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1070 full_pipe_count--;
1071 dc->caps.max_streams = min(
1072 full_pipe_count,
1073 dc->res_pool->stream_enc_count);
1074
1075 dc->caps.max_links = dc->link_count;
1076 dc->caps.max_audios = dc->res_pool->audio_count;
1077 dc->caps.linear_pitch_alignment = 64;
1078
1079 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1080
1081 if (dc->res_pool->dmcu != NULL)
1082 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1083 }
1084
1085 /* Populate versioning information */
1086 dc->versions.dc_ver = DC_VER;
1087
1088 dc->build_id = DC_BUILD_ID;
1089
1090 DC_LOG_DC("Display Core initialized\n");
1091
1092
1093
1094 return dc;
1095
1096 destruct_dc:
1097 dc_destruct(dc);
1098 kfree(dc);
1099 return NULL;
1100 }
1101
1102 static void detect_edp_presence(struct dc *dc)
1103 {
1104 struct dc_link *edp_links[MAX_NUM_EDP];
1105 struct dc_link *edp_link = NULL;
1106 enum dc_connection_type type;
1107 int i;
1108 int edp_num;
1109
1110 get_edp_links(dc, edp_links, &edp_num);
1111 if (!edp_num)
1112 return;
1113
1114 for (i = 0; i < edp_num; i++) {
1115 edp_link = edp_links[i];
1116 if (dc->config.edp_not_connected) {
1117 edp_link->edp_sink_present = false;
1118 } else {
1119 dc_link_detect_sink(edp_link, &type);
1120 edp_link->edp_sink_present = (type != dc_connection_none);
1121 }
1122 }
1123 }
1124
1125 void dc_hardware_init(struct dc *dc)
1126 {
1127
1128 detect_edp_presence(dc);
1129 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1130 dc->hwss.init_hw(dc);
1131 }
1132
1133 void dc_init_callbacks(struct dc *dc,
1134 const struct dc_callback_init *init_params)
1135 {
1136 #ifdef CONFIG_DRM_AMD_DC_HDCP
1137 dc->ctx->cp_psp = init_params->cp_psp;
1138 #endif
1139 }
1140
1141 void dc_deinit_callbacks(struct dc *dc)
1142 {
1143 #ifdef CONFIG_DRM_AMD_DC_HDCP
1144 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1145 #endif
1146 }
1147
1148 void dc_destroy(struct dc **dc)
1149 {
1150 dc_destruct(*dc);
1151 kfree(*dc);
1152 *dc = NULL;
1153 }
1154
1155 static void enable_timing_multisync(
1156 struct dc *dc,
1157 struct dc_state *ctx)
1158 {
1159 int i = 0, multisync_count = 0;
1160 int pipe_count = dc->res_pool->pipe_count;
1161 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1162
1163 for (i = 0; i < pipe_count; i++) {
1164 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1165 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1166 continue;
1167 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1168 continue;
1169 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1170 multisync_count++;
1171 }
1172
1173 if (multisync_count > 0) {
1174 dc->hwss.enable_per_frame_crtc_position_reset(
1175 dc, multisync_count, multisync_pipes);
1176 }
1177 }
1178
1179 static void program_timing_sync(
1180 struct dc *dc,
1181 struct dc_state *ctx)
1182 {
1183 int i, j, k;
1184 int group_index = 0;
1185 int num_group = 0;
1186 int pipe_count = dc->res_pool->pipe_count;
1187 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1188
1189 for (i = 0; i < pipe_count; i++) {
1190 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
1191 continue;
1192
1193 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1194 }
1195
1196 for (i = 0; i < pipe_count; i++) {
1197 int group_size = 1;
1198 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1199 struct pipe_ctx *pipe_set[MAX_PIPES];
1200
1201 if (!unsynced_pipes[i])
1202 continue;
1203
1204 pipe_set[0] = unsynced_pipes[i];
1205 unsynced_pipes[i] = NULL;
1206
1207 /* Add tg to the set, search rest of the tg's for ones with
1208 * same timing, add all tgs with same timing to the group
1209 */
1210 for (j = i + 1; j < pipe_count; j++) {
1211 if (!unsynced_pipes[j])
1212 continue;
1213 if (sync_type != TIMING_SYNCHRONIZABLE &&
1214 dc->hwss.enable_vblanks_synchronization &&
1215 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1216 resource_are_vblanks_synchronizable(
1217 unsynced_pipes[j]->stream,
1218 pipe_set[0]->stream)) {
1219 sync_type = VBLANK_SYNCHRONIZABLE;
1220 pipe_set[group_size] = unsynced_pipes[j];
1221 unsynced_pipes[j] = NULL;
1222 group_size++;
1223 } else
1224 if (sync_type != VBLANK_SYNCHRONIZABLE &&
1225 resource_are_streams_timing_synchronizable(
1226 unsynced_pipes[j]->stream,
1227 pipe_set[0]->stream)) {
1228 sync_type = TIMING_SYNCHRONIZABLE;
1229 pipe_set[group_size] = unsynced_pipes[j];
1230 unsynced_pipes[j] = NULL;
1231 group_size++;
1232 }
1233 }
1234
1235 /* set first unblanked pipe as master */
1236 for (j = 0; j < group_size; j++) {
1237 bool is_blanked;
1238
1239 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1240 is_blanked =
1241 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1242 else
1243 is_blanked =
1244 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1245 if (!is_blanked) {
1246 if (j == 0)
1247 break;
1248
1249 swap(pipe_set[0], pipe_set[j]);
1250 break;
1251 }
1252 }
1253
1254 for (k = 0; k < group_size; k++) {
1255 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1256
1257 status->timing_sync_info.group_id = num_group;
1258 status->timing_sync_info.group_size = group_size;
1259 if (k == 0)
1260 status->timing_sync_info.master = true;
1261 else
1262 status->timing_sync_info.master = false;
1263
1264 }
1265 /* remove any other unblanked pipes as they have already been synced */
1266 for (j = j + 1; j < group_size; j++) {
1267 bool is_blanked;
1268
1269 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1270 is_blanked =
1271 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1272 else
1273 is_blanked =
1274 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1275 if (!is_blanked) {
1276 group_size--;
1277 pipe_set[j] = pipe_set[group_size];
1278 j--;
1279 }
1280 }
1281
1282 if (group_size > 1) {
1283 if (sync_type == TIMING_SYNCHRONIZABLE) {
1284 dc->hwss.enable_timing_synchronization(
1285 dc, group_index, group_size, pipe_set);
1286 } else
1287 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1288 dc->hwss.enable_vblanks_synchronization(
1289 dc, group_index, group_size, pipe_set);
1290 }
1291 group_index++;
1292 }
1293 num_group++;
1294 }
1295 }
1296
1297 static bool context_changed(
1298 struct dc *dc,
1299 struct dc_state *context)
1300 {
1301 uint8_t i;
1302
1303 if (context->stream_count != dc->current_state->stream_count)
1304 return true;
1305
1306 for (i = 0; i < dc->current_state->stream_count; i++) {
1307 if (dc->current_state->streams[i] != context->streams[i])
1308 return true;
1309 }
1310
1311 return false;
1312 }
1313
1314 bool dc_validate_seamless_boot_timing(const struct dc *dc,
1315 const struct dc_sink *sink,
1316 struct dc_crtc_timing *crtc_timing)
1317 {
1318 struct timing_generator *tg;
1319 struct stream_encoder *se = NULL;
1320
1321 struct dc_crtc_timing hw_crtc_timing = {0};
1322
1323 struct dc_link *link = sink->link;
1324 unsigned int i, enc_inst, tg_inst = 0;
1325
1326 /* Support seamless boot on EDP displays only */
1327 if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1328 return false;
1329 }
1330
1331 /* Check for enabled DIG to identify enabled display */
1332 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1333 return false;
1334
1335 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1336
1337 if (enc_inst == ENGINE_ID_UNKNOWN)
1338 return false;
1339
1340 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1341 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1342
1343 se = dc->res_pool->stream_enc[i];
1344
1345 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1346 dc->res_pool->stream_enc[i]);
1347 break;
1348 }
1349 }
1350
1351 // tg_inst not found
1352 if (i == dc->res_pool->stream_enc_count)
1353 return false;
1354
1355 if (tg_inst >= dc->res_pool->timing_generator_count)
1356 return false;
1357
1358 tg = dc->res_pool->timing_generators[tg_inst];
1359
1360 if (!tg->funcs->get_hw_timing)
1361 return false;
1362
1363 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1364 return false;
1365
1366 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1367 return false;
1368
1369 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1370 return false;
1371
1372 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1373 return false;
1374
1375 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1376 return false;
1377
1378 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1379 return false;
1380
1381 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1382 return false;
1383
1384 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1385 return false;
1386
1387 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1388 return false;
1389
1390 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1391 return false;
1392
1393 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1394 return false;
1395
1396 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1397 return false;
1398
1399 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1400 return false;
1401
1402 /* block DSC for now, as VBIOS does not currently support DSC timings */
1403 if (crtc_timing->flags.DSC)
1404 return false;
1405
1406 if (dc_is_dp_signal(link->connector_signal)) {
1407 unsigned int pix_clk_100hz;
1408
1409 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1410 dc->res_pool->dp_clock_source,
1411 tg_inst, &pix_clk_100hz);
1412
1413 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1414 return false;
1415
1416 if (!se->funcs->dp_get_pixel_format)
1417 return false;
1418
1419 if (!se->funcs->dp_get_pixel_format(
1420 se,
1421 &hw_crtc_timing.pixel_encoding,
1422 &hw_crtc_timing.display_color_depth))
1423 return false;
1424
1425 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1426 return false;
1427
1428 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1429 return false;
1430 }
1431
1432 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1433 return false;
1434 }
1435
1436 if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1437 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1438 return false;
1439 }
1440
1441 return true;
1442 }
1443
1444 void dc_enable_stereo(
1445 struct dc *dc,
1446 struct dc_state *context,
1447 struct dc_stream_state *streams[],
1448 uint8_t stream_count)
1449 {
1450 int i, j;
1451 struct pipe_ctx *pipe;
1452
1453 for (i = 0; i < MAX_PIPES; i++) {
1454 if (context != NULL)
1455 pipe = &context->res_ctx.pipe_ctx[i];
1456 else
1457 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1458 for (j = 0 ; pipe && j < stream_count; j++) {
1459 if (streams[j] && streams[j] == pipe->stream &&
1460 dc->hwss.setup_stereo)
1461 dc->hwss.setup_stereo(pipe, dc);
1462 }
1463 }
1464 }
1465
1466 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1467 {
1468 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1469 enable_timing_multisync(dc, context);
1470 program_timing_sync(dc, context);
1471 }
1472 }
1473
1474 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1475 {
1476 int i;
1477 unsigned int stream_mask = 0;
1478
1479 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1480 if (context->res_ctx.pipe_ctx[i].stream)
1481 stream_mask |= 1 << i;
1482 }
1483
1484 return stream_mask;
1485 }
1486
1487 /*
1488 * Applies the given context to HW and copies it into the current context.
1489 * It's up to the user to release the src context afterwards.
1490 */
1491 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1492 {
1493 struct dc_bios *dcb = dc->ctx->dc_bios;
1494 enum dc_status result = DC_ERROR_UNEXPECTED;
1495 struct pipe_ctx *pipe;
1496 int i, k, l;
1497 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1498
1499 #if defined(CONFIG_DRM_AMD_DC_DCN)
1500 dc_allow_idle_optimizations(dc, false);
1501 #endif
1502
1503 for (i = 0; i < context->stream_count; i++)
1504 dc_streams[i] = context->streams[i];
1505
1506 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1507 disable_vbios_mode_if_required(dc, context);
1508 dc->hwss.enable_accelerated_mode(dc, context);
1509 }
1510
1511 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1512 context->stream_count == 0)
1513 dc->hwss.prepare_bandwidth(dc, context);
1514
1515 disable_dangling_plane(dc, context);
1516 /* re-program planes for existing stream, in case we need to
1517 * free up plane resource for later use
1518 */
1519 if (dc->hwss.apply_ctx_for_surface) {
1520 for (i = 0; i < context->stream_count; i++) {
1521 if (context->streams[i]->mode_changed)
1522 continue;
1523 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1524 dc->hwss.apply_ctx_for_surface(
1525 dc, context->streams[i],
1526 context->stream_status[i].plane_count,
1527 context); /* use new pipe config in new context */
1528 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1529 dc->hwss.post_unlock_program_front_end(dc, context);
1530 }
1531 }
1532
1533 /* Program hardware */
1534 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1535 pipe = &context->res_ctx.pipe_ctx[i];
1536 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1537 }
1538
1539 result = dc->hwss.apply_ctx_to_hw(dc, context);
1540
1541 if (result != DC_OK)
1542 return result;
1543
1544 dc_trigger_sync(dc, context);
1545
1546 /* Program all planes within new context*/
1547 if (dc->hwss.program_front_end_for_ctx) {
1548 dc->hwss.interdependent_update_lock(dc, context, true);
1549 dc->hwss.program_front_end_for_ctx(dc, context);
1550 dc->hwss.interdependent_update_lock(dc, context, false);
1551 dc->hwss.post_unlock_program_front_end(dc, context);
1552 }
1553 for (i = 0; i < context->stream_count; i++) {
1554 const struct dc_link *link = context->streams[i]->link;
1555
1556 if (!context->streams[i]->mode_changed)
1557 continue;
1558
1559 if (dc->hwss.apply_ctx_for_surface) {
1560 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1561 dc->hwss.apply_ctx_for_surface(
1562 dc, context->streams[i],
1563 context->stream_status[i].plane_count,
1564 context);
1565 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1566 dc->hwss.post_unlock_program_front_end(dc, context);
1567 }
1568
1569 /*
1570 * enable stereo
1571 * TODO rework dc_enable_stereo call to work with validation sets?
1572 */
1573 for (k = 0; k < MAX_PIPES; k++) {
1574 pipe = &context->res_ctx.pipe_ctx[k];
1575
1576 for (l = 0 ; pipe && l < context->stream_count; l++) {
1577 if (context->streams[l] &&
1578 context->streams[l] == pipe->stream &&
1579 dc->hwss.setup_stereo)
1580 dc->hwss.setup_stereo(pipe, dc);
1581 }
1582 }
1583
1584 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1585 context->streams[i]->timing.h_addressable,
1586 context->streams[i]->timing.v_addressable,
1587 context->streams[i]->timing.h_total,
1588 context->streams[i]->timing.v_total,
1589 context->streams[i]->timing.pix_clk_100hz / 10);
1590 }
1591
1592 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1593
1594 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1595 context->stream_count == 0) {
1596 /* Must wait for no flips to be pending before doing optimize bw */
1597 wait_for_no_pipes_pending(dc, context);
1598 /* pplib is notified if disp_num changed */
1599 dc->hwss.optimize_bandwidth(dc, context);
1600 }
1601
1602 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1603 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1604 else
1605 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1606
1607 context->stream_mask = get_stream_mask(dc, context);
1608
1609 if (context->stream_mask != dc->current_state->stream_mask)
1610 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1611
1612 for (i = 0; i < context->stream_count; i++)
1613 context->streams[i]->mode_changed = false;
1614
1615 dc_release_state(dc->current_state);
1616
1617 dc->current_state = context;
1618
1619 dc_retain_state(dc->current_state);
1620
1621 return result;
1622 }
1623
1624 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1625 {
1626 enum dc_status result = DC_ERROR_UNEXPECTED;
1627 int i;
1628
1629 if (!context_changed(dc, context))
1630 return DC_OK;
1631
1632 DC_LOG_DC("%s: %d streams\n",
1633 __func__, context->stream_count);
1634
1635 for (i = 0; i < context->stream_count; i++) {
1636 struct dc_stream_state *stream = context->streams[i];
1637
1638 dc_stream_log(dc, stream);
1639 }
1640
1641 result = dc_commit_state_no_check(dc, context);
1642
1643 return (result == DC_OK);
1644 }
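/*
 * Commit flow sketch (illustrative only, assumes the caller has already built
 * and validated a new dc_state): dc_commit_state() programs the context and
 * swaps it into dc->current_state, which takes its own reference, so the
 * caller still releases the reference it created.
 *
 *	bool committed = dc_commit_state(dc, context);
 *
 *	dc_release_state(context);
 *	if (!committed)
 *		DC_LOG_WARNING("commit rejected");
 */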
1645
1646 #if defined(CONFIG_DRM_AMD_DC_DCN)
1647 bool dc_acquire_release_mpc_3dlut(
1648 struct dc *dc, bool acquire,
1649 struct dc_stream_state *stream,
1650 struct dc_3dlut **lut,
1651 struct dc_transfer_func **shaper)
1652 {
1653 int pipe_idx;
1654 bool ret = false;
1655 bool found_pipe_idx = false;
1656 const struct resource_pool *pool = dc->res_pool;
1657 struct resource_context *res_ctx = &dc->current_state->res_ctx;
1658 int mpcc_id = 0;
1659
1660 if (pool && res_ctx) {
1661 if (acquire) {
1662 /*find pipe idx for the given stream*/
1663 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1664 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1665 found_pipe_idx = true;
1666 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1667 break;
1668 }
1669 }
1670 } else
1671 found_pipe_idx = true;/*for release pipe_idx is not required*/
1672
1673 if (found_pipe_idx) {
1674 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1675 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1676 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1677 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1678 }
1679 }
1680 return ret;
1681 }
1682 #endif
1683 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1684 {
1685 int i;
1686 struct pipe_ctx *pipe;
1687
1688 for (i = 0; i < MAX_PIPES; i++) {
1689 pipe = &context->res_ctx.pipe_ctx[i];
1690
1691 if (!pipe->plane_state)
1692 continue;
1693
1694 /* Must set to false to start with, due to OR in update function */
1695 pipe->plane_state->status.is_flip_pending = false;
1696 dc->hwss.update_pending_status(pipe);
1697 if (pipe->plane_state->status.is_flip_pending)
1698 return true;
1699 }
1700 return false;
1701 }
1702
1703 void dc_post_update_surfaces_to_stream(struct dc *dc)
1704 {
1705 int i;
1706 struct dc_state *context = dc->current_state;
1707
1708 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1709 return;
1710
1711 post_surface_trace(dc);
1712
1713 if (is_flip_pending_in_pipes(dc, context))
1714 return;
1715
1716 for (i = 0; i < dc->res_pool->pipe_count; i++)
1717 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1718 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1719 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1720 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1721 }
1722
1723 dc->hwss.optimize_bandwidth(dc, context);
1724
1725 dc->optimized_required = false;
1726 dc->wm_optimized_required = false;
1727 }
1728
1729 static void init_state(struct dc *dc, struct dc_state *context)
1730 {
1731 /* Each context must have its own instance of VBA, and in order to
1732 * initialize and obtain the IP and SOC parameters, the base DML instance
1733 * from DC is initially copied into every context.
1734 */
1735 #ifdef CONFIG_DRM_AMD_DC_DCN
1736 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1737 #endif
1738 }
1739
1740 struct dc_state *dc_create_state(struct dc *dc)
1741 {
1742 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1743 GFP_KERNEL);
1744
1745 if (!context)
1746 return NULL;
1747
1748 init_state(dc, context);
1749
1750 kref_init(&context->refcount);
1751
1752 return context;
1753 }
1754
1755 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1756 {
1757 int i, j;
1758 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1759
1760 if (!new_ctx)
1761 return NULL;
1762 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1763
1764 for (i = 0; i < MAX_PIPES; i++) {
1765 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1766
1767 if (cur_pipe->top_pipe)
1768 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1769
1770 if (cur_pipe->bottom_pipe)
1771 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1772
1773 if (cur_pipe->prev_odm_pipe)
1774 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
1775
1776 if (cur_pipe->next_odm_pipe)
1777 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
1778
1779 }
1780
1781 for (i = 0; i < new_ctx->stream_count; i++) {
1782 dc_stream_retain(new_ctx->streams[i]);
1783 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
1784 dc_plane_state_retain(
1785 new_ctx->stream_status[i].plane_states[j]);
1786 }
1787
1788 kref_init(&new_ctx->refcount);
1789
1790 return new_ctx;
1791 }
1792
1793 void dc_retain_state(struct dc_state *context)
1794 {
1795 kref_get(&context->refcount);
1796 }
1797
1798 static void dc_state_free(struct kref *kref)
1799 {
1800 struct dc_state *context = container_of(kref, struct dc_state, refcount);
1801 dc_resource_state_destruct(context);
1802 kvfree(context);
1803 }
1804
1805 void dc_release_state(struct dc_state *context)
1806 {
1807 kref_put(&context->refcount, dc_state_free);
1808 }
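/*
 * Lifetime sketch (illustrative only): dc_state objects are reference
 * counted. dc_create_state() and dc_copy_state() return a context holding one
 * reference; dc_retain_state() takes an extra reference and dc_release_state()
 * drops one, freeing the context via dc_state_free() when the count hits zero.
 *
 *	struct dc_state *ctx = dc_create_state(dc);
 *
 *	if (ctx) {
 *		dc_retain_state(ctx);	// second reference
 *		dc_release_state(ctx);	// drop it again
 *		dc_release_state(ctx);	// last reference, context is freed
 *	}
 */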
1809
1810 bool dc_set_generic_gpio_for_stereo(bool enable,
1811 struct gpio_service *gpio_service)
1812 {
1813 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1814 struct gpio_pin_info pin_info;
1815 struct gpio *generic;
1816 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1817 GFP_KERNEL);
1818
1819 if (!config)
1820 return false;
1821 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1822
1823 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1824 kfree(config);
1825 return false;
1826 } else {
1827 generic = dal_gpio_service_create_generic_mux(
1828 gpio_service,
1829 pin_info.offset,
1830 pin_info.mask);
1831 }
1832
1833 if (!generic) {
1834 kfree(config);
1835 return false;
1836 }
1837
1838 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1839
1840 config->enable_output_from_mux = enable;
1841 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1842
1843 if (gpio_result == GPIO_RESULT_OK)
1844 gpio_result = dal_mux_setup_config(generic, config);
1845
1846 if (gpio_result == GPIO_RESULT_OK) {
1847 dal_gpio_close(generic);
1848 dal_gpio_destroy_generic_mux(&generic);
1849 kfree(config);
1850 return true;
1851 } else {
1852 dal_gpio_close(generic);
1853 dal_gpio_destroy_generic_mux(&generic);
1854 kfree(config);
1855 return false;
1856 }
1857 }
1858
1859 static bool is_surface_in_context(
1860 const struct dc_state *context,
1861 const struct dc_plane_state *plane_state)
1862 {
1863 int j;
1864
1865 for (j = 0; j < MAX_PIPES; j++) {
1866 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1867
1868 if (plane_state == pipe_ctx->plane_state) {
1869 return true;
1870 }
1871 }
1872
1873 return false;
1874 }
1875
1876 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1877 {
1878 union surface_update_flags *update_flags = &u->surface->update_flags;
1879 enum surface_update_type update_type = UPDATE_TYPE_FAST;
1880
1881 if (!u->plane_info)
1882 return UPDATE_TYPE_FAST;
1883
1884 if (u->plane_info->color_space != u->surface->color_space) {
1885 update_flags->bits.color_space_change = 1;
1886 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1887 }
1888
1889 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1890 update_flags->bits.horizontal_mirror_change = 1;
1891 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1892 }
1893
1894 if (u->plane_info->rotation != u->surface->rotation) {
1895 update_flags->bits.rotation_change = 1;
1896 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1897 }
1898
1899 if (u->plane_info->format != u->surface->format) {
1900 update_flags->bits.pixel_format_change = 1;
1901 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1902 }
1903
1904 if (u->plane_info->stereo_format != u->surface->stereo_format) {
1905 update_flags->bits.stereo_format_change = 1;
1906 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1907 }
1908
1909 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1910 update_flags->bits.per_pixel_alpha_change = 1;
1911 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1912 }
1913
1914 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1915 update_flags->bits.global_alpha_change = 1;
1916 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1917 }
1918
1919 if (u->plane_info->dcc.enable != u->surface->dcc.enable
1920 || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1921 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1922 update_flags->bits.dcc_change = 1;
1923 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1924 }
1925
1926 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1927 resource_pixel_format_to_bpp(u->surface->format)) {
1928 /* different bytes per element will require full bandwidth
1929 * and DML calculation
1930 */
1931 update_flags->bits.bpp_change = 1;
1932 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1933 }
1934
1935 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1936 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1937 update_flags->bits.plane_size_change = 1;
1938 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1939 }
1940
1941
1942 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1943 sizeof(union dc_tiling_info)) != 0) {
1944 update_flags->bits.swizzle_change = 1;
1945 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1946
1947 /* todo: below are HW dependent, we should add a hook to the
1948 * DCE/N resource and validate it there.
1949 */
1950 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
1951 /* swizzled mode requires RQ to be set up properly,
1952 * thus we need to run DML to calculate the RQ settings
1953 */
1954 update_flags->bits.bandwidth_change = 1;
1955 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1956 }
1957 }
1958
1959 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
1960 return update_type;
1961 }
1962
1963 static enum surface_update_type get_scaling_info_update_type(
1964 const struct dc_surface_update *u)
1965 {
1966 union surface_update_flags *update_flags = &u->surface->update_flags;
1967
1968 if (!u->scaling_info)
1969 return UPDATE_TYPE_FAST;
1970
1971 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1972 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1973 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1974 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
1975 || u->scaling_info->scaling_quality.integer_scaling !=
1976 u->surface->scaling_quality.integer_scaling
1977 ) {
1978 update_flags->bits.scaling_change = 1;
1979
1980 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
1981 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
1982 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
1983 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
1984 /* Making dst rect smaller requires a bandwidth change */
1985 update_flags->bits.bandwidth_change = 1;
1986 }
1987
1988 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1989 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
1990
1991 update_flags->bits.scaling_change = 1;
1992 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
1993 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
1994 /* Making src rect bigger requires a bandwidth change */
1995 update_flags->bits.clock_change = 1;
1996 }
1997
1998 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1999 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2000 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2001 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2002 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2003 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2004 update_flags->bits.position_change = 1;
2005
2006 if (update_flags->bits.clock_change
2007 || update_flags->bits.bandwidth_change
2008 || update_flags->bits.scaling_change)
2009 return UPDATE_TYPE_FULL;
2010
2011 if (update_flags->bits.position_change)
2012 return UPDATE_TYPE_MED;
2013
2014 return UPDATE_TYPE_FAST;
2015 }
2016
2017 static enum surface_update_type det_surface_update(const struct dc *dc,
2018 const struct dc_surface_update *u)
2019 {
2020 const struct dc_state *context = dc->current_state;
2021 enum surface_update_type type;
2022 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2023 union surface_update_flags *update_flags = &u->surface->update_flags;
2024
2025 if (u->flip_addr)
2026 update_flags->bits.addr_update = 1;
2027
2028 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2029 update_flags->raw = 0xFFFFFFFF;
2030 return UPDATE_TYPE_FULL;
2031 }
2032
2033 update_flags->raw = 0; // Reset all flags
2034
2035 type = get_plane_info_update_type(u);
2036 elevate_update_type(&overall_type, type);
2037
2038 type = get_scaling_info_update_type(u);
2039 elevate_update_type(&overall_type, type);
2040
2041 if (u->flip_addr)
2042 update_flags->bits.addr_update = 1;
2043
2044 if (u->in_transfer_func)
2045 update_flags->bits.in_transfer_func_change = 1;
2046
2047 if (u->input_csc_color_matrix)
2048 update_flags->bits.input_csc_change = 1;
2049
2050 if (u->coeff_reduction_factor)
2051 update_flags->bits.coeff_reduction_change = 1;
2052
2053 if (u->gamut_remap_matrix)
2054 update_flags->bits.gamut_remap_change = 1;
2055
2056 if (u->gamma) {
2057 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2058
2059 if (u->plane_info)
2060 format = u->plane_info->format;
2061 else if (u->surface)
2062 format = u->surface->format;
2063
2064 if (dce_use_lut(format))
2065 update_flags->bits.gamma_change = 1;
2066 }
2067
2068 if (u->hdr_mult.value)
2069 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2070 update_flags->bits.hdr_mult = 1;
2071 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2072 }
2073
2074 if (update_flags->bits.in_transfer_func_change) {
2075 type = UPDATE_TYPE_MED;
2076 elevate_update_type(&overall_type, type);
2077 }
2078
2079 if (update_flags->bits.input_csc_change
2080 || update_flags->bits.coeff_reduction_change
2081 || update_flags->bits.gamma_change
2082 || update_flags->bits.gamut_remap_change) {
2083 type = UPDATE_TYPE_FULL;
2084 elevate_update_type(&overall_type, type);
2085 }
2086
2087 return overall_type;
2088 }
2089
2090 static enum surface_update_type check_update_surfaces_for_stream(
2091 struct dc *dc,
2092 struct dc_surface_update *updates,
2093 int surface_count,
2094 struct dc_stream_update *stream_update,
2095 const struct dc_stream_status *stream_status)
2096 {
2097 int i;
2098 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2099
2100 #if defined(CONFIG_DRM_AMD_DC_DCN)
2101 if (dc->idle_optimizations_allowed)
2102 overall_type = UPDATE_TYPE_FULL;
2103
2104 #endif
2105 if (stream_status == NULL || stream_status->plane_count != surface_count)
2106 overall_type = UPDATE_TYPE_FULL;
2107
2108 if (stream_update && stream_update->pending_test_pattern) {
2109 overall_type = UPDATE_TYPE_FULL;
2110 }
2111
2112 /* some stream updates require passive update */
2113 if (stream_update) {
2114 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2115
2116 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2117 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2118 stream_update->integer_scaling_update)
2119 su_flags->bits.scaling = 1;
2120
2121 if (stream_update->out_transfer_func)
2122 su_flags->bits.out_tf = 1;
2123
2124 if (stream_update->abm_level)
2125 su_flags->bits.abm_level = 1;
2126
2127 if (stream_update->dpms_off)
2128 su_flags->bits.dpms_off = 1;
2129
2130 if (stream_update->gamut_remap)
2131 su_flags->bits.gamut_remap = 1;
2132
2133 if (stream_update->wb_update)
2134 su_flags->bits.wb_update = 1;
2135
2136 if (stream_update->dsc_config)
2137 su_flags->bits.dsc_changed = 1;
2138
2139 if (su_flags->raw != 0)
2140 overall_type = UPDATE_TYPE_FULL;
2141
2142 if (stream_update->output_csc_transform || stream_update->output_color_space)
2143 su_flags->bits.out_csc = 1;
2144 }
2145
2146 for (i = 0 ; i < surface_count; i++) {
2147 enum surface_update_type type =
2148 det_surface_update(dc, &updates[i]);
2149
2150 elevate_update_type(&overall_type, type);
2151 }
2152
2153 return overall_type;
2154 }
2155
2156 /*
2157 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2158 *
2159 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2160 */
2161 enum surface_update_type dc_check_update_surfaces_for_stream(
2162 struct dc *dc,
2163 struct dc_surface_update *updates,
2164 int surface_count,
2165 struct dc_stream_update *stream_update,
2166 const struct dc_stream_status *stream_status)
2167 {
2168 int i;
2169 enum surface_update_type type;
2170
2171 if (stream_update)
2172 stream_update->stream->update_flags.raw = 0;
2173 for (i = 0; i < surface_count; i++)
2174 updates[i].surface->update_flags.raw = 0;
2175
2176 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2177 if (type == UPDATE_TYPE_FULL) {
2178 if (stream_update) {
2179 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2180 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2181 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2182 }
2183 for (i = 0; i < surface_count; i++)
2184 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2185 }
2186
2187 if (type == UPDATE_TYPE_FAST) {
2188 // If there's an available clock comparator, we use that.
2189 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2190 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2191 dc->optimized_required = true;
2192 // Else we fall back to a memory compare.
2193 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2194 dc->optimized_required = true;
2195 }
2196
2197 dc->optimized_required |= dc->wm_optimized_required;
2198 }
2199
2200 return type;
2201 }
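/*
 * Illustrative sketch of how a DM layer might classify an update before
 * committing it (hypothetical caller; updates, surface_count, stream and
 * stream_update are assumed to have been filled in already):
 *
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, updates, surface_count,
 *				stream_update, dc_stream_get_status(stream));
 *
 *	if (type == UPDATE_TYPE_FAST)
 *		... flip-only path, no bandwidth revalidation expected ...
 *	else
 *		... MED/FULL path, expect locking and front-end reprogramming ...
 */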
2202
2203 static struct dc_stream_status *stream_get_status(
2204 struct dc_state *ctx,
2205 struct dc_stream_state *stream)
2206 {
2207 uint8_t i;
2208
2209 for (i = 0; i < ctx->stream_count; i++) {
2210 if (stream == ctx->streams[i]) {
2211 return &ctx->stream_status[i];
2212 }
2213 }
2214
2215 return NULL;
2216 }
2217
2218 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2219
2220 static void copy_surface_update_to_plane(
2221 struct dc_plane_state *surface,
2222 struct dc_surface_update *srf_update)
2223 {
2224 if (srf_update->flip_addr) {
2225 surface->address = srf_update->flip_addr->address;
2226 surface->flip_immediate =
2227 srf_update->flip_addr->flip_immediate;
2228 surface->time.time_elapsed_in_us[surface->time.index] =
2229 srf_update->flip_addr->flip_timestamp_in_us -
2230 surface->time.prev_update_time_in_us;
2231 surface->time.prev_update_time_in_us =
2232 srf_update->flip_addr->flip_timestamp_in_us;
2233 surface->time.index++;
2234 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2235 surface->time.index = 0;
2236
2237 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2238 }
2239
2240 if (srf_update->scaling_info) {
2241 surface->scaling_quality =
2242 srf_update->scaling_info->scaling_quality;
2243 surface->dst_rect =
2244 srf_update->scaling_info->dst_rect;
2245 surface->src_rect =
2246 srf_update->scaling_info->src_rect;
2247 surface->clip_rect =
2248 srf_update->scaling_info->clip_rect;
2249 }
2250
2251 if (srf_update->plane_info) {
2252 surface->color_space =
2253 srf_update->plane_info->color_space;
2254 surface->format =
2255 srf_update->plane_info->format;
2256 surface->plane_size =
2257 srf_update->plane_info->plane_size;
2258 surface->rotation =
2259 srf_update->plane_info->rotation;
2260 surface->horizontal_mirror =
2261 srf_update->plane_info->horizontal_mirror;
2262 surface->stereo_format =
2263 srf_update->plane_info->stereo_format;
2264 surface->tiling_info =
2265 srf_update->plane_info->tiling_info;
2266 surface->visible =
2267 srf_update->plane_info->visible;
2268 surface->per_pixel_alpha =
2269 srf_update->plane_info->per_pixel_alpha;
2270 surface->global_alpha =
2271 srf_update->plane_info->global_alpha;
2272 surface->global_alpha_value =
2273 srf_update->plane_info->global_alpha_value;
2274 surface->dcc =
2275 srf_update->plane_info->dcc;
2276 surface->layer_index =
2277 srf_update->plane_info->layer_index;
2278 }
2279
2280 if (srf_update->gamma &&
2281 (surface->gamma_correction !=
2282 srf_update->gamma)) {
2283 memcpy(&surface->gamma_correction->entries,
2284 &srf_update->gamma->entries,
2285 sizeof(struct dc_gamma_entries));
2286 surface->gamma_correction->is_identity =
2287 srf_update->gamma->is_identity;
2288 surface->gamma_correction->num_entries =
2289 srf_update->gamma->num_entries;
2290 surface->gamma_correction->type =
2291 srf_update->gamma->type;
2292 }
2293
2294 if (srf_update->in_transfer_func &&
2295 (surface->in_transfer_func !=
2296 srf_update->in_transfer_func)) {
2297 surface->in_transfer_func->sdr_ref_white_level =
2298 srf_update->in_transfer_func->sdr_ref_white_level;
2299 surface->in_transfer_func->tf =
2300 srf_update->in_transfer_func->tf;
2301 surface->in_transfer_func->type =
2302 srf_update->in_transfer_func->type;
2303 memcpy(&surface->in_transfer_func->tf_pts,
2304 &srf_update->in_transfer_func->tf_pts,
2305 sizeof(struct dc_transfer_func_distributed_points));
2306 }
2307
2308 if (srf_update->func_shaper &&
2309 (surface->in_shaper_func !=
2310 srf_update->func_shaper))
2311 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2312 sizeof(*surface->in_shaper_func));
2313
2314 if (srf_update->lut3d_func &&
2315 (surface->lut3d_func !=
2316 srf_update->lut3d_func))
2317 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2318 sizeof(*surface->lut3d_func));
2319
2320 if (srf_update->hdr_mult.value)
2321 surface->hdr_mult =
2322 srf_update->hdr_mult;
2323
2324 if (srf_update->blend_tf &&
2325 (surface->blend_tf !=
2326 srf_update->blend_tf))
2327 memcpy(surface->blend_tf, srf_update->blend_tf,
2328 sizeof(*surface->blend_tf));
2329
2330 if (srf_update->input_csc_color_matrix)
2331 surface->input_csc_color_matrix =
2332 *srf_update->input_csc_color_matrix;
2333
2334 if (srf_update->coeff_reduction_factor)
2335 surface->coeff_reduction_factor =
2336 *srf_update->coeff_reduction_factor;
2337
2338 if (srf_update->gamut_remap_matrix)
2339 surface->gamut_remap_matrix =
2340 *srf_update->gamut_remap_matrix;
2341 }
2342
2343 static void copy_stream_update_to_stream(struct dc *dc,
2344 struct dc_state *context,
2345 struct dc_stream_state *stream,
2346 struct dc_stream_update *update)
2347 {
2348 struct dc_context *dc_ctx = dc->ctx;
2349
2350 if (update == NULL || stream == NULL)
2351 return;
2352
2353 if (update->src.height && update->src.width)
2354 stream->src = update->src;
2355
2356 if (update->dst.height && update->dst.width)
2357 stream->dst = update->dst;
2358
2359 if (update->out_transfer_func &&
2360 stream->out_transfer_func != update->out_transfer_func) {
2361 stream->out_transfer_func->sdr_ref_white_level =
2362 update->out_transfer_func->sdr_ref_white_level;
2363 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2364 stream->out_transfer_func->type =
2365 update->out_transfer_func->type;
2366 memcpy(&stream->out_transfer_func->tf_pts,
2367 &update->out_transfer_func->tf_pts,
2368 sizeof(struct dc_transfer_func_distributed_points));
2369 }
2370
2371 if (update->hdr_static_metadata)
2372 stream->hdr_static_metadata = *update->hdr_static_metadata;
2373
2374 if (update->abm_level)
2375 stream->abm_level = *update->abm_level;
2376
2377 if (update->periodic_interrupt0)
2378 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2379
2380 if (update->periodic_interrupt1)
2381 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2382
2383 if (update->gamut_remap)
2384 stream->gamut_remap_matrix = *update->gamut_remap;
2385
2386 /* Note: this being updated after mode set is currently not a use case;
2387 * however, if it arises, OCSC would need to be reprogrammed at a
2388 * minimum.
2389 */
2390 if (update->output_color_space)
2391 stream->output_color_space = *update->output_color_space;
2392
2393 if (update->output_csc_transform)
2394 stream->csc_color_matrix = *update->output_csc_transform;
2395
2396 if (update->vrr_infopacket)
2397 stream->vrr_infopacket = *update->vrr_infopacket;
2398
2399 if (update->dpms_off)
2400 stream->dpms_off = *update->dpms_off;
2401
2402 if (update->vsc_infopacket)
2403 stream->vsc_infopacket = *update->vsc_infopacket;
2404
2405 if (update->vsp_infopacket)
2406 stream->vsp_infopacket = *update->vsp_infopacket;
2407
2408 if (update->dither_option)
2409 stream->dither_option = *update->dither_option;
2410
2411 if (update->pending_test_pattern)
2412 stream->test_pattern = *update->pending_test_pattern;
2413 /* update current stream with writeback info */
2414 if (update->wb_update) {
2415 int i;
2416
2417 stream->num_wb_info = update->wb_update->num_wb_info;
2418 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2419 for (i = 0; i < stream->num_wb_info; i++)
2420 stream->writeback_info[i] =
2421 update->wb_update->writeback_info[i];
2422 }
2423 if (update->dsc_config) {
2424 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2425 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2426 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2427 update->dsc_config->num_slices_v != 0);
2428
2429 /* Use a temporary context for validating the new DSC config */
2430 struct dc_state *dsc_validate_context = dc_create_state(dc);
2431
2432 if (dsc_validate_context) {
2433 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2434
2435 stream->timing.dsc_cfg = *update->dsc_config;
2436 stream->timing.flags.DSC = enable_dsc;
2437 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2438 stream->timing.dsc_cfg = old_dsc_cfg;
2439 stream->timing.flags.DSC = old_dsc_enabled;
2440 update->dsc_config = NULL;
2441 }
2442
2443 dc_release_state(dsc_validate_context);
2444 } else {
2445 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2446 update->dsc_config = NULL;
2447 }
2448 }
2449 }
2450
2451 static void commit_planes_do_stream_update(struct dc *dc,
2452 struct dc_stream_state *stream,
2453 struct dc_stream_update *stream_update,
2454 enum surface_update_type update_type,
2455 struct dc_state *context)
2456 {
2457 int j;
2458
2459 // Stream updates
2460 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2461 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2462
2463 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2464
2465 if (stream_update->periodic_interrupt0 &&
2466 dc->hwss.setup_periodic_interrupt)
2467 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2468
2469 if (stream_update->periodic_interrupt1 &&
2470 dc->hwss.setup_periodic_interrupt)
2471 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2472
2473 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2474 stream_update->vrr_infopacket ||
2475 stream_update->vsc_infopacket ||
2476 stream_update->vsp_infopacket) {
2477 resource_build_info_frame(pipe_ctx);
2478 dc->hwss.update_info_frame(pipe_ctx);
2479 }
2480
2481 if (stream_update->hdr_static_metadata &&
2482 stream->use_dynamic_meta &&
2483 dc->hwss.set_dmdata_attributes &&
2484 pipe_ctx->stream->dmdata_address.quad_part != 0)
2485 dc->hwss.set_dmdata_attributes(pipe_ctx);
2486
2487 if (stream_update->gamut_remap)
2488 dc_stream_set_gamut_remap(dc, stream);
2489
2490 if (stream_update->output_csc_transform)
2491 dc_stream_program_csc_matrix(dc, stream);
2492
2493 if (stream_update->dither_option) {
2494 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2495 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2496 &pipe_ctx->stream->bit_depth_params);
2497 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2498 &stream->bit_depth_params,
2499 &stream->clamping);
2500 while (odm_pipe) {
2501 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2502 &stream->bit_depth_params,
2503 &stream->clamping);
2504 odm_pipe = odm_pipe->next_odm_pipe;
2505 }
2506 }
2507
2508
2509 /* Full fe update*/
2510 if (update_type == UPDATE_TYPE_FAST)
2511 continue;
2512
2513 if (stream_update->dsc_config)
2514 dp_update_dsc_config(pipe_ctx);
2515
2516 if (stream_update->pending_test_pattern) {
2517 dc_link_dp_set_test_pattern(stream->link,
2518 stream->test_pattern.type,
2519 stream->test_pattern.color_space,
2520 stream->test_pattern.p_link_settings,
2521 stream->test_pattern.p_custom_pattern,
2522 stream->test_pattern.cust_pattern_size);
2523 }
2524
2525 if (stream_update->dpms_off) {
2526 if (*stream_update->dpms_off) {
2527 core_link_disable_stream(pipe_ctx);
2528 /* for dpms, keep acquired resources*/
2529 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2530 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2531
2532 dc->optimized_required = true;
2533
2534 } else {
2535 if (get_seamless_boot_stream_count(context) == 0)
2536 dc->hwss.prepare_bandwidth(dc, dc->current_state);
2537
2538 core_link_enable_stream(dc->current_state, pipe_ctx);
2539 }
2540 }
2541
2542 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2543 bool should_program_abm = true;
2544
2545 // if OTG funcs are defined, check if blanked before programming
2546 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2547 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2548 should_program_abm = false;
2549
2550 if (should_program_abm) {
2551 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2552 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2553 } else {
2554 pipe_ctx->stream_res.abm->funcs->set_abm_level(
2555 pipe_ctx->stream_res.abm, stream->abm_level);
2556 }
2557 }
2558 }
2559 }
2560 }
2561 }
2562
2563 static void commit_planes_for_stream(struct dc *dc,
2564 struct dc_surface_update *srf_updates,
2565 int surface_count,
2566 struct dc_stream_state *stream,
2567 struct dc_stream_update *stream_update,
2568 enum surface_update_type update_type,
2569 struct dc_state *context)
2570 {
2571 int i, j;
2572 struct pipe_ctx *top_pipe_to_program = NULL;
2573
2574 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
2575 /* The seamless boot optimization flag keeps clocks and watermarks high
2576 * until the first flip; after that, optimization is required to lower
2577 * bandwidth. Note that UEFI is expected to light up only a single
2578 * display on POST, therefore we only expect one stream with the
2579 * seamless boot flag set.
2580 */
2581 if (stream->apply_seamless_boot_optimization) {
2582 stream->apply_seamless_boot_optimization = false;
2583
2584 if (get_seamless_boot_stream_count(context) == 0)
2585 dc->optimized_required = true;
2586 }
2587 }
2588
2589 if (update_type == UPDATE_TYPE_FULL) {
2590 #if defined(CONFIG_DRM_AMD_DC_DCN)
2591 dc_allow_idle_optimizations(dc, false);
2592
2593 #endif
2594 if (get_seamless_boot_stream_count(context) == 0)
2595 dc->hwss.prepare_bandwidth(dc, context);
2596
2597 context_clock_trace(dc, context);
2598 }
2599
2600 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2601 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2602
2603 if (!pipe_ctx->top_pipe &&
2604 !pipe_ctx->prev_odm_pipe &&
2605 pipe_ctx->stream &&
2606 pipe_ctx->stream == stream) {
2607 top_pipe_to_program = pipe_ctx;
2608 }
2609 }
2610
2611 #ifdef CONFIG_DRM_AMD_DC_DCN
2612 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2613 struct pipe_ctx *mpcc_pipe;
2614 struct pipe_ctx *odm_pipe;
2615
2616 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2617 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2618 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2619 }
2620 #endif
2621
2622 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2623 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2624 if (should_use_dmub_lock(stream->link)) {
2625 union dmub_hw_lock_flags hw_locks = { 0 };
2626 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2627
2628 hw_locks.bits.lock_dig = 1;
2629 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2630
2631 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2632 true,
2633 &hw_locks,
2634 &inst_flags);
2635 } else
2636 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2637 top_pipe_to_program->stream_res.tg);
2638 }
2639
2640 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2641 dc->hwss.interdependent_update_lock(dc, context, true);
2642 else
2643 /* Lock the top pipe while updating plane addrs, since freesync requires
2644 * plane addr update event triggers to be synchronized.
2645 * top_pipe_to_program is expected to never be NULL
2646 */
2647 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2648
2649 // Stream updates
2650 if (stream_update)
2651 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2652
2653 if (surface_count == 0) {
2654 /*
2655 * When turning off the screen there is no need to program the front
2656 * end a second time; just return after programming blank.
2657 */
2658 if (dc->hwss.apply_ctx_for_surface)
2659 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2660 if (dc->hwss.program_front_end_for_ctx)
2661 dc->hwss.program_front_end_for_ctx(dc, context);
2662
2663 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2664 dc->hwss.interdependent_update_lock(dc, context, false);
2665 else
2666 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2667
2668 dc->hwss.post_unlock_program_front_end(dc, context);
2669 return;
2670 }
2671
2672 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2673 for (i = 0; i < surface_count; i++) {
2674 struct dc_plane_state *plane_state = srf_updates[i].surface;
2675 /*set logical flag for lock/unlock use*/
2676 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2677 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2678 if (!pipe_ctx->plane_state)
2679 continue;
2680 if (pipe_ctx->plane_state != plane_state)
2681 continue;
2682 plane_state->triplebuffer_flips = false;
2683 if (update_type == UPDATE_TYPE_FAST &&
2684 dc->hwss.program_triplebuffer != NULL &&
2685 !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
2686 /*triple buffer for VUpdate only*/
2687 plane_state->triplebuffer_flips = true;
2688 }
2689 }
2690 if (update_type == UPDATE_TYPE_FULL) {
2691 /* force vsync flip when reconfiguring pipes to prevent underflow */
2692 plane_state->flip_immediate = false;
2693 }
2694 }
2695 }
2696
2697 // Update Type FULL, Surface updates
2698 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2699 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2700
2701 if (!pipe_ctx->top_pipe &&
2702 !pipe_ctx->prev_odm_pipe &&
2703 pipe_ctx->stream &&
2704 pipe_ctx->stream == stream) {
2705 struct dc_stream_status *stream_status = NULL;
2706
2707 if (!pipe_ctx->plane_state)
2708 continue;
2709
2710 /* Full fe update*/
2711 if (update_type == UPDATE_TYPE_FAST)
2712 continue;
2713
2714 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2715
2716 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2717 /*turn off triple buffer for full update*/
2718 dc->hwss.program_triplebuffer(
2719 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2720 }
2721 stream_status =
2722 stream_get_status(context, pipe_ctx->stream);
2723
2724 if (dc->hwss.apply_ctx_for_surface)
2725 dc->hwss.apply_ctx_for_surface(
2726 dc, pipe_ctx->stream, stream_status->plane_count, context);
2727 }
2728 }
2729 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2730 dc->hwss.program_front_end_for_ctx(dc, context);
2731 #ifdef CONFIG_DRM_AMD_DC_DCN
2732 if (dc->debug.validate_dml_output) {
2733 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2734 struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
2735 if (cur_pipe.stream == NULL)
2736 continue;
2737
2738 cur_pipe.plane_res.hubp->funcs->validate_dml_output(
2739 cur_pipe.plane_res.hubp, dc->ctx,
2740 &context->res_ctx.pipe_ctx[i].rq_regs,
2741 &context->res_ctx.pipe_ctx[i].dlg_regs,
2742 &context->res_ctx.pipe_ctx[i].ttu_regs);
2743 }
2744 }
2745 #endif
2746 }
2747
2748 // Update Type FAST, Surface updates
2749 if (update_type == UPDATE_TYPE_FAST) {
2750 if (dc->hwss.set_flip_control_gsl)
2751 for (i = 0; i < surface_count; i++) {
2752 struct dc_plane_state *plane_state = srf_updates[i].surface;
2753
2754 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2755 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2756
2757 if (pipe_ctx->stream != stream)
2758 continue;
2759
2760 if (pipe_ctx->plane_state != plane_state)
2761 continue;
2762
2763 // GSL has to be used for flip immediate
2764 dc->hwss.set_flip_control_gsl(pipe_ctx,
2765 plane_state->flip_immediate);
2766 }
2767 }
2768 /* Perform requested Updates */
2769 for (i = 0; i < surface_count; i++) {
2770 struct dc_plane_state *plane_state = srf_updates[i].surface;
2771
2772 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2773 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2774
2775 if (pipe_ctx->stream != stream)
2776 continue;
2777
2778 if (pipe_ctx->plane_state != plane_state)
2779 continue;
2780 /*program triple buffer after lock based on flip type*/
2781 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2782 /*only enable triplebuffer for fast_update*/
2783 dc->hwss.program_triplebuffer(
2784 dc, pipe_ctx, plane_state->triplebuffer_flips);
2785 }
2786 if (srf_updates[i].flip_addr)
2787 dc->hwss.update_plane_addr(dc, pipe_ctx);
2788 }
2789 }
2790 }
2791
2792 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2793 dc->hwss.interdependent_update_lock(dc, context, false);
2794 else
2795 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2796
2797 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2798 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2799 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2800 top_pipe_to_program->stream_res.tg,
2801 CRTC_STATE_VACTIVE);
2802 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2803 top_pipe_to_program->stream_res.tg,
2804 CRTC_STATE_VBLANK);
2805 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2806 top_pipe_to_program->stream_res.tg,
2807 CRTC_STATE_VACTIVE);
2808
2809 if (stream && should_use_dmub_lock(stream->link)) {
2810 union dmub_hw_lock_flags hw_locks = { 0 };
2811 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2812
2813 hw_locks.bits.lock_dig = 1;
2814 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2815
2816 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2817 false,
2818 &hw_locks,
2819 &inst_flags);
2820 } else
2821 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
2822 top_pipe_to_program->stream_res.tg);
2823 }
2824
2825 if (update_type != UPDATE_TYPE_FAST)
2826 dc->hwss.post_unlock_program_front_end(dc, context);
2827
2828 // Fire manual trigger only when bottom plane is flipped
2829 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2830 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2831
2832 if (!pipe_ctx->plane_state)
2833 continue;
2834
2835 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
2836 !pipe_ctx->stream || pipe_ctx->stream != stream ||
2837 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
2838 pipe_ctx->plane_state->skip_manual_trigger)
2839 continue;
2840
2841 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
2842 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
2843 }
2844 }
2845
2846 void dc_commit_updates_for_stream(struct dc *dc,
2847 struct dc_surface_update *srf_updates,
2848 int surface_count,
2849 struct dc_stream_state *stream,
2850 struct dc_stream_update *stream_update,
2851 struct dc_state *state)
2852 {
2853 const struct dc_stream_status *stream_status;
2854 enum surface_update_type update_type;
2855 struct dc_state *context;
2856 struct dc_context *dc_ctx = dc->ctx;
2857 int i, j;
2858
2859 stream_status = dc_stream_get_status(stream);
2860 context = dc->current_state;
2861
2862 update_type = dc_check_update_surfaces_for_stream(
2863 dc, srf_updates, surface_count, stream_update, stream_status);
2864
2865 if (update_type >= update_surface_trace_level)
2866 update_surface_trace(dc, srf_updates, surface_count);
2867
2868
2869 if (update_type >= UPDATE_TYPE_FULL) {
2870
2871 /* initialize scratch memory for building context */
2872 context = dc_create_state(dc);
2873 if (context == NULL) {
2874 DC_ERROR("Failed to allocate new validate context!\n");
2875 return;
2876 }
2877
2878 dc_resource_state_copy_construct(state, context);
2879
2880 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2881 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
2882 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2883
2884 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
2885 new_pipe->plane_state->force_full_update = true;
2886 }
2887 }
2888
2889
2890 for (i = 0; i < surface_count; i++) {
2891 struct dc_plane_state *surface = srf_updates[i].surface;
2892
2893 copy_surface_update_to_plane(surface, &srf_updates[i]);
2894
2895 if (update_type >= UPDATE_TYPE_MED) {
2896 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2897 struct pipe_ctx *pipe_ctx =
2898 &context->res_ctx.pipe_ctx[j];
2899
2900 if (pipe_ctx->plane_state != surface)
2901 continue;
2902
2903 resource_build_scaling_params(pipe_ctx);
2904 }
2905 }
2906 }
2907
2908 copy_stream_update_to_stream(dc, context, stream, stream_update);
2909
2910 if (update_type >= UPDATE_TYPE_FULL) {
2911 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2912 DC_ERROR("Mode validation failed for stream update!\n");
2913 dc_release_state(context);
2914 return;
2915 }
2916 }
2917
2918 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
2919
2920 commit_planes_for_stream(
2921 dc,
2922 srf_updates,
2923 surface_count,
2924 stream,
2925 stream_update,
2926 update_type,
2927 context);
2928 /* update current_state */
2929 if (dc->current_state != context) {
2930
2931 struct dc_state *old = dc->current_state;
2932
2933 dc->current_state = context;
2934 dc_release_state(old);
2935
2936 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2937 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2938
2939 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
2940 pipe_ctx->plane_state->force_full_update = false;
2941 }
2942 }
2943 /*let's use current_state to update watermark etc*/
2944 if (update_type >= UPDATE_TYPE_FULL) {
2945 dc_post_update_surfaces_to_stream(dc);
2946
2947 if (dc_ctx->dce_version >= DCE_VERSION_MAX)
2948 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2949 else
2950 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2951 }
2952
2953 return;
2954
2955 }
2956
2957 uint8_t dc_get_current_stream_count(struct dc *dc)
2958 {
2959 return dc->current_state->stream_count;
2960 }
2961
2962 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2963 {
2964 if (i < dc->current_state->stream_count)
2965 return dc->current_state->streams[i];
2966 return NULL;
2967 }
2968
2969 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
2970 {
2971 uint8_t i;
2972 struct dc_context *ctx = link->ctx;
2973
2974 for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
2975 if (ctx->dc->current_state->streams[i]->link == link)
2976 return ctx->dc->current_state->streams[i];
2977 }
2978
2979 return NULL;
2980 }
2981
2982 enum dc_irq_source dc_interrupt_to_irq_source(
2983 struct dc *dc,
2984 uint32_t src_id,
2985 uint32_t ext_id)
2986 {
2987 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
2988 }
2989
2990 /*
2991 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
2992 */
2993 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2994 {
2995
2996 if (dc == NULL)
2997 return false;
2998
2999 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3000 }
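/*
 * Illustrative sketch (hypothetical IRQ glue; irq_src is an assumed
 * enum dc_irq_source value): enable a source, then ack it when it fires:
 *
 *	if (!dc_interrupt_set(dc, irq_src, true))
 *		... enabling failed, e.g. dc was NULL ...
 *
 *	... later, in the interrupt handler ...
 *	dc_interrupt_ack(dc, irq_src);
 */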
3001
3002 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3003 {
3004 dal_irq_service_ack(dc->res_pool->irqs, src);
3005 }
3006
3007 void dc_power_down_on_boot(struct dc *dc)
3008 {
3009 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3010 dc->hwss.power_down_on_boot)
3011 dc->hwss.power_down_on_boot(dc);
3012 }
3013
3014 void dc_set_power_state(
3015 struct dc *dc,
3016 enum dc_acpi_cm_power_state power_state)
3017 {
3018 struct kref refcount;
3019 struct display_mode_lib *dml;
3020
3021 if (!dc->current_state)
3022 return;
3023
3024 switch (power_state) {
3025 case DC_ACPI_CM_POWER_STATE_D0:
3026 dc_resource_state_construct(dc, dc->current_state);
3027
3028 if (dc->ctx->dmub_srv)
3029 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3030
3031 dc->hwss.init_hw(dc);
3032
3033 if (dc->hwss.init_sys_ctx != NULL &&
3034 dc->vm_pa_config.valid) {
3035 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3036 }
3037
3038 break;
3039 default:
3040 ASSERT(dc->current_state->stream_count == 0);
3041 /* Zero out the current context so that on resume we start with
3042 * clean state, and dc hw programming optimizations will not
3043 * cause any trouble.
3044 */
3045 dml = kzalloc(sizeof(struct display_mode_lib),
3046 GFP_KERNEL);
3047
3048 ASSERT(dml);
3049 if (!dml)
3050 return;
3051
3052 /* Preserve refcount */
3053 refcount = dc->current_state->refcount;
3054 /* Preserve display mode lib */
3055 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3056
3057 dc_resource_state_destruct(dc->current_state);
3058 memset(dc->current_state, 0,
3059 sizeof(*dc->current_state));
3060
3061 dc->current_state->refcount = refcount;
3062 dc->current_state->bw_ctx.dml = *dml;
3063
3064 kfree(dml);
3065
3066 break;
3067 }
3068 }
3069
3070 void dc_resume(struct dc *dc)
3071 {
3072 uint32_t i;
3073
3074 for (i = 0; i < dc->link_count; i++)
3075 core_link_resume(dc->links[i]);
3076 }
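/*
 * Illustrative suspend/resume ordering as a DM layer might drive it
 * (hypothetical glue code; DC_ACPI_CM_POWER_STATE_D3 is an assumed enum
 * value here, and any non-D0 state takes the power-down branch of
 * dc_set_power_state() above):
 *
 *	suspend:
 *		dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *	resume:
 *		dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *		dc_resume(dc);
 */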
3077
3078 bool dc_is_dmcu_initialized(struct dc *dc)
3079 {
3080 struct dmcu *dmcu = dc->res_pool->dmcu;
3081
3082 if (dmcu)
3083 return dmcu->funcs->is_dmcu_initialized(dmcu);
3084 return false;
3085 }
3086
3087 bool dc_submit_i2c(
3088 struct dc *dc,
3089 uint32_t link_index,
3090 struct i2c_command *cmd)
3091 {
3092
3093 struct dc_link *link = dc->links[link_index];
3094 struct ddc_service *ddc = link->ddc;
3095 return dce_i2c_submit_command(
3096 dc->res_pool,
3097 ddc->ddc_pin,
3098 cmd);
3099 }
3100
3101 bool dc_submit_i2c_oem(
3102 struct dc *dc,
3103 struct i2c_command *cmd)
3104 {
3105 struct ddc_service *ddc = dc->res_pool->oem_device;
3106 return dce_i2c_submit_command(
3107 dc->res_pool,
3108 ddc->ddc_pin,
3109 cmd);
3110 }
3111
3112 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3113 {
3114 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3115 BREAK_TO_DEBUGGER();
3116 return false;
3117 }
3118
3119 dc_sink_retain(sink);
3120
3121 dc_link->remote_sinks[dc_link->sink_count] = sink;
3122 dc_link->sink_count++;
3123
3124 return true;
3125 }
3126
3127 /*
3128 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3129 *
3130 * EDID length is in bytes
3131 */
3132 struct dc_sink *dc_link_add_remote_sink(
3133 struct dc_link *link,
3134 const uint8_t *edid,
3135 int len,
3136 struct dc_sink_init_data *init_data)
3137 {
3138 struct dc_sink *dc_sink;
3139 enum dc_edid_status edid_status;
3140
3141 if (len > DC_MAX_EDID_BUFFER_SIZE) {
3142 dm_error("Max EDID buffer size breached!\n");
3143 return NULL;
3144 }
3145
3146 if (!init_data) {
3147 BREAK_TO_DEBUGGER();
3148 return NULL;
3149 }
3150
3151 if (!init_data->link) {
3152 BREAK_TO_DEBUGGER();
3153 return NULL;
3154 }
3155
3156 dc_sink = dc_sink_create(init_data);
3157
3158 if (!dc_sink)
3159 return NULL;
3160
3161 memmove(dc_sink->dc_edid.raw_edid, edid, len);
3162 dc_sink->dc_edid.length = len;
3163
3164 if (!link_add_remote_sink_helper(
3165 link,
3166 dc_sink))
3167 goto fail_add_sink;
3168
3169 edid_status = dm_helpers_parse_edid_caps(
3170 link->ctx,
3171 &dc_sink->dc_edid,
3172 &dc_sink->edid_caps);
3173
3174 /*
3175 * Treat the device as having no EDID if EDID
3176 * parsing fails
3177 */
3178 if (edid_status != EDID_OK) {
3179 dc_sink->dc_edid.length = 0;
3180 dm_error("Bad EDID, status%d!\n", edid_status);
3181 }
3182
3183 return dc_sink;
3184
3185 fail_add_sink:
3186 dc_sink_release(dc_sink);
3187 return NULL;
3188 }
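/*
 * Illustrative sketch (hypothetical MST hotplug path; edid_buf, edid_len and
 * the remaining dc_sink_init_data fields are assumed caller inputs):
 *
 *	struct dc_sink_init_data init = { .link = link, ... };
 *	struct dc_sink *sink;
 *
 *	sink = dc_link_add_remote_sink(link, edid_buf, edid_len, &init);
 *	if (!sink)
 *		... EDID too large, bad init data, or allocation failure ...
 */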
3189
3190 /*
3191 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3192 *
3193 * Note that this just removes the struct dc_sink - it doesn't
3194 * program hardware or alter other members of dc_link
3195 */
3196 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3197 {
3198 int i;
3199
3200 if (!link->sink_count) {
3201 BREAK_TO_DEBUGGER();
3202 return;
3203 }
3204
3205 for (i = 0; i < link->sink_count; i++) {
3206 if (link->remote_sinks[i] == sink) {
3207 dc_sink_release(sink);
3208 link->remote_sinks[i] = NULL;
3209
3210 /* shrink array to remove empty place */
3211 while (i < link->sink_count - 1) {
3212 link->remote_sinks[i] = link->remote_sinks[i+1];
3213 i++;
3214 }
3215 link->remote_sinks[i] = NULL;
3216 link->sink_count--;
3217 return;
3218 }
3219 }
3220 }
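/*
 * Illustrative counterpart to the add path above (hypothetical MST unplug).
 * The link's own reference is dropped internally; a reference the DM still
 * holds from dc_link_add_remote_sink() would be released separately:
 *
 *	dc_link_remove_remote_sink(link, sink);
 *	dc_sink_release(sink);
 */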
3221
3222 void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream)
3223 {
3224 int i;
3225
3226 for (i = 0; i < dc->res_pool->pipe_count; i++)
3227 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
3228 struct timing_generator *tg =
3229 dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
3230 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
3231 break;
3232 }
3233 }
3234
3235 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3236 {
3237 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3238 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3239 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3240 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3241 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3242 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3243 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3244 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3245 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3246 }
3247 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3248 {
3249 if (dc->hwss.set_clock)
3250 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3251 return DC_ERROR_UNEXPECTED;
3252 }
3253 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3254 {
3255 if (dc->hwss.get_clock)
3256 dc->hwss.get_clock(dc, clock_type, clock_cfg);
3257 }
3258
3259 /* enable/disable eDP PSR without specifying a stream for eDP */
3260 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3261 {
3262 int i;
3263
3264 for (i = 0; i < dc->current_state->stream_count ; i++) {
3265 struct dc_link *link;
3266 struct dc_stream_state *stream = dc->current_state->streams[i];
3267
3268 link = stream->link;
3269 if (!link)
3270 continue;
3271
3272 if (link->psr_settings.psr_feature_enabled) {
3273 if (enable && !link->psr_settings.psr_allow_active)
3274 return dc_link_set_psr_allow_active(link, true, false, false);
3275 else if (!enable && link->psr_settings.psr_allow_active)
3276 return dc_link_set_psr_allow_active(link, false, true, false);
3277 }
3278 }
3279
3280 return true;
3281 }
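/*
 * Illustrative sketch (hypothetical power-management hook): park PSR on all
 * eDP links around an operation that conflicts with panel self refresh:
 *
 *	dc_set_psr_allow_active(dc, false);
 *	... perform the PSR-sensitive work ...
 *	dc_set_psr_allow_active(dc, true);
 */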
3282
3283 #if defined(CONFIG_DRM_AMD_DC_DCN)
3284
3285 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3286 {
3287 if (dc->debug.disable_idle_power_optimizations)
3288 return;
3289
3290 if (dc->clk_mgr->funcs->is_smu_present)
3291 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3292 return;
3293
3294 if (allow == dc->idle_optimizations_allowed)
3295 return;
3296
3297 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3298 dc->idle_optimizations_allowed = allow;
3299 }
3300
3301 /*
3302 * blank all streams, and set min and max memory clock to
3303 * lowest and highest DPM level, respectively
3304 */
3305 void dc_unlock_memory_clock_frequency(struct dc *dc)
3306 {
3307 unsigned int i;
3308
3309 for (i = 0; i < MAX_PIPES; i++)
3310 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3311 core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3312
3313 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3314 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3315 }
3316
3317 /*
3318 * set min memory clock to the min required for current mode,
3319 * max to maxDPM, and unblank streams
3320 */
3321 void dc_lock_memory_clock_frequency(struct dc *dc)
3322 {
3323 unsigned int i;
3324
3325 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3326 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3327 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3328
3329 for (i = 0; i < MAX_PIPES; i++)
3330 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3331 core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3332 }
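/*
 * Illustrative pairing of the two helpers above (hypothetical caller that
 * needs memory clock switching unrestricted for a while):
 *
 *	dc_unlock_memory_clock_frequency(dc);	streams blanked, full DPM range
 *	... operation requiring unrestricted memclk ...
 *	dc_lock_memory_clock_frequency(dc);	min pinned to the current mode,
 *						streams unblanked again
 */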
3333
3334 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3335 struct dc_cursor_attributes *cursor_attr)
3336 {
3337 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3338 return true;
3339 return false;
3340 }
3341
3342 /* cleanup on driver unload */
3343 void dc_hardware_release(struct dc *dc)
3344 {
3345 if (dc->hwss.hardware_release)
3346 dc->hwss.hardware_release(dc);
3347 }
3348 #endif
3349
3350 /**
3351 *****************************************************************************
3352 * Function: dc_enable_dmub_notifications
3353 *
3354 * @brief
3355 * Returns whether dmub notification can be enabled
3356 *
3357 * @param
3358 * [in] dc: dc structure
3359 *
3360 * @return
3361 * True if DMUB notifications can be enabled, False otherwise
3362 *****************************************************************************
3363 */
3364 bool dc_enable_dmub_notifications(struct dc *dc)
3365 {
3366 /* dmub aux needs dmub notifications to be enabled */
3367 return dc->debug.enable_dmub_aux_for_legacy_ddc;
3368 }
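/*
 * Illustrative sketch (hypothetical DM init path): only register for DMUB
 * notifications and route AUX through DMUB when the gate above allows it:
 *
 *	if (dc_enable_dmub_notifications(dc))
 *		... register DMUB outbox/notification handling ...
 */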
3369
3370 /**
3371 *****************************************************************************
3372 * Function: dc_process_dmub_aux_transfer_async
3373 *
3374 * @brief
3375 * Submits aux command to dmub via inbox message
3376 * Sets port index appropriately for legacy DDC
3377 *
3378 * @param
3379 * [in] dc: dc structure
3380 * [in] link_index: link index
3381 * [in] payload: aux payload
3382 *
3383 * @return
3384 * True if successful, False if failure
3385 *****************************************************************************
3386 */
3387 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3388 uint32_t link_index,
3389 struct aux_payload *payload)
3390 {
3391 uint8_t action;
3392 union dmub_rb_cmd cmd = {0};
3393 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3394
3395 ASSERT(payload->length <= 16);
3396
3397 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3398 cmd.dp_aux_access.header.payload_bytes = 0;
3399 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3400 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3401 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3402 cmd.dp_aux_access.aux_control.timeout = 0;
3403 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3404 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3405 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3406
3407 /* set aux action */
3408 if (payload->i2c_over_aux) {
3409 if (payload->write) {
3410 if (payload->mot)
3411 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3412 else
3413 action = DP_AUX_REQ_ACTION_I2C_WRITE;
3414 } else {
3415 if (payload->mot)
3416 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
3417 else
3418 action = DP_AUX_REQ_ACTION_I2C_READ;
3419 }
3420 } else {
3421 if (payload->write)
3422 action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3423 else
3424 action = DP_AUX_REQ_ACTION_DPCD_READ;
3425 }
3426
3427 cmd.dp_aux_access.aux_control.dpaux.action = action;
3428
3429 if (payload->length && payload->write) {
3430 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3431 payload->data,
3432 payload->length
3433 );
3434 }
3435
3436 dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3437 dc_dmub_srv_cmd_execute(dmub_srv);
3438 dc_dmub_srv_wait_idle(dmub_srv);
3439
3440 return true;
3441 }
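/*
 * Illustrative sketch of building a payload for the function above
 * (hypothetical DM AUX path; only the fields consumed here are shown, and
 * buf is an assumed caller-owned buffer):
 *
 *	struct aux_payload payload = {
 *		.address = 0x0000,	e.g. the DPCD revision register
 *		.length = 1,
 *		.write = false,
 *		.i2c_over_aux = false,
 *		.data = buf,
 *	};
 *
 *	if (!dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		... fall back to the legacy AUX engine ...
 */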
3442
3443 /**
3444 *****************************************************************************
3445 * Function: dc_disable_accelerated_mode
3446 *
3447 * @brief
3448 * disable accelerated mode
3449 *
3450 * @param
3451 * [in] dc: dc structure
3452 *
3453 *****************************************************************************
3454 */
3455 void dc_disable_accelerated_mode(struct dc *dc)
3456 {
3457 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
3458 }
3459