/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dml2/dml2_internal_types.h"

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
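
/*
 * Illustrative sketch (not part of the driver): how the structs described
 * above relate when walking the currently committed state. The helper name
 * and log messages are hypothetical; only the struct relationships are taken
 * from the overview.
 */
static void __maybe_unused example_walk_current_state(struct dc *dc)
{
	int i;

	/* One dc_link per connector, created at driver load. */
	for (i = 0; i < dc->link_count; i++)
		DC_LOG_DC("link %d signal type: %d\n",
			  i, dc->links[i]->connector_signal);

	/* dc->current_state holds the committed dc_state: one dc_stream_state
	 * per active display, each backed by at least one dc_plane_state
	 * (more in the Multi-Plane Overlay case).
	 */
	for (i = 0; i < dc->current_state->stream_count; i++)
		DC_LOG_DC("stream %d has %d plane(s)\n", i,
			  dc->current_state->stream_status[i].plane_count);
}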

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 *         %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}
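
/*
 * Illustrative sketch (not part of the driver): a hypothetical DM-side caller
 * programming a DRR range with dc_stream_adjust_vmin_vmax(). The min/max
 * v_total values chosen here are placeholders; a real caller derives them
 * from the panel's supported refresh-rate range.
 */
static bool __maybe_unused example_enable_drr(struct dc *dc,
		struct dc_stream_state *stream)
{
	struct dc_crtc_timing_adjust adjust = {0};

	/* Nominal v_total as the lower bound, a stretched v_total as the
	 * upper bound; a larger v_total_max means a lower minimum refresh
	 * rate while the screen is static.
	 */
	adjust.v_total_min = stream->timing.v_total;
	adjust.v_total_max = stream->timing.v_total * 2;

	/* May return false if bandwidth optimizations are still pending. */
	return dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
}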

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */
	if (pipe == NULL)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
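
/*
 * Illustrative sketch (not part of the driver): a hypothetical test-style
 * caller pairing dc_stream_configure_crc() with dc_stream_get_crc(). Passing
 * a NULL crc_window selects the default full-frame window; the caller must
 * let at least one frame complete before reading back values.
 */
static bool __maybe_unused example_capture_crc_once(struct dc *dc,
		struct dc_stream_state *stream)
{
	uint32_t r_cr = 0, g_y = 0, b_cb = 0;

	/* Full-frame window, capture a single frame (continuous = false). */
	if (!dc_stream_configure_crc(dc, stream, NULL, true, false))
		return false;

	/* A real caller waits for the frame to complete (e.g. a vblank)
	 * before the CRC registers hold valid values.
	 */
	if (!dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
		return false;

	DC_LOG_DC("CRC: R/Cr=0x%08x G/Y=0x%08x B/Cb=0x%08x\n", r_cr, g_y, b_cb);
	return true;
}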

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream == stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
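
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller arming
 * static-screen detection for one stream. The trigger fields mirror how the
 * amdgpu DM layer typically fills struct dc_static_screen_params; the frame
 * count is a placeholder.
 */
static void __maybe_unused example_arm_static_screen(struct dc *dc,
		struct dc_stream_state *stream)
{
	struct dc_static_screen_params params = {0};
	struct dc_stream_state *streams[] = { stream };

	/* Re-evaluate static state on cursor/surface/overlay updates. */
	params.triggers.cursor_update = true;
	params.triggers.surface_update = true;
	params.triggers.overlay_update = true;
	params.num_frames = 2; /* frames of no change before "static" */

	dc_stream_set_static_screen_params(dc, streams, 1, &params);
}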

static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	kfree(dc->ctx->logger);
	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Create logger */
	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

	if (!dc_ctx->logger) {
		kfree(dc_ctx);
		return false;
	}

	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
	dc->dml.logger = dc_ctx->logger;

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
	if (dc->caps.max_optimizable_video_width == 0)
		dc->caps.max_optimizable_video_width = 5120;
	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_state_create(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
					dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;

			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (is_phantom) {
				if (tg->funcs->enable_crtc) {
					int main_pipe_width, main_pipe_height;
					struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

					main_pipe_width = old_paired_stream->dst.width;
					main_pipe_height = old_paired_stream->dst.height;
					if (dc->hwss.blank_phantom)
						dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
					tg->funcs->enable_crtc(tg);
				}
			}

			if (is_phantom)
				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
			else
				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);
			}

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (is_phantom) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing_changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		if (stream->apply_seamless_boot_optimization)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;

	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}
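
/*
 * Illustrative sketch (not part of the driver): the create/init lifecycle a
 * DM layer drives around dc_create(). The init-data handling shown is a
 * minimal subset; a real caller fills struct dc_init_data from the adapter
 * (BIOS image, asic id, register offsets, and so on).
 */
static struct dc *__maybe_unused example_bring_up_dc(struct dc_init_data *init_params)
{
	struct dc *dc = dc_create(init_params);

	if (!dc)
		return NULL;

	/* Program hardware to a known state (skipped on virtual HW). */
	dc_hardware_init(dc);

	return dc;
	/* Teardown later is symmetric: dc_destroy(&dc); */
}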

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other unblanked pipes as they have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* remove any other pipes by checking valid plane */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, ctx, group_index, group_size, pipe_set);
			} else if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}

static bool streams_changed(struct dc *dc,
			    struct dc_stream_state *streams[],
			    uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}

bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	if (dc->debug.force_odm_combine)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	if (tg_inst != link->link_enc->preferred_engine)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
		return false;

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
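
/*
 * Illustrative sketch (not part of the driver): how a DM layer might use
 * dc_validate_boot_timing() when deciding whether a firmware-lit eDP panel
 * can be taken over seamlessly. The stream-derived locals are hypothetical.
 */
static void __maybe_unused example_check_seamless_boot(struct dc *dc,
		struct dc_stream_state *stream)
{
	/* Compare the mode we want against what the VBIOS already programmed;
	 * only an exact timing match (and a DP pixel-format match) allows the
	 * flicker-free takeover.
	 */
	if (stream->sink && dc_validate_boot_timing(dc, stream->sink, &stream->timing))
		stream->apply_seamless_boot_optimization = true;
}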

static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}

/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies the given context to the hardware and copies it into the current
 * context. It's up to the user to release the src context afterwards.
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * SubVP lock is acquired
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
	for (i = 0; i < context->stream_count; i++) {
		uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;

		context->streams[i]->update_flags.raw = 0xFFFFFFFF;
		context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
	}

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
		/* Need to do otg sync again as otg could be out of sync due to otg
		 * workaround applied during clock update
		 */
		dc_trigger_sync(dc, context);
	}

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Clear update flags that were set earlier to avoid redundant programming */
	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->update_flags.raw = 0x0;

	old_state = dc->current_state;
	dc->current_state = context;

	dc_state_release(old_state);

	dc_state_retain(dc->current_state);

	return result;
}
update flags that were set earlier to avoid redundant programming */
1997 for (i = 0; i < context->stream_count; i++) {
1998 context->streams[i]->update_flags.raw = 0x0;
1999 }
2000
2001 old_state = dc->current_state;
2002 dc->current_state = context;
2003
2004 dc_state_release(old_state);
2005
2006 dc_state_retain(dc->current_state);
2007
2008 return result;
2009 }
2010
2011 static bool commit_minimal_transition_state(struct dc *dc,
2012 struct dc_state *transition_base_context);
2013
2014 /**
2015 * dc_commit_streams - Commit current stream state
2016 *
2017 * @dc: DC object with the commit state to be configured in the hardware
2018 * @streams: Array with a list of stream states
2019 * @stream_count: Total number of streams
2020 *
2021 * Function responsible for committing stream changes to the hardware.
2022 *
2023 * Return:
2024 * Return DC_OK if everything works as expected; otherwise, return a dc_status
2025 * code.
2026 */
2027 enum dc_status dc_commit_streams(struct dc *dc,
2028 struct dc_stream_state *streams[],
2029 uint8_t stream_count)
2030 {
2031 int i, j;
2032 struct dc_state *context;
2033 enum dc_status res = DC_OK;
2034 struct dc_validation_set set[MAX_STREAMS] = {0};
2035 struct pipe_ctx *pipe;
2036 bool handle_exit_odm2to1 = false;
2037
2038 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2039 return res;
2040
2041 if (!streams_changed(dc, streams, stream_count))
2042 return res;
2043
2044 DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
2045
2046 for (i = 0; i < stream_count; i++) {
2047 struct dc_stream_state *stream = streams[i];
2048 struct dc_stream_status *status = dc_stream_get_status(stream);
2049
2050 dc_stream_log(dc, stream);
2051
2052 set[i].stream = stream;
2053
2054 if (status) {
2055 set[i].plane_count = status->plane_count;
2056 for (j = 0; j < status->plane_count; j++)
2057 set[i].plane_states[j] = status->plane_states[j];
2058 }
2059 }
2060
2061 /* ODM Combine 2:1 power optimization is only applied in the single-stream
2062 * scenario; it uses more pipes than strictly needed to reduce power consumption.
2063 * We need to switch off this feature to make room for new streams.
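 * For example (illustrative): a lone high-bandwidth stream may be driven
 * through two pipes on one OTG via ODM 2:1, which is detected below through
 * pipe->next_odm_pipe; before a second stream can be added, a minimal
 * transition commit collapses it back to a single pipe.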
2064 */ 2065 if (stream_count > dc->current_state->stream_count && 2066 dc->current_state->stream_count == 1) { 2067 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2068 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2069 if (pipe->next_odm_pipe) 2070 handle_exit_odm2to1 = true; 2071 } 2072 } 2073 2074 if (handle_exit_odm2to1) 2075 res = commit_minimal_transition_state(dc, dc->current_state); 2076 2077 context = dc_state_create_current_copy(dc); 2078 if (!context) 2079 goto context_alloc_fail; 2080 2081 res = dc_validate_with_context(dc, set, stream_count, context, false); 2082 if (res != DC_OK) { 2083 BREAK_TO_DEBUGGER(); 2084 goto fail; 2085 } 2086 2087 res = dc_commit_state_no_check(dc, context); 2088 2089 for (i = 0; i < stream_count; i++) { 2090 for (j = 0; j < context->stream_count; j++) { 2091 if (streams[i]->stream_id == context->streams[j]->stream_id) 2092 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2093 2094 if (dc_is_embedded_signal(streams[i]->signal)) { 2095 struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]); 2096 2097 if (dc->hwss.is_abm_supported) 2098 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); 2099 else 2100 status->is_abm_supported = true; 2101 } 2102 } 2103 } 2104 2105 fail: 2106 dc_state_release(context); 2107 2108 context_alloc_fail: 2109 2110 DC_LOG_DC("%s Finished.\n", __func__); 2111 2112 return res; 2113 } 2114 2115 bool dc_acquire_release_mpc_3dlut( 2116 struct dc *dc, bool acquire, 2117 struct dc_stream_state *stream, 2118 struct dc_3dlut **lut, 2119 struct dc_transfer_func **shaper) 2120 { 2121 int pipe_idx; 2122 bool ret = false; 2123 bool found_pipe_idx = false; 2124 const struct resource_pool *pool = dc->res_pool; 2125 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2126 int mpcc_id = 0; 2127 2128 if (pool && res_ctx) { 2129 if (acquire) { 2130 /*find pipe idx for the given stream*/ 2131 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2132 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2133 found_pipe_idx = true; 2134 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2135 break; 2136 } 2137 } 2138 } else 2139 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2140 2141 if (found_pipe_idx) { 2142 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2143 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2144 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2145 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2146 } 2147 } 2148 return ret; 2149 } 2150 2151 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2152 { 2153 int i; 2154 struct pipe_ctx *pipe; 2155 2156 for (i = 0; i < MAX_PIPES; i++) { 2157 pipe = &context->res_ctx.pipe_ctx[i]; 2158 2159 // Don't check flip pending on phantom pipes 2160 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) 2161 continue; 2162 2163 /* Must set to false to start with, due to OR in update function */ 2164 pipe->plane_state->status.is_flip_pending = false; 2165 dc->hwss.update_pending_status(pipe); 2166 if (pipe->plane_state->status.is_flip_pending) 2167 return true; 2168 } 2169 return false; 2170 } 2171 2172 /* Perform updates here which need to be deferred until next vupdate 2173 * 2174 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2175 * but forcing lut memory to shutdown state is immediate. 
This causes 2176 * single frame corruption as lut gets disabled mid-frame unless shutdown 2177 * is deferred until after entering bypass. 2178 */ 2179 static void process_deferred_updates(struct dc *dc) 2180 { 2181 int i = 0; 2182 2183 if (dc->debug.enable_mem_low_power.bits.cm) { 2184 ASSERT(dc->dcn_ip->max_num_dpp); 2185 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2186 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2187 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2188 } 2189 } 2190 2191 void dc_post_update_surfaces_to_stream(struct dc *dc) 2192 { 2193 int i; 2194 struct dc_state *context = dc->current_state; 2195 2196 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2197 return; 2198 2199 post_surface_trace(dc); 2200 2201 /* 2202 * Only relevant for DCN behavior where we can guarantee the optimization 2203 * is safe to apply - retain the legacy behavior for DCE. 2204 */ 2205 2206 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2207 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2208 else { 2209 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2210 2211 if (is_flip_pending_in_pipes(dc, context)) 2212 return; 2213 2214 for (i = 0; i < dc->res_pool->pipe_count; i++) 2215 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2216 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2217 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2218 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); 2219 } 2220 2221 process_deferred_updates(dc); 2222 2223 dc->hwss.optimize_bandwidth(dc, context); 2224 2225 if (dc->hwss.update_dsc_pg) 2226 dc->hwss.update_dsc_pg(dc, context, true); 2227 } 2228 2229 dc->optimized_required = false; 2230 dc->wm_optimized_required = false; 2231 } 2232 2233 bool dc_set_generic_gpio_for_stereo(bool enable, 2234 struct gpio_service *gpio_service) 2235 { 2236 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2237 struct gpio_pin_info pin_info; 2238 struct gpio *generic; 2239 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2240 GFP_KERNEL); 2241 2242 if (!config) 2243 return false; 2244 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2245 2246 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2247 kfree(config); 2248 return false; 2249 } else { 2250 generic = dal_gpio_service_create_generic_mux( 2251 gpio_service, 2252 pin_info.offset, 2253 pin_info.mask); 2254 } 2255 2256 if (!generic) { 2257 kfree(config); 2258 return false; 2259 } 2260 2261 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2262 2263 config->enable_output_from_mux = enable; 2264 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2265 2266 if (gpio_result == GPIO_RESULT_OK) 2267 gpio_result = dal_mux_setup_config(generic, config); 2268 2269 if (gpio_result == GPIO_RESULT_OK) { 2270 dal_gpio_close(generic); 2271 dal_gpio_destroy_generic_mux(&generic); 2272 kfree(config); 2273 return true; 2274 } else { 2275 dal_gpio_close(generic); 2276 dal_gpio_destroy_generic_mux(&generic); 2277 kfree(config); 2278 return false; 2279 } 2280 } 2281 2282 static bool is_surface_in_context( 2283 const struct dc_state *context, 2284 const struct dc_plane_state *plane_state) 2285 { 2286 int j; 2287 2288 for (j = 0; j < MAX_PIPES; j++) { 2289 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2290 2291 if (plane_state == pipe_ctx->plane_state) { 2292 return true; 2293 } 2294 } 2295 2296 return false; 2297 } 2298 2299 static 
enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2300 {
2301 union surface_update_flags *update_flags = &u->surface->update_flags;
2302 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2303
2304 if (!u->plane_info)
2305 return UPDATE_TYPE_FAST;
2306
2307 if (u->plane_info->color_space != u->surface->color_space) {
2308 update_flags->bits.color_space_change = 1;
2309 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2310 }
2311
2312 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2313 update_flags->bits.horizontal_mirror_change = 1;
2314 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2315 }
2316
2317 if (u->plane_info->rotation != u->surface->rotation) {
2318 update_flags->bits.rotation_change = 1;
2319 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2320 }
2321
2322 if (u->plane_info->format != u->surface->format) {
2323 update_flags->bits.pixel_format_change = 1;
2324 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2325 }
2326
2327 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2328 update_flags->bits.stereo_format_change = 1;
2329 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2330 }
2331
2332 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2333 update_flags->bits.per_pixel_alpha_change = 1;
2334 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2335 }
2336
2337 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2338 update_flags->bits.global_alpha_change = 1;
2339 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2340 }
2341
2342 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2343 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2344 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2345 /* During DCC on/off, stutter period is calculated before
2346 * DCC has fully transitioned. This results in incorrect
2347 * stutter period calculation. Triggering a full update will
2348 * recalculate stutter period.
2349 */
2350 update_flags->bits.dcc_change = 1;
2351 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2352 }
2353
2354 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2355 resource_pixel_format_to_bpp(u->surface->format)) {
2356 /* different bytes per element will require full bandwidth
2357 * and DML calculation
2358 */
2359 update_flags->bits.bpp_change = 1;
2360 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2361 }
2362
2363 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2364 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2365 update_flags->bits.plane_size_change = 1;
2366 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2367 }
2368
2369
2370 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2371 sizeof(union dc_tiling_info)) != 0) {
2372 update_flags->bits.swizzle_change = 1;
2373 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2374
2375 /* todo: the checks below are HW dependent; we should add a hook to the
2376 * DCE/N resource and validate there.
2377 */
2378 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2379 /* swizzled mode requires RQ to be set up properly,
2380 * thus we need to run DML to calculate RQ settings
2381 */
2382 update_flags->bits.bandwidth_change = 1;
2383 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2384 }
2385 }
2386
2387 /* This should be UPDATE_TYPE_FAST if nothing has changed.
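 * Update types are strictly ordered: UPDATE_TYPE_FAST < UPDATE_TYPE_MED <
 * UPDATE_TYPE_FULL. elevate_update_type() only ever raises the level, so a
 * later MED finding never downgrades an earlier FULL.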
*/ 2388 return update_type; 2389 } 2390 2391 static enum surface_update_type get_scaling_info_update_type( 2392 const struct dc *dc, 2393 const struct dc_surface_update *u) 2394 { 2395 union surface_update_flags *update_flags = &u->surface->update_flags; 2396 2397 if (!u->scaling_info) 2398 return UPDATE_TYPE_FAST; 2399 2400 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2401 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2402 || u->scaling_info->scaling_quality.integer_scaling != 2403 u->surface->scaling_quality.integer_scaling 2404 ) { 2405 update_flags->bits.scaling_change = 1; 2406 2407 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2408 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2409 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2410 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2411 /* Making dst rect smaller requires a bandwidth change */ 2412 update_flags->bits.bandwidth_change = 1; 2413 } 2414 2415 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2416 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2417 2418 update_flags->bits.scaling_change = 1; 2419 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2420 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2421 /* Making src rect bigger requires a bandwidth change */ 2422 update_flags->bits.clock_change = 1; 2423 } 2424 2425 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && 2426 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || 2427 u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) 2428 /* Changing clip size of a large surface may result in MPC slice count change */ 2429 update_flags->bits.bandwidth_change = 1; 2430 2431 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2432 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2433 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2434 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2435 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2436 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2437 update_flags->bits.position_change = 1; 2438 2439 if (update_flags->bits.clock_change 2440 || update_flags->bits.bandwidth_change 2441 || update_flags->bits.scaling_change) 2442 return UPDATE_TYPE_FULL; 2443 2444 if (update_flags->bits.position_change) 2445 return UPDATE_TYPE_MED; 2446 2447 return UPDATE_TYPE_FAST; 2448 } 2449 2450 static enum surface_update_type det_surface_update(const struct dc *dc, 2451 const struct dc_surface_update *u) 2452 { 2453 const struct dc_state *context = dc->current_state; 2454 enum surface_update_type type; 2455 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2456 union surface_update_flags *update_flags = &u->surface->update_flags; 2457 2458 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2459 update_flags->raw = 0xFFFFFFFF; 2460 return UPDATE_TYPE_FULL; 2461 } 2462 2463 update_flags->raw = 0; // Reset all flags 2464 2465 type = get_plane_info_update_type(u); 2466 elevate_update_type(&overall_type, type); 2467 2468 type = get_scaling_info_update_type(dc, u); 2469 elevate_update_type(&overall_type, type); 2470 2471 if (u->flip_addr) { 2472 update_flags->bits.addr_update = 1; 2473 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2474 update_flags->bits.tmz_changed = 1; 2475 
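/* A TMZ (trusted memory zone) mismatch between the new flip address and
 * the current surface changes the protection state of the scanout buffer,
 * so a full update is required. */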
elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2476 } 2477 } 2478 if (u->in_transfer_func) 2479 update_flags->bits.in_transfer_func_change = 1; 2480 2481 if (u->input_csc_color_matrix) 2482 update_flags->bits.input_csc_change = 1; 2483 2484 if (u->coeff_reduction_factor) 2485 update_flags->bits.coeff_reduction_change = 1; 2486 2487 if (u->gamut_remap_matrix) 2488 update_flags->bits.gamut_remap_change = 1; 2489 2490 if (u->blend_tf) 2491 update_flags->bits.gamma_change = 1; 2492 2493 if (u->gamma) { 2494 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2495 2496 if (u->plane_info) 2497 format = u->plane_info->format; 2498 else if (u->surface) 2499 format = u->surface->format; 2500 2501 if (dce_use_lut(format)) 2502 update_flags->bits.gamma_change = 1; 2503 } 2504 2505 if (u->lut3d_func || u->func_shaper) 2506 update_flags->bits.lut_3d = 1; 2507 2508 if (u->hdr_mult.value) 2509 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2510 update_flags->bits.hdr_mult = 1; 2511 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2512 } 2513 2514 if (update_flags->bits.in_transfer_func_change) { 2515 type = UPDATE_TYPE_MED; 2516 elevate_update_type(&overall_type, type); 2517 } 2518 2519 if (update_flags->bits.lut_3d) { 2520 type = UPDATE_TYPE_FULL; 2521 elevate_update_type(&overall_type, type); 2522 } 2523 2524 if (dc->debug.enable_legacy_fast_update && 2525 (update_flags->bits.gamma_change || 2526 update_flags->bits.gamut_remap_change || 2527 update_flags->bits.input_csc_change || 2528 update_flags->bits.coeff_reduction_change)) { 2529 type = UPDATE_TYPE_FULL; 2530 elevate_update_type(&overall_type, type); 2531 } 2532 return overall_type; 2533 } 2534 2535 static enum surface_update_type check_update_surfaces_for_stream( 2536 struct dc *dc, 2537 struct dc_surface_update *updates, 2538 int surface_count, 2539 struct dc_stream_update *stream_update, 2540 const struct dc_stream_status *stream_status) 2541 { 2542 int i; 2543 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2544 2545 if (dc->idle_optimizations_allowed) 2546 overall_type = UPDATE_TYPE_FULL; 2547 2548 if (stream_status == NULL || stream_status->plane_count != surface_count) 2549 overall_type = UPDATE_TYPE_FULL; 2550 2551 if (stream_update && stream_update->pending_test_pattern) { 2552 overall_type = UPDATE_TYPE_FULL; 2553 } 2554 2555 /* some stream updates require passive update */ 2556 if (stream_update) { 2557 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2558 2559 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2560 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2561 stream_update->integer_scaling_update) 2562 su_flags->bits.scaling = 1; 2563 2564 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2565 su_flags->bits.out_tf = 1; 2566 2567 if (stream_update->abm_level) 2568 su_flags->bits.abm_level = 1; 2569 2570 if (stream_update->dpms_off) 2571 su_flags->bits.dpms_off = 1; 2572 2573 if (stream_update->gamut_remap) 2574 su_flags->bits.gamut_remap = 1; 2575 2576 if (stream_update->wb_update) 2577 su_flags->bits.wb_update = 1; 2578 2579 if (stream_update->dsc_config) 2580 su_flags->bits.dsc_changed = 1; 2581 2582 if (stream_update->mst_bw_update) 2583 su_flags->bits.mst_bw = 1; 2584 2585 if (stream_update->stream && stream_update->stream->freesync_on_desktop && 2586 (stream_update->vrr_infopacket || stream_update->allow_freesync || 2587 stream_update->vrr_active_variable || 
stream_update->vrr_active_fixed)) 2588 su_flags->bits.fams_changed = 1; 2589 2590 if (su_flags->raw != 0) 2591 overall_type = UPDATE_TYPE_FULL; 2592 2593 if (stream_update->output_csc_transform || stream_update->output_color_space) 2594 su_flags->bits.out_csc = 1; 2595 2596 /* Output transfer function changes do not require bandwidth recalculation, 2597 * so don't trigger a full update 2598 */ 2599 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2600 su_flags->bits.out_tf = 1; 2601 } 2602 2603 for (i = 0 ; i < surface_count; i++) { 2604 enum surface_update_type type = 2605 det_surface_update(dc, &updates[i]); 2606 2607 elevate_update_type(&overall_type, type); 2608 } 2609 2610 return overall_type; 2611 } 2612 2613 /* 2614 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2615 * 2616 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2617 */ 2618 enum surface_update_type dc_check_update_surfaces_for_stream( 2619 struct dc *dc, 2620 struct dc_surface_update *updates, 2621 int surface_count, 2622 struct dc_stream_update *stream_update, 2623 const struct dc_stream_status *stream_status) 2624 { 2625 int i; 2626 enum surface_update_type type; 2627 2628 if (stream_update) 2629 stream_update->stream->update_flags.raw = 0; 2630 for (i = 0; i < surface_count; i++) 2631 updates[i].surface->update_flags.raw = 0; 2632 2633 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2634 if (type == UPDATE_TYPE_FULL) { 2635 if (stream_update) { 2636 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2637 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2638 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2639 } 2640 for (i = 0; i < surface_count; i++) 2641 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2642 } 2643 2644 if (type == UPDATE_TYPE_FAST) { 2645 // If there's an available clock comparator, we use that. 2646 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2647 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2648 dc->optimized_required = true; 2649 // Else we fallback to mem compare. 
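// (The offsetof() bound below intentionally compares only the members of
// struct dc_clocks that precede prev_p_state_change_support.)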
2650 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2651 dc->optimized_required = true; 2652 } 2653 2654 dc->optimized_required |= dc->wm_optimized_required; 2655 } 2656 2657 return type; 2658 } 2659 2660 static struct dc_stream_status *stream_get_status( 2661 struct dc_state *ctx, 2662 struct dc_stream_state *stream) 2663 { 2664 uint8_t i; 2665 2666 for (i = 0; i < ctx->stream_count; i++) { 2667 if (stream == ctx->streams[i]) { 2668 return &ctx->stream_status[i]; 2669 } 2670 } 2671 2672 return NULL; 2673 } 2674 2675 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2676 2677 static void copy_surface_update_to_plane( 2678 struct dc_plane_state *surface, 2679 struct dc_surface_update *srf_update) 2680 { 2681 if (srf_update->flip_addr) { 2682 surface->address = srf_update->flip_addr->address; 2683 surface->flip_immediate = 2684 srf_update->flip_addr->flip_immediate; 2685 surface->time.time_elapsed_in_us[surface->time.index] = 2686 srf_update->flip_addr->flip_timestamp_in_us - 2687 surface->time.prev_update_time_in_us; 2688 surface->time.prev_update_time_in_us = 2689 srf_update->flip_addr->flip_timestamp_in_us; 2690 surface->time.index++; 2691 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2692 surface->time.index = 0; 2693 2694 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2695 } 2696 2697 if (srf_update->scaling_info) { 2698 surface->scaling_quality = 2699 srf_update->scaling_info->scaling_quality; 2700 surface->dst_rect = 2701 srf_update->scaling_info->dst_rect; 2702 surface->src_rect = 2703 srf_update->scaling_info->src_rect; 2704 surface->clip_rect = 2705 srf_update->scaling_info->clip_rect; 2706 } 2707 2708 if (srf_update->plane_info) { 2709 surface->color_space = 2710 srf_update->plane_info->color_space; 2711 surface->format = 2712 srf_update->plane_info->format; 2713 surface->plane_size = 2714 srf_update->plane_info->plane_size; 2715 surface->rotation = 2716 srf_update->plane_info->rotation; 2717 surface->horizontal_mirror = 2718 srf_update->plane_info->horizontal_mirror; 2719 surface->stereo_format = 2720 srf_update->plane_info->stereo_format; 2721 surface->tiling_info = 2722 srf_update->plane_info->tiling_info; 2723 surface->visible = 2724 srf_update->plane_info->visible; 2725 surface->per_pixel_alpha = 2726 srf_update->plane_info->per_pixel_alpha; 2727 surface->global_alpha = 2728 srf_update->plane_info->global_alpha; 2729 surface->global_alpha_value = 2730 srf_update->plane_info->global_alpha_value; 2731 surface->dcc = 2732 srf_update->plane_info->dcc; 2733 surface->layer_index = 2734 srf_update->plane_info->layer_index; 2735 } 2736 2737 if (srf_update->gamma && 2738 (surface->gamma_correction != 2739 srf_update->gamma)) { 2740 memcpy(&surface->gamma_correction->entries, 2741 &srf_update->gamma->entries, 2742 sizeof(struct dc_gamma_entries)); 2743 surface->gamma_correction->is_identity = 2744 srf_update->gamma->is_identity; 2745 surface->gamma_correction->num_entries = 2746 srf_update->gamma->num_entries; 2747 surface->gamma_correction->type = 2748 srf_update->gamma->type; 2749 } 2750 2751 if (srf_update->in_transfer_func && 2752 (surface->in_transfer_func != 2753 srf_update->in_transfer_func)) { 2754 surface->in_transfer_func->sdr_ref_white_level = 2755 srf_update->in_transfer_func->sdr_ref_white_level; 2756 surface->in_transfer_func->tf = 2757 srf_update->in_transfer_func->tf; 2758 surface->in_transfer_func->type = 
2759 srf_update->in_transfer_func->type; 2760 memcpy(&surface->in_transfer_func->tf_pts, 2761 &srf_update->in_transfer_func->tf_pts, 2762 sizeof(struct dc_transfer_func_distributed_points)); 2763 } 2764 2765 if (srf_update->func_shaper && 2766 (surface->in_shaper_func != 2767 srf_update->func_shaper)) 2768 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2769 sizeof(*surface->in_shaper_func)); 2770 2771 if (srf_update->lut3d_func && 2772 (surface->lut3d_func != 2773 srf_update->lut3d_func)) 2774 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2775 sizeof(*surface->lut3d_func)); 2776 2777 if (srf_update->hdr_mult.value) 2778 surface->hdr_mult = 2779 srf_update->hdr_mult; 2780 2781 if (srf_update->blend_tf && 2782 (surface->blend_tf != 2783 srf_update->blend_tf)) 2784 memcpy(surface->blend_tf, srf_update->blend_tf, 2785 sizeof(*surface->blend_tf)); 2786 2787 if (srf_update->input_csc_color_matrix) 2788 surface->input_csc_color_matrix = 2789 *srf_update->input_csc_color_matrix; 2790 2791 if (srf_update->coeff_reduction_factor) 2792 surface->coeff_reduction_factor = 2793 *srf_update->coeff_reduction_factor; 2794 2795 if (srf_update->gamut_remap_matrix) 2796 surface->gamut_remap_matrix = 2797 *srf_update->gamut_remap_matrix; 2798 } 2799 2800 static void copy_stream_update_to_stream(struct dc *dc, 2801 struct dc_state *context, 2802 struct dc_stream_state *stream, 2803 struct dc_stream_update *update) 2804 { 2805 struct dc_context *dc_ctx = dc->ctx; 2806 2807 if (update == NULL || stream == NULL) 2808 return; 2809 2810 if (update->src.height && update->src.width) 2811 stream->src = update->src; 2812 2813 if (update->dst.height && update->dst.width) 2814 stream->dst = update->dst; 2815 2816 if (update->out_transfer_func && 2817 stream->out_transfer_func != update->out_transfer_func) { 2818 stream->out_transfer_func->sdr_ref_white_level = 2819 update->out_transfer_func->sdr_ref_white_level; 2820 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2821 stream->out_transfer_func->type = 2822 update->out_transfer_func->type; 2823 memcpy(&stream->out_transfer_func->tf_pts, 2824 &update->out_transfer_func->tf_pts, 2825 sizeof(struct dc_transfer_func_distributed_points)); 2826 } 2827 2828 if (update->hdr_static_metadata) 2829 stream->hdr_static_metadata = *update->hdr_static_metadata; 2830 2831 if (update->abm_level) 2832 stream->abm_level = *update->abm_level; 2833 2834 if (update->periodic_interrupt) 2835 stream->periodic_interrupt = *update->periodic_interrupt; 2836 2837 if (update->gamut_remap) 2838 stream->gamut_remap_matrix = *update->gamut_remap; 2839 2840 /* Note: this being updated after mode set is currently not a use case 2841 * however if it arises OCSC would need to be reprogrammed at the 2842 * minimum 2843 */ 2844 if (update->output_color_space) 2845 stream->output_color_space = *update->output_color_space; 2846 2847 if (update->output_csc_transform) 2848 stream->csc_color_matrix = *update->output_csc_transform; 2849 2850 if (update->vrr_infopacket) 2851 stream->vrr_infopacket = *update->vrr_infopacket; 2852 2853 if (update->allow_freesync) 2854 stream->allow_freesync = *update->allow_freesync; 2855 2856 if (update->vrr_active_variable) 2857 stream->vrr_active_variable = *update->vrr_active_variable; 2858 2859 if (update->vrr_active_fixed) 2860 stream->vrr_active_fixed = *update->vrr_active_fixed; 2861 2862 if (update->crtc_timing_adjust) 2863 stream->adjust = *update->crtc_timing_adjust; 2864 2865 if (update->dpms_off) 2866 stream->dpms_off = *update->dpms_off; 
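/* Every member of dc_stream_update is a pointer that is NULL unless the
 * caller intends to change that field, which is why each copy in this
 * function is guarded. A hypothetical caller updating only dpms_off:
 *
 *	struct dc_stream_update upd = {0};
 *	bool off = true;
 *
 *	upd.dpms_off = &off;	// every other field is left untouched
 */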
2867
2868 if (update->hfvsif_infopacket)
2869 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2870
2871 if (update->vtem_infopacket)
2872 stream->vtem_infopacket = *update->vtem_infopacket;
2873
2874 if (update->vsc_infopacket)
2875 stream->vsc_infopacket = *update->vsc_infopacket;
2876
2877 if (update->vsp_infopacket)
2878 stream->vsp_infopacket = *update->vsp_infopacket;
2879
2880 if (update->adaptive_sync_infopacket)
2881 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2882
2883 if (update->dither_option)
2884 stream->dither_option = *update->dither_option;
2885
2886 if (update->pending_test_pattern)
2887 stream->test_pattern = *update->pending_test_pattern;
2888 /* update current stream with writeback info */
2889 if (update->wb_update) {
2890 int i;
2891
2892 stream->num_wb_info = update->wb_update->num_wb_info;
2893 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2894 for (i = 0; i < stream->num_wb_info; i++)
2895 stream->writeback_info[i] =
2896 update->wb_update->writeback_info[i];
2897 }
2898 if (update->dsc_config) {
2899 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2900 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2901 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2902 update->dsc_config->num_slices_v != 0);
2903
2904 /* Use temporary context for validating new DSC config */
2905 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
2906
2907 if (dsc_validate_context) {
2908 stream->timing.dsc_cfg = *update->dsc_config;
2909 stream->timing.flags.DSC = enable_dsc;
2910 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2911 stream->timing.dsc_cfg = old_dsc_cfg;
2912 stream->timing.flags.DSC = old_dsc_enabled;
2913 update->dsc_config = NULL;
2914 }
2915
2916 dc_state_release(dsc_validate_context);
2917 } else {
2918 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2919 update->dsc_config = NULL;
2920 }
2921 }
2922 }
2923
2924 static void backup_plane_states_for_stream(
2925 struct dc_plane_state plane_states[MAX_SURFACE_NUM],
2926 struct dc_stream_state *stream)
2927 {
2928 int i;
2929 struct dc_stream_status *status = dc_stream_get_status(stream);
2930
2931 if (!status)
2932 return;
2933
2934 for (i = 0; i < status->plane_count; i++)
2935 plane_states[i] = *status->plane_states[i];
2936 }
2937
2938 static void restore_plane_states_for_stream(
2939 struct dc_plane_state plane_states[MAX_SURFACE_NUM],
2940 struct dc_stream_state *stream)
2941 {
2942 int i;
2943 struct dc_stream_status *status = dc_stream_get_status(stream);
2944
2945 if (!status)
2946 return;
2947
2948 for (i = 0; i < status->plane_count; i++)
2949 *status->plane_states[i] = plane_states[i];
2950 }
2951
2952 static bool update_planes_and_stream_state(struct dc *dc,
2953 struct dc_surface_update *srf_updates, int surface_count,
2954 struct dc_stream_state *stream,
2955 struct dc_stream_update *stream_update,
2956 enum surface_update_type *new_update_type,
2957 struct dc_state **new_context)
2958 {
2959 struct dc_state *context;
2960 int i, j;
2961 enum surface_update_type update_type;
2962 const struct dc_stream_status *stream_status;
2963 struct dc_context *dc_ctx = dc->ctx;
2964
2965 stream_status = dc_stream_get_status(stream);
2966
2967 if (!stream_status) {
2968 if (surface_count) /* Only an error condition if surf_count non-zero */
2969 ASSERT(false);
2970
2971 return false; /* Cannot commit surface to stream that is not committed */
2972 }
2973
2974 context =
dc->current_state; 2975 backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); 2976 update_type = dc_check_update_surfaces_for_stream( 2977 dc, srf_updates, surface_count, stream_update, stream_status); 2978 2979 /* update current stream with the new updates */ 2980 copy_stream_update_to_stream(dc, context, stream, stream_update); 2981 2982 /* do not perform surface update if surface has invalid dimensions 2983 * (all zero) and no scaling_info is provided 2984 */ 2985 if (surface_count > 0) { 2986 for (i = 0; i < surface_count; i++) { 2987 if ((srf_updates[i].surface->src_rect.width == 0 || 2988 srf_updates[i].surface->src_rect.height == 0 || 2989 srf_updates[i].surface->dst_rect.width == 0 || 2990 srf_updates[i].surface->dst_rect.height == 0) && 2991 (!srf_updates[i].scaling_info || 2992 srf_updates[i].scaling_info->src_rect.width == 0 || 2993 srf_updates[i].scaling_info->src_rect.height == 0 || 2994 srf_updates[i].scaling_info->dst_rect.width == 0 || 2995 srf_updates[i].scaling_info->dst_rect.height == 0)) { 2996 DC_ERROR("Invalid src/dst rects in surface update!\n"); 2997 return false; 2998 } 2999 } 3000 } 3001 3002 if (update_type >= update_surface_trace_level) 3003 update_surface_trace(dc, srf_updates, surface_count); 3004 3005 for (i = 0; i < surface_count; i++) 3006 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]); 3007 3008 if (update_type >= UPDATE_TYPE_FULL) { 3009 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3010 3011 for (i = 0; i < surface_count; i++) 3012 new_planes[i] = srf_updates[i].surface; 3013 3014 /* initialize scratch memory for building context */ 3015 context = dc_state_create_copy(dc->current_state); 3016 if (context == NULL) { 3017 DC_ERROR("Failed to allocate new validate context!\n"); 3018 return false; 3019 } 3020 3021 /* For each full update, remove all existing phantom pipes first. 
3022 * Ensures that we have enough pipes for newly added MPO planes 3023 */ 3024 dc_state_remove_phantom_streams_and_planes(dc, context); 3025 dc_state_release_phantom_streams_and_planes(dc, context); 3026 3027 /*remove old surfaces from context */ 3028 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { 3029 3030 BREAK_TO_DEBUGGER(); 3031 goto fail; 3032 } 3033 3034 /* add surface to context */ 3035 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3036 3037 BREAK_TO_DEBUGGER(); 3038 goto fail; 3039 } 3040 } 3041 3042 /* save update parameters into surface */ 3043 for (i = 0; i < surface_count; i++) { 3044 struct dc_plane_state *surface = srf_updates[i].surface; 3045 3046 if (update_type >= UPDATE_TYPE_MED) { 3047 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3048 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3049 3050 if (pipe_ctx->plane_state != surface) 3051 continue; 3052 3053 resource_build_scaling_params(pipe_ctx); 3054 } 3055 } 3056 } 3057 3058 if (update_type == UPDATE_TYPE_FULL) { 3059 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3060 BREAK_TO_DEBUGGER(); 3061 goto fail; 3062 } 3063 3064 for (i = 0; i < context->stream_count; i++) { 3065 struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, 3066 context->streams[i]); 3067 3068 if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) 3069 resource_build_test_pattern_params(&context->res_ctx, otg_master); 3070 } 3071 } 3072 3073 *new_context = context; 3074 *new_update_type = update_type; 3075 backup_plane_states_for_stream(context->scratch.plane_states, stream); 3076 3077 return true; 3078 3079 fail: 3080 dc_state_release(context); 3081 3082 return false; 3083 3084 } 3085 3086 static void commit_planes_do_stream_update(struct dc *dc, 3087 struct dc_stream_state *stream, 3088 struct dc_stream_update *stream_update, 3089 enum surface_update_type update_type, 3090 struct dc_state *context) 3091 { 3092 int j; 3093 3094 // Stream updates 3095 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3096 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3097 3098 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { 3099 3100 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3101 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3102 3103 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3104 stream_update->vrr_infopacket || 3105 stream_update->vsc_infopacket || 3106 stream_update->vsp_infopacket || 3107 stream_update->hfvsif_infopacket || 3108 stream_update->adaptive_sync_infopacket || 3109 stream_update->vtem_infopacket) { 3110 resource_build_info_frame(pipe_ctx); 3111 dc->hwss.update_info_frame(pipe_ctx); 3112 3113 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3114 dc->link_srv->dp_trace_source_sequence( 3115 pipe_ctx->stream->link, 3116 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3117 } 3118 3119 if (stream_update->hdr_static_metadata && 3120 stream->use_dynamic_meta && 3121 dc->hwss.set_dmdata_attributes && 3122 pipe_ctx->stream->dmdata_address.quad_part != 0) 3123 dc->hwss.set_dmdata_attributes(pipe_ctx); 3124 3125 if (stream_update->gamut_remap) 3126 dc_stream_set_gamut_remap(dc, stream); 3127 3128 if (stream_update->output_csc_transform) 3129 dc_stream_program_csc_matrix(dc, stream); 3130 3131 if (stream_update->dither_option) { 3132 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3133 
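/* Dither/FMT is programmed in the OPP; with ODM combine each segment has
 * its own OPP, so program the master pipe first and then walk every
 * odm_pipe in the chain below. */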
resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3134 &pipe_ctx->stream->bit_depth_params); 3135 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3136 &stream->bit_depth_params, 3137 &stream->clamping); 3138 while (odm_pipe) { 3139 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3140 &stream->bit_depth_params, 3141 &stream->clamping); 3142 odm_pipe = odm_pipe->next_odm_pipe; 3143 } 3144 } 3145 3146 3147 /* Full fe update*/ 3148 if (update_type == UPDATE_TYPE_FAST) 3149 continue; 3150 3151 if (stream_update->dsc_config) 3152 dc->link_srv->update_dsc_config(pipe_ctx); 3153 3154 if (stream_update->mst_bw_update) { 3155 if (stream_update->mst_bw_update->is_increase) 3156 dc->link_srv->increase_mst_payload(pipe_ctx, 3157 stream_update->mst_bw_update->mst_stream_bw); 3158 else 3159 dc->link_srv->reduce_mst_payload(pipe_ctx, 3160 stream_update->mst_bw_update->mst_stream_bw); 3161 } 3162 3163 if (stream_update->pending_test_pattern) { 3164 dc_link_dp_set_test_pattern(stream->link, 3165 stream->test_pattern.type, 3166 stream->test_pattern.color_space, 3167 stream->test_pattern.p_link_settings, 3168 stream->test_pattern.p_custom_pattern, 3169 stream->test_pattern.cust_pattern_size); 3170 } 3171 3172 if (stream_update->dpms_off) { 3173 if (*stream_update->dpms_off) { 3174 dc->link_srv->set_dpms_off(pipe_ctx); 3175 /* for dpms, keep acquired resources*/ 3176 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3177 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3178 3179 dc->optimized_required = true; 3180 3181 } else { 3182 if (get_seamless_boot_stream_count(context) == 0) 3183 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3184 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3185 } 3186 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space 3187 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { 3188 /* 3189 * Workaround for firmware issue in some receivers where they don't pick up 3190 * correct output color space unless DP link is disabled/re-enabled 3191 */ 3192 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3193 } 3194 3195 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3196 bool should_program_abm = true; 3197 3198 // if otg funcs defined check if blanked before programming 3199 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3200 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3201 should_program_abm = false; 3202 3203 if (should_program_abm) { 3204 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3205 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3206 } else { 3207 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3208 pipe_ctx->stream_res.abm, stream->abm_level); 3209 } 3210 } 3211 } 3212 } 3213 } 3214 } 3215 3216 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3217 { 3218 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3219 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3220 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3221 return true; 3222 3223 if (stream->link->replay_settings.config.replay_supported) 3224 return true; 3225 3226 return false; 3227 } 3228 3229 void dc_dmub_update_dirty_rect(struct dc *dc, 3230 int surface_count, 3231 struct dc_stream_state *stream, 3232 struct dc_surface_update *srf_updates, 3233 struct dc_state *context) 3234 { 3235 union 
dmub_rb_cmd cmd; 3236 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3237 unsigned int i, j; 3238 unsigned int panel_inst = 0; 3239 3240 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3241 return; 3242 3243 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3244 return; 3245 3246 memset(&cmd, 0x0, sizeof(cmd)); 3247 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3248 cmd.update_dirty_rect.header.sub_type = 0; 3249 cmd.update_dirty_rect.header.payload_bytes = 3250 sizeof(cmd.update_dirty_rect) - 3251 sizeof(cmd.update_dirty_rect.header); 3252 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3253 for (i = 0; i < surface_count; i++) { 3254 struct dc_plane_state *plane_state = srf_updates[i].surface; 3255 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3256 3257 if (!srf_updates[i].surface || !flip_addr) 3258 continue; 3259 /* Do not send in immediate flip mode */ 3260 if (srf_updates[i].surface->flip_immediate) 3261 continue; 3262 3263 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3264 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3265 sizeof(flip_addr->dirty_rects)); 3266 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3267 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3268 3269 if (pipe_ctx->stream != stream) 3270 continue; 3271 if (pipe_ctx->plane_state != plane_state) 3272 continue; 3273 3274 update_dirty_rect->panel_inst = panel_inst; 3275 update_dirty_rect->pipe_idx = j; 3276 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3277 } 3278 } 3279 } 3280 3281 static void build_dmub_update_dirty_rect( 3282 struct dc *dc, 3283 int surface_count, 3284 struct dc_stream_state *stream, 3285 struct dc_surface_update *srf_updates, 3286 struct dc_state *context, 3287 struct dc_dmub_cmd dc_dmub_cmd[], 3288 unsigned int *dmub_cmd_count) 3289 { 3290 union dmub_rb_cmd cmd; 3291 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3292 unsigned int i, j; 3293 unsigned int panel_inst = 0; 3294 3295 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3296 return; 3297 3298 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3299 return; 3300 3301 memset(&cmd, 0x0, sizeof(cmd)); 3302 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3303 cmd.update_dirty_rect.header.sub_type = 0; 3304 cmd.update_dirty_rect.header.payload_bytes = 3305 sizeof(cmd.update_dirty_rect) - 3306 sizeof(cmd.update_dirty_rect.header); 3307 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3308 for (i = 0; i < surface_count; i++) { 3309 struct dc_plane_state *plane_state = srf_updates[i].surface; 3310 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3311 3312 if (!srf_updates[i].surface || !flip_addr) 3313 continue; 3314 /* Do not send in immediate flip mode */ 3315 if (srf_updates[i].surface->flip_immediate) 3316 continue; 3317 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3318 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3319 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3320 sizeof(flip_addr->dirty_rects)); 3321 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3322 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3323 3324 if (pipe_ctx->stream != stream) 3325 continue; 3326 if (pipe_ctx->plane_state != plane_state) 3327 continue; 3328 update_dirty_rect->panel_inst = panel_inst; 3329 update_dirty_rect->pipe_idx = j; 3330 
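/* Unlike dc_dmub_update_dirty_rect() above, which sends its command right
 * away, queue the command into dc_dmub_cmd[] here so it can be executed
 * later as part of the fast-update block sequence while the OTG lock is
 * held. */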
dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3331 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3332 (*dmub_cmd_count)++;
3333 }
3334 }
3335 }
3336
3337
3338 /**
3339 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3340 *
3341 * @dc: Current DC state
3342 * @srf_updates: Array of surface updates
3343 * @surface_count: Number of surfaces that have an update
3344 * @stream: Corresponding stream to be updated in the current flip
3345 * @context: New DC state to be programmed
3346 *
3347 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3348 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3349 *
3350 * This function builds an array of DMCUB commands to be sent to DMCUB. Building the
3351 * commands up front is required so that they can be sent while the OTG lock is acquired.
3352 *
3353 * Return: void
3354 */
3355 static void build_dmub_cmd_list(struct dc *dc,
3356 struct dc_surface_update *srf_updates,
3357 int surface_count,
3358 struct dc_stream_state *stream,
3359 struct dc_state *context,
3360 struct dc_dmub_cmd dc_dmub_cmd[],
3361 unsigned int *dmub_cmd_count)
3362 {
3363 // Initialize cmd count to 0
3364 *dmub_cmd_count = 0;
3365 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3366 }
3367
3368 static void commit_planes_for_stream_fast(struct dc *dc,
3369 struct dc_surface_update *srf_updates,
3370 int surface_count,
3371 struct dc_stream_state *stream,
3372 struct dc_stream_update *stream_update,
3373 enum surface_update_type update_type,
3374 struct dc_state *context)
3375 {
3376 int i, j;
3377 struct pipe_ctx *top_pipe_to_program = NULL;
3378 struct dc_stream_status *stream_status = NULL;
3379 dc_z10_restore(dc);
3380
3381 top_pipe_to_program = resource_get_otg_master_for_stream(
3382 &context->res_ctx,
3383 stream);
3384
3385 if (!top_pipe_to_program)
3386 return;
3387
3388 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3389 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3390
3391 if (pipe->stream && pipe->plane_state) {
3392 set_p_state_switch_method(dc, context, pipe);
3393
3394 if (dc->debug.visual_confirm)
3395 dc_update_visual_confirm_color(dc, context, pipe);
3396 }
3397 }
3398
3399 for (i = 0; i < surface_count; i++) {
3400 struct dc_plane_state *plane_state = srf_updates[i].surface;
3401 /* set logical flag for lock/unlock use */
3402 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3403 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3404
3405 if (!pipe_ctx->plane_state)
3406 continue;
3407 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3408 continue;
3409 pipe_ctx->plane_state->triplebuffer_flips = false;
3410 if (update_type == UPDATE_TYPE_FAST &&
3411 dc->hwss.program_triplebuffer &&
3412 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3413 /* triple buffer for VUpdate only */
3414 pipe_ctx->plane_state->triplebuffer_flips = true;
3415 }
3416 }
3417 }
3418
3419 stream_status = dc_state_get_stream_status(context, stream);
3420
3421 build_dmub_cmd_list(dc,
3422 srf_updates,
3423 surface_count,
3424 stream,
3425 context,
3426 context->dc_dmub_cmd,
3427 &(context->dmub_cmd_count));
3428 hwss_build_fast_sequence(dc,
3429 context->dc_dmub_cmd,
3430 context->dmub_cmd_count,
3431 context->block_sequence,
3432 &(context->block_sequence_steps),
3433 top_pipe_to_program,
3434 stream_status);
3435 hwss_execute_sequence(dc,
3436 context->block_sequence,
3437
context->block_sequence_steps); 3438 /* Clear update flags so next flip doesn't have redundant programming 3439 * (if there's no stream update, the update flags are not cleared). 3440 * Surface updates are cleared unconditionally at the beginning of each flip, 3441 * so no need to clear here. 3442 */ 3443 if (top_pipe_to_program->stream) 3444 top_pipe_to_program->stream->update_flags.raw = 0; 3445 } 3446 3447 static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) 3448 { 3449 /* 3450 * This function calls HWSS to wait for any potentially double buffered 3451 * operations to complete. It should be invoked as a pre-amble prior 3452 * to full update programming before asserting any HW locks. 3453 */ 3454 int pipe_idx; 3455 int opp_inst; 3456 int opp_count = dc->res_pool->res_cap->num_opp; 3457 struct hubp *hubp; 3458 int mpcc_inst; 3459 const struct pipe_ctx *pipe_ctx; 3460 3461 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { 3462 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; 3463 3464 if (!pipe_ctx->stream) 3465 continue; 3466 3467 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) 3468 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); 3469 3470 hubp = pipe_ctx->plane_res.hubp; 3471 if (!hubp) 3472 continue; 3473 3474 mpcc_inst = hubp->inst; 3475 // MPCC inst is equal to pipe index in practice 3476 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { 3477 if ((dc->res_pool->opps[opp_inst] != NULL) && 3478 (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) { 3479 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); 3480 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; 3481 break; 3482 } 3483 } 3484 } 3485 } 3486 3487 static void commit_planes_for_stream(struct dc *dc, 3488 struct dc_surface_update *srf_updates, 3489 int surface_count, 3490 struct dc_stream_state *stream, 3491 struct dc_stream_update *stream_update, 3492 enum surface_update_type update_type, 3493 struct dc_state *context) 3494 { 3495 int i, j; 3496 struct pipe_ctx *top_pipe_to_program = NULL; 3497 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3498 bool subvp_prev_use = false; 3499 bool subvp_curr_use = false; 3500 uint8_t current_stream_mask = 0; 3501 3502 // Once we apply the new subvp context to hardware it won't be in the 3503 // dc->current_state anymore, so we have to cache it before we apply 3504 // the new SubVP context 3505 subvp_prev_use = false; 3506 dc_z10_restore(dc); 3507 if (update_type == UPDATE_TYPE_FULL) 3508 wait_for_outstanding_hw_updates(dc, context); 3509 3510 if (update_type == UPDATE_TYPE_FULL) { 3511 dc_allow_idle_optimizations(dc, false); 3512 3513 if (get_seamless_boot_stream_count(context) == 0) 3514 dc->hwss.prepare_bandwidth(dc, context); 3515 3516 if (dc->hwss.update_dsc_pg) 3517 dc->hwss.update_dsc_pg(dc, context, false); 3518 3519 context_clock_trace(dc, context); 3520 } 3521 3522 top_pipe_to_program = resource_get_otg_master_for_stream( 3523 &context->res_ctx, 3524 stream); 3525 ASSERT(top_pipe_to_program != NULL); 3526 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3527 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3528 3529 // Check old context for SubVP 3530 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 3531 if (subvp_prev_use) 3532 break; 3533 } 3534 3535 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3536 
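/* Check new context for SubVP phantom pipes */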
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3537 3538 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 3539 subvp_curr_use = true; 3540 break; 3541 } 3542 } 3543 3544 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3545 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3546 3547 if (pipe->stream && pipe->plane_state) { 3548 set_p_state_switch_method(dc, context, pipe); 3549 3550 if (dc->debug.visual_confirm) 3551 dc_update_visual_confirm_color(dc, context, pipe); 3552 } 3553 } 3554 3555 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3556 struct pipe_ctx *mpcc_pipe; 3557 struct pipe_ctx *odm_pipe; 3558 3559 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3560 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3561 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3562 } 3563 3564 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3565 if (top_pipe_to_program && 3566 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3567 if (should_use_dmub_lock(stream->link)) { 3568 union dmub_hw_lock_flags hw_locks = { 0 }; 3569 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3570 3571 hw_locks.bits.lock_dig = 1; 3572 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3573 3574 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3575 true, 3576 &hw_locks, 3577 &inst_flags); 3578 } else 3579 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3580 top_pipe_to_program->stream_res.tg); 3581 } 3582 3583 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3584 if (dc->hwss.subvp_pipe_control_lock) 3585 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3586 dc->hwss.interdependent_update_lock(dc, context, true); 3587 3588 } else { 3589 if (dc->hwss.subvp_pipe_control_lock) 3590 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3591 /* Lock the top pipe while updating plane addrs, since freesync requires 3592 * plane addr update event triggers to be synchronized. 3593 * top_pipe_to_program is expected to never be NULL 3594 */ 3595 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3596 } 3597 3598 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3599 3600 // Stream updates 3601 if (stream_update) 3602 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3603 3604 if (surface_count == 0) { 3605 /* 3606 * In case of turning off screen, no need to program front end a second time. 3607 * just return after program blank. 
	if (surface_count == 0) {
		/*
		 * In case of turning off the screen, there is no need to program the
		 * front end a second time; just return after programming blank.
		 */
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);

		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
			dc->hwss.interdependent_update_lock(dc, context, false);
		} else {
			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
		}
		dc->hwss.post_unlock_program_front_end(dc, context);

		if (update_type != UPDATE_TYPE_FAST)
			if (dc->hwss.commit_subvp_config)
				dc->hwss.commit_subvp_config(dc, context);

		/* Since phantom pipe programming is moved to post_unlock_program_front_end,
		 * move the SubVP lock to after the phantom pipes have been setup
		 */
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
					NULL, subvp_prev_use);
		return;
	}

	if (update_type != UPDATE_TYPE_FAST) {
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
					dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
					pipe_ctx->stream && pipe_ctx->plane_state) {
				/* Only update visual confirm for SUBVP and Mclk switching here.
				 * The bar appears on all pipes, so we need to update the bar on all
				 * displays so the information doesn't get stale.
				 */
				dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
						pipe_ctx->plane_res.hubp->inst);
			}
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;

		/* set logical flag for lock/unlock use */
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (!pipe_ctx->plane_state)
				continue;
			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
				continue;
			pipe_ctx->plane_state->triplebuffer_flips = false;
			if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
				/* triple buffer for VUpdate only */
				pipe_ctx->plane_state->triplebuffer_flips = true;
			}
		}
		if (update_type == UPDATE_TYPE_FULL) {
			/* force vsync flip when reconfiguring pipes to prevent underflow */
			plane_state->flip_immediate = false;
		}
	}

	// Update Type FULL, Surface updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
				!pipe_ctx->prev_odm_pipe &&
				should_update_pipe_for_stream(context, pipe_ctx, stream)) {
			struct dc_stream_status *stream_status = NULL;

			if (!pipe_ctx->plane_state)
				continue;

			/* Full fe update */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
				/* turn off triple buffer for full update */
				dc->hwss.program_triplebuffer(
						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
						dc, pipe_ctx->stream, stream_status->plane_count,
						context);
		}
	}
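	/* For full updates the whole front end is (re)programmed in one pass;
	 * optionally cross-check the DML-computed RQ/DLG/TTU register values
	 * against what was actually programmed.
	 */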
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
		dc->hwss.program_front_end_for_ctx(dc, context);
		if (dc->debug.validate_dml_output) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];

				if (cur_pipe->stream == NULL)
					continue;

				cur_pipe->plane_res.hubp->funcs->validate_dml_output(
						cur_pipe->plane_res.hubp, dc->ctx,
						&context->res_ctx.pipe_ctx[i].rq_regs,
						&context->res_ctx.pipe_ctx[i].dlg_regs,
						&context->res_ctx.pipe_ctx[i].ttu_regs);
			}
		}
	}

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
						continue;

					if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							pipe_ctx->plane_state->flip_immediate);
				}
			}

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
					continue;

				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
					continue;

				/* program triple buffer after lock based on flip type */
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
					/* only enable triplebuffer for fast_update */
					dc->hwss.program_triplebuffer(
							dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
				}
				if (pipe_ctx->plane_state->update_flags.bits.addr_update)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}

	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		dc->hwss.interdependent_update_lock(dc, context, false);
	} else {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}
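	/* If DSC was reconfigured, walk the OTG through VACTIVE, VBLANK and
	 * VACTIVE again so the double buffered registers latch before the
	 * double buffer lock is released.
	 */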
	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);

			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
						false,
						&hw_locks,
						&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
						top_pipe_to_program->stream_res.tg);
		}

	if (subvp_curr_use) {
		/* If enabling subvp or transitioning from subvp->subvp, enable the
		 * phantom streams before we program front end for the phantom pipes.
		 */
		if (update_type != UPDATE_TYPE_FAST) {
			if (dc->hwss.enable_phantom_streams)
				dc->hwss.enable_phantom_streams(dc, context);
		}
	}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

	if (subvp_prev_use && !subvp_curr_use) {
		/* If disabling subvp, disable phantom streams after front end
		 * programming has completed (we turn on phantom OTG in order
		 * to complete the plane disable for phantom pipes).
		 */
		if (dc->hwss.disable_phantom_streams)
			dc->hwss.disable_phantom_streams(dc, context);
	}

	if (update_type != UPDATE_TYPE_FAST)
		if (dc->hwss.commit_subvp_config)
			dc->hwss.commit_subvp_config(dc, context);

	/* Since phantom pipe programming is moved to post_unlock_program_front_end,
	 * move the SubVP lock to after the phantom pipes have been setup
	 */
	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
	} else {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
	}

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
				pipe_ctx->plane_state->skip_manual_trigger)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}

	current_stream_mask = get_stream_mask(dc, context);
	if (current_stream_mask != context->stream_mask) {
		context->stream_mask = current_stream_mask;
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
	}
}

/**
 * could_mpcc_tree_change_for_active_pipes - Check if the OPP associated with an
 * MPCC might change
 *
 * @dc: Used to get the current state status
 * @stream: Target stream, from which we want to remove the attached planes
 * @srf_updates: Array of surface updates
 * @surface_count: Number of surface updates
 * @is_plane_addition: [out] Filled out with true if it is a plane addition case
 *
 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
 * MPO if used simultaneously in some specific configurations (e.g., 4k@144).
 * This function checks if the incoming context requires applying a transition
 * state with unnecessary pipe splitting and ODM disabled to circumvent our
 * hardware limitations and prevent this edge case. If the OPP associated with
 * an MPCC might change due to plane additions, this function returns true.
 *
 * Return:
 * Return true if OPP and MPCC might change, otherwise return false.
 */
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates,
		int surface_count,
		bool *is_plane_addition)
{
	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
	bool force_minimal_pipe_splitting = false;
	bool subvp_active = false;
	uint32_t i;

	*is_plane_addition = false;

	if (cur_stream_status &&
			dc->current_state->stream_count > 0 &&
			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
		/* determine if minimal transition is required due to MPC */
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	if (cur_stream_status &&
			dc->current_state->stream_count == 1 &&
			dc->debug.enable_single_display_2to1_odm_policy) {
		/* determine if minimal transition is required due to dynamic ODM */
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}
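	/* Check whether any pipe in the current state is operating in SubVP
	 * mode, either as a main or as a phantom pipe.
	 */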
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
			subvp_active = true;
			break;
		}
	}

	/* For SubVP when adding or removing planes we need to add a minimal transition
	 * (even when disabling all planes). Whenever disabling a phantom pipe, we
	 * must use the minimal transition path to disable the pipe correctly.
	 *
	 * We want to use the minimal transition whenever subvp is active, not only if
	 * a plane is being added / removed from a subvp stream (an MPO plane can be
	 * added to a DRR pipe of a SubVP + DRR config, in which case we still want to
	 * run through a min transition to disable subvp).
	 */
	if (cur_stream_status && subvp_active) {
		/* determine if minimal transition is required due to SubVP */
		if (cur_stream_status->plane_count > surface_count) {
			force_minimal_pipe_splitting = true;
		} else if (cur_stream_status->plane_count < surface_count) {
			force_minimal_pipe_splitting = true;
			*is_plane_addition = true;
		}
	}

	return force_minimal_pipe_splitting;
}

struct pipe_split_policy_backup {
	bool dynamic_odm_policy;
	bool subvp_policy;
	enum pipe_split_policy mpc_policy;
};

static void release_minimal_transition_state(struct dc *dc,
		struct dc_state *context, struct pipe_split_policy_backup *policy)
{
	dc_state_release(context);
	/* restore previous pipe split and odm policy */
	if (!dc->config.is_vmin_only_asic)
		dc->debug.pipe_split_policy = policy->mpc_policy;
	dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
	dc->debug.force_disable_subvp = policy->subvp_policy;
}

static struct dc_state *create_minimal_transition_state(struct dc *dc,
		struct dc_state *base_context, struct pipe_split_policy_backup *policy)
{
	struct dc_state *minimal_transition_context = NULL;
	unsigned int i, j;
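	/* Temporarily force the most pipe-conservative policies (no MPC split,
	 * no dynamic ODM, no SubVP); the original values are saved in *policy
	 * and restored by release_minimal_transition_state().
	 */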
	if (!dc->config.is_vmin_only_asic) {
		policy->mpc_policy = dc->debug.pipe_split_policy;
		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}
	policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
	dc->debug.enable_single_display_2to1_odm_policy = false;
	policy->subvp_policy = dc->debug.force_disable_subvp;
	dc->debug.force_disable_subvp = true;

	minimal_transition_context = dc_state_create_copy(base_context);
	if (!minimal_transition_context)
		return NULL;

	/* commit minimal state */
	if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
		for (i = 0; i < minimal_transition_context->stream_count; i++) {
			struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];

			for (j = 0; j < stream_status->plane_count; j++) {
				struct dc_plane_state *plane_state = stream_status->plane_states[j];

				/* force vsync flip when reconfiguring pipes to prevent underflow
				 * and corruption
				 */
				plane_state->flip_immediate = false;
			}
		}
	} else {
		/* this should never happen */
		release_minimal_transition_state(dc, minimal_transition_context, policy);
		BREAK_TO_DEBUGGER();
		minimal_transition_context = NULL;
	}
	return minimal_transition_context;
}

static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream)
{
	bool success = false;
	struct dc_state *minimal_transition_context;
	struct pipe_split_policy_backup policy;

	/* commit based on new context */
	/* Since all phantom pipes are removed in full validation,
	 * we have to save and restore the subvp/mall config when
	 * we do a minimal transition since the flags marking the
	 * pipe as subvp/phantom will be cleared (dc copy constructor
	 * creates a shallow copy).
	 */
	minimal_transition_context = create_minimal_transition_state(dc,
			context, &policy);
	if (minimal_transition_context) {
		if (dc->hwss.is_pipe_topology_transition_seamless(
				dc, dc->current_state, minimal_transition_context) &&
			dc->hwss.is_pipe_topology_transition_seamless(
				dc, minimal_transition_context, context)) {
			DC_LOG_DC("%s base = new state\n", __func__);

			success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
		}
		release_minimal_transition_state(dc, minimal_transition_context, &policy);
	}

	if (!success) {
		/* commit based on current context */
		restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
		minimal_transition_context = create_minimal_transition_state(dc,
				dc->current_state, &policy);
		if (minimal_transition_context) {
			if (dc->hwss.is_pipe_topology_transition_seamless(
					dc, dc->current_state, minimal_transition_context) &&
				dc->hwss.is_pipe_topology_transition_seamless(
					dc, minimal_transition_context, context)) {
				DC_LOG_DC("%s base = current state\n", __func__);
				success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
			}
			release_minimal_transition_state(dc, minimal_transition_context, &policy);
		}
		restore_plane_states_for_stream(context->scratch.plane_states, stream);
	}

	ASSERT(success);
	return success;
}
/**
 * commit_minimal_transition_state - Create a transition pipe split state
 *
 * @dc: Used to get the current state status
 * @transition_base_context: New transition state
 *
 * In some specific configurations, such as pipe split on multi-display with
 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
 * programming when moving to new planes. To mitigate those types of problems,
 * this function adds a transition state that minimizes pipe usage before
 * programming the new configuration. When adding a new plane, the current
 * state requires the least pipes, so it is applied without splitting. When
 * removing a plane, the new state requires the least pipes, so it is applied
 * without splitting.
 *
 * Return:
 * Return false if something is wrong in the transition state.
 */
static bool commit_minimal_transition_state(struct dc *dc,
		struct dc_state *transition_base_context)
{
	struct dc_state *transition_context;
	struct pipe_split_policy_backup policy;
	enum dc_status ret = DC_ERROR_UNEXPECTED;
	unsigned int i, j;
	unsigned int pipe_in_use = 0;
	bool subvp_in_use = false;
	bool odm_in_use = false;

	/* check current pipes in use */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state)
			pipe_in_use++;
	}

	/* If SubVP is enabled and we are adding or removing planes from any main subvp
	 * pipe, we must use the minimal transition.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			subvp_in_use = true;
			break;
		}
	}

	/* If ODM is enabled and we are adding or removing planes from any ODM
	 * pipe, we must use the minimal transition.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];

		if (resource_is_pipe_type(pipe, OTG_MASTER)) {
			odm_in_use = resource_get_odm_slice_count(pipe) > 1;
			break;
		}
	}

	/* When the OS adds a new surface while all pipes are already in use by
	 * ODM combine or MPC split, we must use commit_minimal_transition_state
	 * to transition safely. After the OS exits MPO, it goes back to using
	 * ODM/MPC split across all pipes, so we must call this again. Otherwise
	 * return true to skip.
	 *
	 * This reduces the scenarios where dc_commit_state_no_check is used
	 * during flips, especially when entering/exiting MPO while DCN still
	 * has enough resources.
	 */
	if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
		return true;

	DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
			dc->current_state == transition_base_context ? "current" : "new",
			subvp_in_use ? "Subvp In Use" :
			odm_in_use ? "ODM in Use" :
			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
			"Unknown");

	transition_context = create_minimal_transition_state(dc,
			transition_base_context, &policy);
	if (transition_context) {
		ret = dc_commit_state_no_check(dc, transition_context);
		release_minimal_transition_state(dc, transition_context, &policy);
	}

	if (ret != DC_OK) {
		/* this should never happen */
		BREAK_TO_DEBUGGER();
		return false;
	}
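	/* Raising every update flag guarantees that the follow-up commit
	 * treats each surface as fully changed relative to the minimal state.
	 */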
	/* force full surface update */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
			dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
		}
	}

	return true;
}

/**
 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @surface_count: Number of surfaces that have an update
 * @stream: Corresponding stream to be updated in the current flip
 *
 * Updating seamless boot flags does not need to be part of the commit sequence.
 * This helper function will update the seamless boot flags on each flip (if
 * required) outside of the HW commit sequence (fast or slow).
 *
 * Return: void
 */
static void update_seamless_boot_flags(struct dc *dc,
		struct dc_state *context,
		int surface_count,
		struct dc_stream_state *stream)
{
	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;

			if (get_seamless_boot_stream_count(context) == 0)
				dc->optimized_required = true;
		}
	}
}

static void populate_fast_updates(struct dc_fast_update *fast_update,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_update *stream_update)
{
	int i = 0;

	if (stream_update) {
		fast_update[0].out_transfer_func = stream_update->out_transfer_func;
		fast_update[0].output_csc_transform = stream_update->output_csc_transform;
	}

	for (i = 0; i < surface_count; i++) {
		fast_update[i].flip_addr = srf_updates[i].flip_addr;
		fast_update[i].gamma = srf_updates[i].gamma;
		fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
		fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
		fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
	}
}

static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
{
	int i;

	if (fast_update[0].out_transfer_func ||
			fast_update[0].output_csc_transform)
		return true;

	for (i = 0; i < surface_count; i++) {
		if (fast_update[i].flip_addr ||
				fast_update[i].gamma ||
				fast_update[i].gamut_remap_matrix ||
				fast_update[i].input_csc_color_matrix ||
				fast_update[i].coeff_reduction_factor)
			return true;
	}

	return false;
}
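/* A full (slow) update is forced by any surface geometry, transfer function
 * or context-membership change, by most stream property updates, by a plane
 * count mismatch, or when idle optimizations are currently allowed.
 */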
static bool full_update_required(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *stream_status;
	const struct dc_state *context = dc->current_state;

	for (i = 0; i < surface_count; i++) {
		if (srf_updates &&
				(srf_updates[i].plane_info ||
				srf_updates[i].scaling_info ||
				(srf_updates[i].hdr_mult.value &&
				srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
				srf_updates[i].in_transfer_func ||
				srf_updates[i].func_shaper ||
				srf_updates[i].lut3d_func ||
				srf_updates[i].surface->force_full_update ||
				(srf_updates[i].flip_addr &&
				srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
				!is_surface_in_context(context, srf_updates[i].surface)))
			return true;
	}

	if (stream_update &&
			(((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update) ||
			stream_update->hdr_static_metadata ||
			stream_update->abm_level ||
			stream_update->periodic_interrupt ||
			stream_update->vrr_infopacket ||
			stream_update->vsc_infopacket ||
			stream_update->vsp_infopacket ||
			stream_update->hfvsif_infopacket ||
			stream_update->vtem_infopacket ||
			stream_update->adaptive_sync_infopacket ||
			stream_update->dpms_off ||
			stream_update->allow_freesync ||
			stream_update->vrr_active_variable ||
			stream_update->vrr_active_fixed ||
			stream_update->gamut_remap ||
			stream_update->output_color_space ||
			stream_update->dither_option ||
			stream_update->wb_update ||
			stream_update->dsc_config ||
			stream_update->mst_bw_update ||
			stream_update->func_shaper ||
			stream_update->lut3d_func ||
			stream_update->pending_test_pattern ||
			stream_update->crtc_timing_adjust))
		return true;

	if (stream) {
		stream_status = dc_stream_get_status(stream);
		if (stream_status == NULL || stream_status->plane_count != surface_count)
			return true;
	}
	if (dc->idle_optimizations_allowed)
		return true;

	return false;
}

static bool fast_update_only(struct dc *dc,
		struct dc_fast_update *fast_update,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		struct dc_stream_state *stream)
{
	return fast_updates_exist(fast_update, surface_count)
			&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}

static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	struct pipe_ctx *cur_pipe, *new_pipe;
	bool cur_is_odm_in_use, new_is_odm_in_use;
	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
	struct dc_stream_status *new_stream_status = stream_get_status(context, stream);

	if (!dc->debug.enable_single_display_2to1_odm_policy ||
			!dc->config.enable_windowed_mpo_odm)
		/* skip the check if windowed MPO ODM or dynamic ODM is turned
		 * off.
		 */
		return false;

	if (context == dc->current_state)
		/* skip the check for fast update */
		return false;

	if (new_stream_status->plane_count != cur_stream_status->plane_count)
		/* plane count changed, not a plane scaling update so not the
		 * case we are looking for
		 */
		return false;

	cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
	new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
	if (!cur_pipe || !new_pipe)
		return false;
	cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
	new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
	if (cur_is_odm_in_use == new_is_odm_in_use)
		/* ODM state isn't changed, not the case we are looking for */
		return false;

	if (dc->hwss.is_pipe_topology_transition_seamless &&
			dc->hwss.is_pipe_topology_transition_seamless(
					dc, dc->current_state, context))
		/* transition can be achieved without the need for committing
		 * minimal transition state first
		 */
		return false;

	return true;
}

bool dc_update_planes_and_stream(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update)
{
	struct dc_state *context;
	enum surface_update_type update_type;
	int i;
	struct dc_fast_update fast_update[MAX_SURFACES] = {0};

	/* In cases where MPO and split or ODM are used, transitions can
	 * cause underflow. Apply stream configuration with minimal pipe
	 * split first to avoid unsupported transitions for active pipes.
	 */
	bool force_minimal_pipe_splitting = 0;
	bool is_plane_addition = 0;
	bool is_fast_update_only;
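	/* Classify the update up front: when only flip-style data changed the
	 * fast path can be taken, otherwise a new context is built and
	 * committed below.
	 */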
	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
	is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
			surface_count, stream_update, stream);
	force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
			dc,
			stream,
			srf_updates,
			surface_count,
			&is_plane_addition);

	/* on plane addition, minimal state is the current one */
	if (force_minimal_pipe_splitting && is_plane_addition &&
			!commit_minimal_transition_state(dc, dc->current_state))
		return false;

	if (!update_planes_and_stream_state(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			&update_type,
			&context))
		return false;

	/* on plane removal, minimal state is the new one */
	if (force_minimal_pipe_splitting && !is_plane_addition) {
		/* Since all phantom pipes are removed in full validation,
		 * we have to save and restore the subvp/mall config when
		 * we do a minimal transition since the flags marking the
		 * pipe as subvp/phantom will be cleared (dc copy constructor
		 * creates a shallow copy).
		 */
		if (!commit_minimal_transition_state(dc, context)) {
			dc_state_release(context);
			return false;
		}
		update_type = UPDATE_TYPE_FULL;
	}

	/* When windowed MPO ODM is supported, we need to handle a special case
	 * where we can transition between ODM combine and MPC combine due to a
	 * plane scaling update. This transition requires committing a minimal
	 * transition state. The condition to trigger it can't be predicted by
	 * could_mpcc_tree_change_for_active_pipes because we can only determine
	 * it after DML validation. Therefore we can't rely on the existing
	 * commit minimal transition state sequence and instead handle this
	 * transition here with its own dedicated sequence.
	 */
	if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context))
		commit_minimal_transition_state_for_windowed_mpo_odm(dc,
				context, stream);

	update_seamless_boot_flags(dc, context, surface_count, stream);
	if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
		commit_planes_for_stream_fast(dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	} else {
		if (!stream_update &&
				dc->hwss.is_pipe_topology_transition_seamless &&
				!dc->hwss.is_pipe_topology_transition_seamless(
						dc, dc->current_state, context)) {
			DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
			BREAK_TO_DEBUGGER();
		}
		commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	}

	if (dc->current_state != context) {
		/* Since memory free requires elevated IRQL, an interrupt
		 * request is generated by mem free. If this happens
		 * between freeing and reassigning the context, our vsync
		 * interrupt will call into dc and cause a memory
		 * corruption BSOD. Hence, we first reassign the context,
		 * then free the old context.
		 */
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_state_release(old);

		// clear any forced full updates
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	return true;
}
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;
	struct dc_fast_update fast_update[MAX_SURFACES] = {0};

	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	/* TODO: Since changing the commit sequence can have a huge impact,
	 * we decided to only enable it for DCN3x. However, as soon as
	 * we get more confident about this change we'll need to enable
	 * the new sequence for all ASICs.
	 */
	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 */
		if (update_type == UPDATE_TYPE_FAST)
			dc_post_update_surfaces_to_stream(dc);

		dc_update_planes_and_stream(dc, srf_updates,
				surface_count, stream,
				stream_update);
		return;
	}
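	/* Legacy (pre-DCN3.2) sequence: build a new context for full updates,
	 * copy the updates into it and commit it below.
	 */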
	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {
		/* initialize scratch memory for building context */
		context = dc_state_create_copy(state);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	} else if (update_type == UPDATE_TYPE_FAST) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 */
		dc_post_update_surfaces_to_stream(dc);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
						&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_state_release(context);
			return;
		}
	}

	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	update_seamless_boot_flags(dc, context, surface_count, stream);
	if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
			!dc->debug.enable_legacy_fast_update) {
		commit_planes_for_stream_fast(dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	} else {
		commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	}
	/* update current_state */
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_state_release(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* Legacy optimization path for DCE. */
	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
		dc_post_update_surfaces_to_stream(dc);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}
}
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/*
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_power_down_on_boot(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
			dc->hwss.power_down_on_boot)
		dc->hwss.power_down_on_boot(dc);
}

void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	if (!dc->current_state)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_state_construct(dc, dc->current_state);

		dc_z10_restore(dc);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
				dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);

		dc_state_destruct(dc->current_state);

		break;
	}
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		dc->link_srv->resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				allow_active = true;
				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				allow_active = false;
				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
					return false;
			}
		}
	}

	return true;
}

/* enable/disable eDP Replay without specifying a stream for eDP */
bool dc_set_replay_allow_active(struct dc *dc, bool active)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->replay_settings.replay_feature_enabled) {
			if (active && !link->replay_settings.replay_allow_active) {
				allow_active = true;
				if (!dc_link_set_replay_allow_active(link, &allow_active,
						false, false, NULL))
					return false;
			} else if (!active && link->replay_settings.replay_allow_active) {
				allow_active = false;
				if (!dc_link_set_replay_allow_active(link, &allow_active,
						true, false, NULL))
					return false;
			}
		}
	}

	return true;
}

void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
	uint32_t idle_state = 0;

	if (dc->debug.disable_idle_power_optimizations)
		return false;

	if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
		return false;

	if (dc->hwss.get_idle_state)
		idle_state = dc->hwss.get_idle_state(dc);

	if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
			!(idle_state & DMUB_IPS2_ALLOW_MASK))
		return true;

	return false;
}

/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}

	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}

/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: True = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode. DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through the DM interface, or manually by escape
 * calls. Need to hold a dclock when doing so.
 *
 * Return: none (void function)
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
	bool p_state_change_support;

	if (!dc->config.dc_mode_clk_limit_support)
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
		if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
			maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
	}
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
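	/* funcMin is the DRAM clock currently required by the active mode,
	 * in MHz (rounded up); the DC mode soft max can only be applied when
	 * that requirement fits under it.
	 */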
	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}

bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}

void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/**
 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
 *
 * @dc: [in] dc structure
 *
 * Checks whether DMUB FW supports outbox notifications. If supported, DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	switch (dc->ctx->asic_id.chip_family) {
	case FAMILY_YELLOW_CARP:
		/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
		if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
				!dc->debug.dpia_debug.bits.disable_dpia)
			return true;
		break;

	case AMDGPU_FAMILY_GC_11_0_1:
	case AMDGPU_FAMILY_GC_11_5_0:
		if (!dc->debug.dpia_debug.bits.disable_dpia)
			return true;
		break;

	default:
		break;
	}

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported; this
 * API shall be removed after switching.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}
/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 * Sets port index appropriately for legacy DDC
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Return: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
		uint32_t link_index,
		struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
				payload->data,
				payload->length);
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
		uint32_t link_index,
		struct set_config_cmd_payload *payload,
		struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}

/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message
 *
 * Return:
 * DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
		uint32_t link_index,
		uint8_t mst_alloc_slots,
		uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub, if ret_status is 1 */
	if (cmd.set_config_access.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}

/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
		uint32_t hpd_int_enable)
{
	union dmub_rb_cmd cmd = {0};

	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
 *
 * @dc: [in] dc structure
 */
void dc_print_dmub_diagnostic_data(const struct dc *dc)
{
	dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */

/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
				uint32_t hpd_int_enable)
{
	union dmub_rb_cmd cmd = {0};

	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
 *
 * @dc: [in] dc structure
 */
void dc_print_dmub_diagnostic_data(const struct dc *dc)
{
	dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	if (link->replay_settings.replay_feature_enabled)
		return;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
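
/*
 * Usage sketch (illustrative only): a DM would typically call
 * dc_notify_vsync_int_state() from its vblank enable/disable hooks so that
 * ABM interrupts are started or stopped along with vsync. The hook name is
 * hypothetical; the stream is assumed to be the one driving the CRTC whose
 * vblank state changed.
 */
static inline void example_dm_vblank_state_changed(struct dc *dc,
						   struct dc_stream_state *stream,
						   bool vblank_enabled)
{
	/* dc already filters out PSR/Replay-enabled panels internally. */
	dc_notify_vsync_int_state(dc, stream, vblank_enabled);
}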

/*****************************************************************************
 *  dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
 *  ABM
 *  @dc: dc structure
 *  @stream: stream where ABM state should be saved or restored
 *  @pData: abm hw states
 *
 ****************************************************************************/
bool dc_abm_save_restore(
		struct dc *dc,
		struct dc_stream_state *stream,
		struct abm_save_restore *pData)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->replay_settings.replay_feature_enabled)
		return false;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return false;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++)
		if (edp_links[i] == link)
			break;

	if (i == edp_num)
		return false;

	if (pipe->stream_res.abm &&
		pipe->stream_res.abm->funcs->save_restore)
		return pipe->stream_res.abm->funcs->save_restore(
				pipe->stream_res.abm,
				i,
				pData);
	return false;
}

void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
	unsigned int i;
	bool subvp_sw_cursor_req = false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
			subvp_sw_cursor_req = true;
			break;
		}
	}
	properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
}

/**
 * dc_set_edp_power() - DM controls eDP power to be ON/OFF
 *
 * Called when DM wants to power on/off eDP.
 * Only works on links with the skip_implict_edp_power_control flag set.
 *
 * @dc: Current DC state
 * @edp_link: a link with eDP connector signal type
 * @powerOn: power on/off eDP
 *
 * Return: void
 */
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
		      bool powerOn)
{
	if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (edp_link->skip_implict_edp_power_control == false)
		return;

	edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}

/*
 *****************************************************************************
 *  dc_get_power_profile_for_dc_state() - extracts power profile from dc state
 *
 *  Called when DM wants to make power policy decisions based on dc_state
 *
 *****************************************************************************
 */
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
	struct dc_power_profile profile = { 0 };

	profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;

	return profile;
}
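
/*
 * Usage sketch (illustrative only): a DM-side power policy check built on
 * dc_get_power_profile_for_dc_state(). Per the implementation above, a
 * nonzero power_level means the state cannot support P-State changes, which
 * a DM might use to veto deeper power-saving modes. The helper name is
 * hypothetical.
 */
static inline bool example_dm_state_allows_pstate_change(const struct dc_state *context)
{
	struct dc_power_profile profile =
			dc_get_power_profile_for_dc_state(context);

	return profile.power_level == 0;
}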