1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 */ 24 25 #include "dm_services.h" 26 27 #include "dc.h" 28 29 #include "core_status.h" 30 #include "core_types.h" 31 #include "hw_sequencer.h" 32 #include "dce/dce_hwseq.h" 33 34 #include "resource.h" 35 36 #include "clk_mgr.h" 37 #include "clock_source.h" 38 #include "dc_bios_types.h" 39 40 #include "bios_parser_interface.h" 41 #include "bios/bios_parser_helper.h" 42 #include "include/irq_service_interface.h" 43 #include "transform.h" 44 #include "dmcu.h" 45 #include "dpp.h" 46 #include "timing_generator.h" 47 #include "abm.h" 48 #include "virtual/virtual_link_encoder.h" 49 #include "hubp.h" 50 51 #include "link_hwss.h" 52 #include "link_encoder.h" 53 #include "link_enc_cfg.h" 54 55 #include "dc_link.h" 56 #include "dc_link_ddc.h" 57 #include "dm_helpers.h" 58 #include "mem_input.h" 59 60 #include "dc_link_dp.h" 61 #include "dc_dmub_srv.h" 62 63 #include "dsc.h" 64 65 #include "vm_helper.h" 66 67 #include "dce/dce_i2c.h" 68 69 #include "dmub/dmub_srv.h" 70 71 #include "i2caux_interface.h" 72 73 #include "dce/dmub_psr.h" 74 75 #include "dce/dmub_hw_lock_mgr.h" 76 77 #include "dc_trace.h" 78 79 #include "dce/dmub_outbox.h" 80 81 #define CTX \ 82 dc->ctx 83 84 #define DC_LOGGER \ 85 dc->ctx->logger 86 87 static const char DC_BUILD_ID[] = "production-build"; 88 89 /** 90 * DOC: Overview 91 * 92 * DC is the OS-agnostic component of the amdgpu DC driver. 93 * 94 * DC maintains and validates a set of structs representing the state of the 95 * driver and writes that state to AMD hardware 96 * 97 * Main DC HW structs: 98 * 99 * struct dc - The central struct. One per driver. Created on driver load, 100 * destroyed on driver unload. 101 * 102 * struct dc_context - One per driver. 103 * Used as a backpointer by most other structs in dc. 104 * 105 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP 106 * plugpoints). Created on driver load, destroyed on driver unload. 107 * 108 * struct dc_sink - One per display. Created on boot or hotplug. 109 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink 110 * (the display directly attached). It may also have one or more remote 111 * sinks (in the Multi-Stream Transport case) 112 * 113 * struct resource_pool - One per driver. Represents the hw blocks not in the 114 * main pipeline. Not directly accessible by dm. 115 * 116 * Main dc state structs: 117 * 118 * These structs can be created and destroyed as needed. There is a full set of 119 * these structs in dc->current_state representing the currently programmed state. 120 * 121 * struct dc_state - The global DC state to track global state information, 122 * such as bandwidth values. 123 * 124 * struct dc_stream_state - Represents the hw configuration for the pipeline from 125 * a framebuffer to a display. Maps one-to-one with dc_sink. 126 * 127 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, 128 * and may have more in the Multi-Plane Overlay case. 129 * 130 * struct resource_context - Represents the programmable state of everything in 131 * the resource_pool. Not directly accessible by dm. 132 * 133 * struct pipe_ctx - A member of struct resource_context. Represents the 134 * internal hardware pipeline components. Each dc_plane_state has either 135 * one or two (in the pipe-split case). 136 */ 137 138 /******************************************************************************* 139 * Private functions 140 ******************************************************************************/ 141 142 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) 143 { 144 if (new > *original) 145 *original = new; 146 } 147 148 static void destroy_links(struct dc *dc) 149 { 150 uint32_t i; 151 152 for (i = 0; i < dc->link_count; i++) { 153 if (NULL != dc->links[i]) 154 link_destroy(&dc->links[i]); 155 } 156 } 157 158 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) 159 { 160 int i; 161 uint32_t count = 0; 162 163 for (i = 0; i < num_links; i++) { 164 if (links[i]->connector_signal == SIGNAL_TYPE_EDP || 165 links[i]->is_internal_display) 166 count++; 167 } 168 169 return count; 170 } 171 172 static int get_seamless_boot_stream_count(struct dc_state *ctx) 173 { 174 uint8_t i; 175 uint8_t seamless_boot_stream_count = 0; 176 177 for (i = 0; i < ctx->stream_count; i++) 178 if (ctx->streams[i]->apply_seamless_boot_optimization) 179 seamless_boot_stream_count++; 180 181 return seamless_boot_stream_count; 182 } 183 184 static bool create_links( 185 struct dc *dc, 186 uint32_t num_virtual_links) 187 { 188 int i; 189 int connectors_num; 190 struct dc_bios *bios = dc->ctx->dc_bios; 191 192 dc->link_count = 0; 193 194 connectors_num = bios->funcs->get_connectors_number(bios); 195 196 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); 197 198 if (connectors_num > ENUM_ID_COUNT) { 199 dm_error( 200 "DC: Number of connectors %d exceeds maximum of %d!\n", 201 connectors_num, 202 ENUM_ID_COUNT); 203 return false; 204 } 205 206 dm_output_to_console( 207 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 208 __func__, 209 connectors_num, 210 num_virtual_links); 211 212 for (i = 0; i < connectors_num; i++) { 213 struct link_init_data link_init_params = {0}; 214 struct dc_link *link; 215 216 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); 217 218 link_init_params.ctx = dc->ctx; 219 /* next BIOS object table connector */ 220 link_init_params.connector_index = i; 221 link_init_params.link_index = dc->link_count; 222 link_init_params.dc = dc; 223 link = link_create(&link_init_params); 224 225 if (link) { 226 dc->links[dc->link_count] = link; 227 link->dc = dc; 228 ++dc->link_count; 229 } 230 } 231 232 DC_LOG_DC("BIOS object table - end"); 233 234 /* Create a link for each usb4 dpia port */ 235 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { 236 struct link_init_data link_init_params = {0}; 237 struct dc_link *link; 238 239 link_init_params.ctx = dc->ctx; 240 link_init_params.connector_index = i; 241 link_init_params.link_index = dc->link_count; 242 link_init_params.dc = dc; 243 link_init_params.is_dpia_link = true; 244 245 link = link_create(&link_init_params); 246 if (link) { 247 dc->links[dc->link_count] = link; 248 link->dc = dc; 249 ++dc->link_count; 250 } 251 } 252 253 for (i = 0; i < num_virtual_links; i++) { 254 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); 255 struct encoder_init_data enc_init = {0}; 256 257 if (link == NULL) { 258 BREAK_TO_DEBUGGER(); 259 goto failed_alloc; 260 } 261 262 link->link_index = dc->link_count; 263 dc->links[dc->link_count] = link; 264 dc->link_count++; 265 266 link->ctx = dc->ctx; 267 link->dc = dc; 268 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 269 link->link_id.type = OBJECT_TYPE_CONNECTOR; 270 link->link_id.id = CONNECTOR_ID_VIRTUAL; 271 link->link_id.enum_id = ENUM_ID_1; 272 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 273 274 if (!link->link_enc) { 275 BREAK_TO_DEBUGGER(); 276 goto failed_alloc; 277 } 278 279 link->link_status.dpcd_caps = &link->dpcd_caps; 280 281 enc_init.ctx = dc->ctx; 282 enc_init.channel = CHANNEL_ID_UNKNOWN; 283 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 284 enc_init.transmitter = TRANSMITTER_UNKNOWN; 285 enc_init.connector = link->link_id; 286 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 287 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 288 enc_init.encoder.enum_id = ENUM_ID_1; 289 virtual_link_encoder_construct(link->link_enc, &enc_init); 290 } 291 292 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 293 294 return true; 295 296 failed_alloc: 297 return false; 298 } 299 300 /* Create additional DIG link encoder objects if fewer than the platform 301 * supports were created during link construction. This can happen if the 302 * number of physical connectors is less than the number of DIGs. 303 */ 304 static bool create_link_encoders(struct dc *dc) 305 { 306 bool res = true; 307 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 308 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 309 int i; 310 311 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 312 * link encoders and physical display endpoints and does not require 313 * additional link encoder objects. 314 */ 315 if (num_usb4_dpia == 0) 316 return res; 317 318 /* Create as many link encoder objects as the platform supports. DPIA 319 * endpoints can be programmably mapped to any DIG. 320 */ 321 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { 322 for (i = 0; i < num_dig_link_enc; i++) { 323 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 324 325 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { 326 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, 327 (enum engine_id)(ENGINE_ID_DIGA + i)); 328 if (link_enc) { 329 dc->res_pool->link_encoders[i] = link_enc; 330 dc->res_pool->dig_link_enc_count++; 331 } else { 332 res = false; 333 } 334 } 335 } 336 } 337 338 return res; 339 } 340 341 /* Destroy any additional DIG link encoder objects created by 342 * create_link_encoders(). 343 * NB: Must only be called after destroy_links(). 344 */ 345 static void destroy_link_encoders(struct dc *dc) 346 { 347 unsigned int num_usb4_dpia; 348 unsigned int num_dig_link_enc; 349 int i; 350 351 if (!dc->res_pool) 352 return; 353 354 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 355 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 356 357 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 358 * link encoders and physical display endpoints and does not require 359 * additional link encoder objects. 360 */ 361 if (num_usb4_dpia == 0) 362 return; 363 364 for (i = 0; i < num_dig_link_enc; i++) { 365 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 366 367 if (link_enc) { 368 link_enc->funcs->destroy(&link_enc); 369 dc->res_pool->link_encoders[i] = NULL; 370 dc->res_pool->dig_link_enc_count--; 371 } 372 } 373 } 374 375 static struct dc_perf_trace *dc_perf_trace_create(void) 376 { 377 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); 378 } 379 380 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) 381 { 382 kfree(*perf_trace); 383 *perf_trace = NULL; 384 } 385 386 /** 387 * dc_stream_adjust_vmin_vmax: 388 * 389 * Looks up the pipe context of dc_stream_state and updates the 390 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh 391 * Rate, which is a power-saving feature that targets reducing panel 392 * refresh rate while the screen is static 393 * 394 * @dc: dc reference 395 * @stream: Initial dc stream state 396 * @adjust: Updated parameters for vertical_total_min and vertical_total_max 397 */ 398 bool dc_stream_adjust_vmin_vmax(struct dc *dc, 399 struct dc_stream_state *stream, 400 struct dc_crtc_timing_adjust *adjust) 401 { 402 int i; 403 404 /* 405 * Don't adjust DRR while there's bandwidth optimizations pending to 406 * avoid conflicting with firmware updates. 407 */ 408 if (dc->ctx->dce_version > DCE_VERSION_MAX) 409 if (dc->optimized_required || dc->wm_optimized_required) 410 return false; 411 412 stream->adjust.v_total_max = adjust->v_total_max; 413 stream->adjust.v_total_mid = adjust->v_total_mid; 414 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; 415 stream->adjust.v_total_min = adjust->v_total_min; 416 417 for (i = 0; i < MAX_PIPES; i++) { 418 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 419 420 if (pipe->stream == stream && pipe->stream_res.tg) { 421 dc->hwss.set_drr(&pipe, 422 1, 423 *adjust); 424 425 return true; 426 } 427 } 428 return false; 429 } 430 431 /** 432 ***************************************************************************** 433 * Function: dc_stream_get_last_vrr_vtotal 434 * 435 * @brief 436 * Looks up the pipe context of dc_stream_state and gets the 437 * last VTOTAL used by DRR (Dynamic Refresh Rate) 438 * 439 * @param [in] dc: dc reference 440 * @param [in] stream: Initial dc stream state 441 * @param [in] adjust: Updated parameters for vertical_total_min and 442 * vertical_total_max 443 ***************************************************************************** 444 */ 445 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, 446 struct dc_stream_state *stream, 447 uint32_t *refresh_rate) 448 { 449 bool status = false; 450 451 int i = 0; 452 453 for (i = 0; i < MAX_PIPES; i++) { 454 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 455 456 if (pipe->stream == stream && pipe->stream_res.tg) { 457 /* Only execute if a function pointer has been defined for 458 * the DC version in question 459 */ 460 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { 461 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); 462 463 status = true; 464 465 break; 466 } 467 } 468 } 469 470 return status; 471 } 472 473 bool dc_stream_get_crtc_position(struct dc *dc, 474 struct dc_stream_state **streams, int num_streams, 475 unsigned int *v_pos, unsigned int *nom_v_pos) 476 { 477 /* TODO: Support multiple streams */ 478 const struct dc_stream_state *stream = streams[0]; 479 int i; 480 bool ret = false; 481 struct crtc_position position; 482 483 for (i = 0; i < MAX_PIPES; i++) { 484 struct pipe_ctx *pipe = 485 &dc->current_state->res_ctx.pipe_ctx[i]; 486 487 if (pipe->stream == stream && pipe->stream_res.stream_enc) { 488 dc->hwss.get_position(&pipe, 1, &position); 489 490 *v_pos = position.vertical_count; 491 *nom_v_pos = position.nominal_vcount; 492 ret = true; 493 } 494 } 495 return ret; 496 } 497 498 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 499 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream, 500 struct crc_params *crc_window) 501 { 502 int i; 503 struct dmcu *dmcu = dc->res_pool->dmcu; 504 struct pipe_ctx *pipe; 505 struct crc_region tmp_win, *crc_win; 506 struct otg_phy_mux mapping_tmp, *mux_mapping; 507 508 /*crc window can't be null*/ 509 if (!crc_window) 510 return false; 511 512 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { 513 crc_win = &tmp_win; 514 mux_mapping = &mapping_tmp; 515 /*set crc window*/ 516 tmp_win.x_start = crc_window->windowa_x_start; 517 tmp_win.y_start = crc_window->windowa_y_start; 518 tmp_win.x_end = crc_window->windowa_x_end; 519 tmp_win.y_end = crc_window->windowa_y_end; 520 521 for (i = 0; i < MAX_PIPES; i++) { 522 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 523 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 524 break; 525 } 526 527 /* Stream not found */ 528 if (i == MAX_PIPES) 529 return false; 530 531 532 /*set mux routing info*/ 533 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; 534 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; 535 536 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping); 537 } else { 538 DC_LOG_DC("dmcu is not initialized"); 539 return false; 540 } 541 542 return true; 543 } 544 545 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream) 546 { 547 int i; 548 struct dmcu *dmcu = dc->res_pool->dmcu; 549 struct pipe_ctx *pipe; 550 struct otg_phy_mux mapping_tmp, *mux_mapping; 551 552 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { 553 mux_mapping = &mapping_tmp; 554 555 for (i = 0; i < MAX_PIPES; i++) { 556 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 557 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 558 break; 559 } 560 561 /* Stream not found */ 562 if (i == MAX_PIPES) 563 return false; 564 565 566 /*set mux routing info*/ 567 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; 568 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; 569 570 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); 571 } else { 572 DC_LOG_DC("dmcu is not initialized"); 573 return false; 574 } 575 576 return true; 577 } 578 #endif 579 580 /** 581 * dc_stream_configure_crc() - Configure CRC capture for the given stream. 582 * @dc: DC Object 583 * @stream: The stream to configure CRC on. 584 * @enable: Enable CRC if true, disable otherwise. 585 * @crc_window: CRC window (x/y start/end) information 586 * @continuous: Capture CRC on every frame if true. Otherwise, only capture 587 * once. 588 * 589 * By default, only CRC0 is configured, and the entire frame is used to 590 * calculate the crc. 591 */ 592 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 593 struct crc_params *crc_window, bool enable, bool continuous) 594 { 595 int i; 596 struct pipe_ctx *pipe; 597 struct crc_params param; 598 struct timing_generator *tg; 599 600 for (i = 0; i < MAX_PIPES; i++) { 601 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 602 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 603 break; 604 } 605 /* Stream not found */ 606 if (i == MAX_PIPES) 607 return false; 608 609 /* By default, capture the full frame */ 610 param.windowa_x_start = 0; 611 param.windowa_y_start = 0; 612 param.windowa_x_end = pipe->stream->timing.h_addressable; 613 param.windowa_y_end = pipe->stream->timing.v_addressable; 614 param.windowb_x_start = 0; 615 param.windowb_y_start = 0; 616 param.windowb_x_end = pipe->stream->timing.h_addressable; 617 param.windowb_y_end = pipe->stream->timing.v_addressable; 618 619 if (crc_window) { 620 param.windowa_x_start = crc_window->windowa_x_start; 621 param.windowa_y_start = crc_window->windowa_y_start; 622 param.windowa_x_end = crc_window->windowa_x_end; 623 param.windowa_y_end = crc_window->windowa_y_end; 624 param.windowb_x_start = crc_window->windowb_x_start; 625 param.windowb_y_start = crc_window->windowb_y_start; 626 param.windowb_x_end = crc_window->windowb_x_end; 627 param.windowb_y_end = crc_window->windowb_y_end; 628 } 629 630 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 631 param.odm_mode = pipe->next_odm_pipe ? 1:0; 632 633 /* Default to the union of both windows */ 634 param.selection = UNION_WINDOW_A_B; 635 param.continuous_mode = continuous; 636 param.enable = enable; 637 638 tg = pipe->stream_res.tg; 639 640 /* Only call if supported */ 641 if (tg->funcs->configure_crc) 642 return tg->funcs->configure_crc(tg, ¶m); 643 DC_LOG_WARNING("CRC capture not supported."); 644 return false; 645 } 646 647 /** 648 * dc_stream_get_crc() - Get CRC values for the given stream. 649 * 650 * @dc: DC object. 651 * @stream: The DC stream state of the stream to get CRCs from. 652 * @r_cr: CRC value for the red component. 653 * @g_y: CRC value for the green component. 654 * @b_cb: CRC value for the blue component. 655 * 656 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 657 * 658 * Return: 659 * false if stream is not found, or if CRCs are not enabled. 660 */ 661 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 662 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 663 { 664 int i; 665 struct pipe_ctx *pipe; 666 struct timing_generator *tg; 667 668 for (i = 0; i < MAX_PIPES; i++) { 669 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 670 if (pipe->stream == stream) 671 break; 672 } 673 /* Stream not found */ 674 if (i == MAX_PIPES) 675 return false; 676 677 tg = pipe->stream_res.tg; 678 679 if (tg->funcs->get_crc) 680 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); 681 DC_LOG_WARNING("CRC capture not supported."); 682 return false; 683 } 684 685 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 686 enum dc_dynamic_expansion option) 687 { 688 /* OPP FMT dyn expansion updates*/ 689 int i; 690 struct pipe_ctx *pipe_ctx; 691 692 for (i = 0; i < MAX_PIPES; i++) { 693 if (dc->current_state->res_ctx.pipe_ctx[i].stream 694 == stream) { 695 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 696 pipe_ctx->stream_res.opp->dyn_expansion = option; 697 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 698 pipe_ctx->stream_res.opp, 699 COLOR_SPACE_YCBCR601, 700 stream->timing.display_color_depth, 701 stream->signal); 702 } 703 } 704 } 705 706 void dc_stream_set_dither_option(struct dc_stream_state *stream, 707 enum dc_dither_option option) 708 { 709 struct bit_depth_reduction_params params; 710 struct dc_link *link = stream->link; 711 struct pipe_ctx *pipes = NULL; 712 int i; 713 714 for (i = 0; i < MAX_PIPES; i++) { 715 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == 716 stream) { 717 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; 718 break; 719 } 720 } 721 722 if (!pipes) 723 return; 724 if (option > DITHER_OPTION_MAX) 725 return; 726 727 stream->dither_option = option; 728 729 memset(¶ms, 0, sizeof(params)); 730 resource_build_bit_depth_reduction_params(stream, ¶ms); 731 stream->bit_depth_params = params; 732 733 if (pipes->plane_res.xfm && 734 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { 735 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( 736 pipes->plane_res.xfm, 737 pipes->plane_res.scl_data.lb_params.depth, 738 &stream->bit_depth_params); 739 } 740 741 pipes->stream_res.opp->funcs-> 742 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); 743 } 744 745 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) 746 { 747 int i; 748 bool ret = false; 749 struct pipe_ctx *pipes; 750 751 for (i = 0; i < MAX_PIPES; i++) { 752 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 753 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 754 dc->hwss.program_gamut_remap(pipes); 755 ret = true; 756 } 757 } 758 759 return ret; 760 } 761 762 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) 763 { 764 int i; 765 bool ret = false; 766 struct pipe_ctx *pipes; 767 768 for (i = 0; i < MAX_PIPES; i++) { 769 if (dc->current_state->res_ctx.pipe_ctx[i].stream 770 == stream) { 771 772 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 773 dc->hwss.program_output_csc(dc, 774 pipes, 775 stream->output_color_space, 776 stream->csc_color_matrix.matrix, 777 pipes->stream_res.opp->inst); 778 ret = true; 779 } 780 } 781 782 return ret; 783 } 784 785 void dc_stream_set_static_screen_params(struct dc *dc, 786 struct dc_stream_state **streams, 787 int num_streams, 788 const struct dc_static_screen_params *params) 789 { 790 int i, j; 791 struct pipe_ctx *pipes_affected[MAX_PIPES]; 792 int num_pipes_affected = 0; 793 794 for (i = 0; i < num_streams; i++) { 795 struct dc_stream_state *stream = streams[i]; 796 797 for (j = 0; j < MAX_PIPES; j++) { 798 if (dc->current_state->res_ctx.pipe_ctx[j].stream 799 == stream) { 800 pipes_affected[num_pipes_affected++] = 801 &dc->current_state->res_ctx.pipe_ctx[j]; 802 } 803 } 804 } 805 806 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); 807 } 808 809 static void dc_destruct(struct dc *dc) 810 { 811 // reset link encoder assignment table on destruct 812 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) 813 link_enc_cfg_init(dc, dc->current_state); 814 815 if (dc->current_state) { 816 dc_release_state(dc->current_state); 817 dc->current_state = NULL; 818 } 819 820 destroy_links(dc); 821 822 destroy_link_encoders(dc); 823 824 if (dc->clk_mgr) { 825 dc_destroy_clk_mgr(dc->clk_mgr); 826 dc->clk_mgr = NULL; 827 } 828 829 dc_destroy_resource_pool(dc); 830 831 if (dc->ctx->gpio_service) 832 dal_gpio_service_destroy(&dc->ctx->gpio_service); 833 834 if (dc->ctx->created_bios) 835 dal_bios_parser_destroy(&dc->ctx->dc_bios); 836 837 dc_perf_trace_destroy(&dc->ctx->perf_trace); 838 839 kfree(dc->ctx); 840 dc->ctx = NULL; 841 842 kfree(dc->bw_vbios); 843 dc->bw_vbios = NULL; 844 845 kfree(dc->bw_dceip); 846 dc->bw_dceip = NULL; 847 848 kfree(dc->dcn_soc); 849 dc->dcn_soc = NULL; 850 851 kfree(dc->dcn_ip); 852 dc->dcn_ip = NULL; 853 854 kfree(dc->vm_helper); 855 dc->vm_helper = NULL; 856 857 } 858 859 static bool dc_construct_ctx(struct dc *dc, 860 const struct dc_init_data *init_params) 861 { 862 struct dc_context *dc_ctx; 863 enum dce_version dc_version = DCE_VERSION_UNKNOWN; 864 865 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 866 if (!dc_ctx) 867 return false; 868 869 dc_ctx->cgs_device = init_params->cgs_device; 870 dc_ctx->driver_context = init_params->driver; 871 dc_ctx->dc = dc; 872 dc_ctx->asic_id = init_params->asic_id; 873 dc_ctx->dc_sink_id_count = 0; 874 dc_ctx->dc_stream_id_count = 0; 875 dc_ctx->dce_environment = init_params->dce_environment; 876 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; 877 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; 878 879 /* Create logger */ 880 881 dc_version = resource_parse_asic_id(init_params->asic_id); 882 dc_ctx->dce_version = dc_version; 883 884 dc_ctx->perf_trace = dc_perf_trace_create(); 885 if (!dc_ctx->perf_trace) { 886 kfree(dc_ctx); 887 ASSERT_CRITICAL(false); 888 return false; 889 } 890 891 dc->ctx = dc_ctx; 892 893 return true; 894 } 895 896 static bool dc_construct(struct dc *dc, 897 const struct dc_init_data *init_params) 898 { 899 struct dc_context *dc_ctx; 900 struct bw_calcs_dceip *dc_dceip; 901 struct bw_calcs_vbios *dc_vbios; 902 struct dcn_soc_bounding_box *dcn_soc; 903 struct dcn_ip_params *dcn_ip; 904 905 dc->config = init_params->flags; 906 907 // Allocate memory for the vm_helper 908 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); 909 if (!dc->vm_helper) { 910 dm_error("%s: failed to create dc->vm_helper\n", __func__); 911 goto fail; 912 } 913 914 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 915 916 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); 917 if (!dc_dceip) { 918 dm_error("%s: failed to create dceip\n", __func__); 919 goto fail; 920 } 921 922 dc->bw_dceip = dc_dceip; 923 924 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); 925 if (!dc_vbios) { 926 dm_error("%s: failed to create vbios\n", __func__); 927 goto fail; 928 } 929 930 dc->bw_vbios = dc_vbios; 931 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 932 if (!dcn_soc) { 933 dm_error("%s: failed to create dcn_soc\n", __func__); 934 goto fail; 935 } 936 937 dc->dcn_soc = dcn_soc; 938 939 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); 940 if (!dcn_ip) { 941 dm_error("%s: failed to create dcn_ip\n", __func__); 942 goto fail; 943 } 944 945 dc->dcn_ip = dcn_ip; 946 947 if (!dc_construct_ctx(dc, init_params)) { 948 dm_error("%s: failed to create ctx\n", __func__); 949 goto fail; 950 } 951 952 dc_ctx = dc->ctx; 953 954 /* Resource should construct all asic specific resources. 955 * This should be the only place where we need to parse the asic id 956 */ 957 if (init_params->vbios_override) 958 dc_ctx->dc_bios = init_params->vbios_override; 959 else { 960 /* Create BIOS parser */ 961 struct bp_init_data bp_init_data; 962 963 bp_init_data.ctx = dc_ctx; 964 bp_init_data.bios = init_params->asic_id.atombios_base_address; 965 966 dc_ctx->dc_bios = dal_bios_parser_create( 967 &bp_init_data, dc_ctx->dce_version); 968 969 if (!dc_ctx->dc_bios) { 970 ASSERT_CRITICAL(false); 971 goto fail; 972 } 973 974 dc_ctx->created_bios = true; 975 } 976 977 dc->vendor_signature = init_params->vendor_signature; 978 979 /* Create GPIO service */ 980 dc_ctx->gpio_service = dal_gpio_service_create( 981 dc_ctx->dce_version, 982 dc_ctx->dce_environment, 983 dc_ctx); 984 985 if (!dc_ctx->gpio_service) { 986 ASSERT_CRITICAL(false); 987 goto fail; 988 } 989 990 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 991 if (!dc->res_pool) 992 goto fail; 993 994 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 995 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 996 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 997 998 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 999 if (!dc->clk_mgr) 1000 goto fail; 1001 #ifdef CONFIG_DRM_AMD_DC_DCN 1002 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 1003 1004 if (dc->res_pool->funcs->update_bw_bounding_box) { 1005 DC_FP_START(); 1006 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 1007 DC_FP_END(); 1008 } 1009 #endif 1010 1011 /* Creation of current_state must occur after dc->dml 1012 * is initialized in dc_create_resource_pool because 1013 * on creation it copies the contents of dc->dml 1014 */ 1015 1016 dc->current_state = dc_create_state(dc); 1017 1018 if (!dc->current_state) { 1019 dm_error("%s: failed to create validate ctx\n", __func__); 1020 goto fail; 1021 } 1022 1023 if (!create_links(dc, init_params->num_virtual_links)) 1024 goto fail; 1025 1026 /* Create additional DIG link encoder objects if fewer than the platform 1027 * supports were created during link construction. 1028 */ 1029 if (!create_link_encoders(dc)) 1030 goto fail; 1031 1032 dc_resource_state_construct(dc, dc->current_state); 1033 1034 return true; 1035 1036 fail: 1037 return false; 1038 } 1039 1040 static void disable_all_writeback_pipes_for_stream( 1041 const struct dc *dc, 1042 struct dc_stream_state *stream, 1043 struct dc_state *context) 1044 { 1045 int i; 1046 1047 for (i = 0; i < stream->num_wb_info; i++) 1048 stream->writeback_info[i].wb_enabled = false; 1049 } 1050 1051 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, 1052 struct dc_stream_state *stream, bool lock) 1053 { 1054 int i; 1055 1056 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 1057 if (dc->hwss.interdependent_update_lock) 1058 dc->hwss.interdependent_update_lock(dc, context, lock); 1059 else { 1060 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1061 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1062 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1063 1064 // Copied conditions that were previously in dce110_apply_ctx_for_surface 1065 if (stream == pipe_ctx->stream) { 1066 if (!pipe_ctx->top_pipe && 1067 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 1068 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 1069 } 1070 } 1071 } 1072 } 1073 1074 static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 1075 { 1076 int i, j; 1077 struct dc_state *dangling_context = dc_create_state(dc); 1078 struct dc_state *current_ctx; 1079 struct pipe_ctx *pipe; 1080 1081 if (dangling_context == NULL) 1082 return; 1083 1084 dc_resource_state_copy_construct(dc->current_state, dangling_context); 1085 1086 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1087 struct dc_stream_state *old_stream = 1088 dc->current_state->res_ctx.pipe_ctx[i].stream; 1089 bool should_disable = true; 1090 bool pipe_split_change = false; 1091 1092 if ((context->res_ctx.pipe_ctx[i].top_pipe) && 1093 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe)) 1094 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx != 1095 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx; 1096 else 1097 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe != 1098 dc->current_state->res_ctx.pipe_ctx[i].top_pipe; 1099 1100 for (j = 0; j < context->stream_count; j++) { 1101 if (old_stream == context->streams[j]) { 1102 should_disable = false; 1103 break; 1104 } 1105 } 1106 if (!should_disable && pipe_split_change && 1107 dc->current_state->stream_count != context->stream_count) 1108 should_disable = true; 1109 1110 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe && 1111 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) { 1112 struct pipe_ctx *old_pipe, *new_pipe; 1113 1114 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1115 new_pipe = &context->res_ctx.pipe_ctx[i]; 1116 1117 if (old_pipe->plane_state && !new_pipe->plane_state) 1118 should_disable = true; 1119 } 1120 1121 if (should_disable && old_stream) { 1122 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1123 /* When disabling plane for a phantom pipe, we must turn on the 1124 * phantom OTG so the disable programming gets the double buffer 1125 * update. Otherwise the pipe will be left in a partially disabled 1126 * state that can result in underflow or hang when enabling it 1127 * again for different use. 1128 */ 1129 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1130 pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); 1131 } 1132 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1133 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 1134 1135 if (dc->hwss.apply_ctx_for_surface) { 1136 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); 1137 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); 1138 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); 1139 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1140 } 1141 if (dc->hwss.program_front_end_for_ctx) { 1142 dc->hwss.interdependent_update_lock(dc, dc->current_state, true); 1143 dc->hwss.program_front_end_for_ctx(dc, dangling_context); 1144 dc->hwss.interdependent_update_lock(dc, dc->current_state, false); 1145 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1146 } 1147 } 1148 } 1149 1150 current_ctx = dc->current_state; 1151 dc->current_state = dangling_context; 1152 dc_release_state(current_ctx); 1153 } 1154 1155 static void disable_vbios_mode_if_required( 1156 struct dc *dc, 1157 struct dc_state *context) 1158 { 1159 unsigned int i, j; 1160 1161 /* check if timing_changed, disable stream*/ 1162 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1163 struct dc_stream_state *stream = NULL; 1164 struct dc_link *link = NULL; 1165 struct pipe_ctx *pipe = NULL; 1166 1167 pipe = &context->res_ctx.pipe_ctx[i]; 1168 stream = pipe->stream; 1169 if (stream == NULL) 1170 continue; 1171 1172 // only looking for first odm pipe 1173 if (pipe->prev_odm_pipe) 1174 continue; 1175 1176 if (stream->link->local_sink && 1177 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1178 link = stream->link; 1179 } 1180 1181 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 1182 unsigned int enc_inst, tg_inst = 0; 1183 unsigned int pix_clk_100hz; 1184 1185 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1186 if (enc_inst != ENGINE_ID_UNKNOWN) { 1187 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 1188 if (dc->res_pool->stream_enc[j]->id == enc_inst) { 1189 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( 1190 dc->res_pool->stream_enc[j]); 1191 break; 1192 } 1193 } 1194 1195 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1196 dc->res_pool->dp_clock_source, 1197 tg_inst, &pix_clk_100hz); 1198 1199 if (link->link_status.link_active) { 1200 uint32_t requested_pix_clk_100hz = 1201 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; 1202 1203 if (pix_clk_100hz != requested_pix_clk_100hz) { 1204 core_link_disable_stream(pipe); 1205 pipe->stream->dpms_off = false; 1206 } 1207 } 1208 } 1209 } 1210 } 1211 } 1212 1213 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 1214 { 1215 int i; 1216 PERF_TRACE(); 1217 for (i = 0; i < MAX_PIPES; i++) { 1218 int count = 0; 1219 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1220 1221 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) 1222 continue; 1223 1224 /* Timeout 100 ms */ 1225 while (count < 100000) { 1226 /* Must set to false to start with, due to OR in update function */ 1227 pipe->plane_state->status.is_flip_pending = false; 1228 dc->hwss.update_pending_status(pipe); 1229 if (!pipe->plane_state->status.is_flip_pending) 1230 break; 1231 udelay(1); 1232 count++; 1233 } 1234 ASSERT(!pipe->plane_state->status.is_flip_pending); 1235 } 1236 PERF_TRACE(); 1237 } 1238 1239 /******************************************************************************* 1240 * Public functions 1241 ******************************************************************************/ 1242 1243 struct dc *dc_create(const struct dc_init_data *init_params) 1244 { 1245 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1246 unsigned int full_pipe_count; 1247 1248 if (!dc) 1249 return NULL; 1250 1251 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 1252 if (!dc_construct_ctx(dc, init_params)) 1253 goto destruct_dc; 1254 } else { 1255 if (!dc_construct(dc, init_params)) 1256 goto destruct_dc; 1257 1258 full_pipe_count = dc->res_pool->pipe_count; 1259 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 1260 full_pipe_count--; 1261 dc->caps.max_streams = min( 1262 full_pipe_count, 1263 dc->res_pool->stream_enc_count); 1264 1265 dc->caps.max_links = dc->link_count; 1266 dc->caps.max_audios = dc->res_pool->audio_count; 1267 dc->caps.linear_pitch_alignment = 64; 1268 1269 dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 1270 1271 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; 1272 1273 if (dc->res_pool->dmcu != NULL) 1274 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 1275 } 1276 1277 dc->dcn_reg_offsets = init_params->dcn_reg_offsets; 1278 dc->nbio_reg_offsets = init_params->nbio_reg_offsets; 1279 1280 /* Populate versioning information */ 1281 dc->versions.dc_ver = DC_VER; 1282 1283 dc->build_id = DC_BUILD_ID; 1284 1285 DC_LOG_DC("Display Core initialized\n"); 1286 1287 1288 1289 return dc; 1290 1291 destruct_dc: 1292 dc_destruct(dc); 1293 kfree(dc); 1294 return NULL; 1295 } 1296 1297 static void detect_edp_presence(struct dc *dc) 1298 { 1299 struct dc_link *edp_links[MAX_NUM_EDP]; 1300 struct dc_link *edp_link = NULL; 1301 enum dc_connection_type type; 1302 int i; 1303 int edp_num; 1304 1305 get_edp_links(dc, edp_links, &edp_num); 1306 if (!edp_num) 1307 return; 1308 1309 for (i = 0; i < edp_num; i++) { 1310 edp_link = edp_links[i]; 1311 if (dc->config.edp_not_connected) { 1312 edp_link->edp_sink_present = false; 1313 } else { 1314 dc_link_detect_sink(edp_link, &type); 1315 edp_link->edp_sink_present = (type != dc_connection_none); 1316 } 1317 } 1318 } 1319 1320 void dc_hardware_init(struct dc *dc) 1321 { 1322 1323 detect_edp_presence(dc); 1324 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 1325 dc->hwss.init_hw(dc); 1326 } 1327 1328 void dc_init_callbacks(struct dc *dc, 1329 const struct dc_callback_init *init_params) 1330 { 1331 #ifdef CONFIG_DRM_AMD_DC_HDCP 1332 dc->ctx->cp_psp = init_params->cp_psp; 1333 #endif 1334 } 1335 1336 void dc_deinit_callbacks(struct dc *dc) 1337 { 1338 #ifdef CONFIG_DRM_AMD_DC_HDCP 1339 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); 1340 #endif 1341 } 1342 1343 void dc_destroy(struct dc **dc) 1344 { 1345 dc_destruct(*dc); 1346 kfree(*dc); 1347 *dc = NULL; 1348 } 1349 1350 static void enable_timing_multisync( 1351 struct dc *dc, 1352 struct dc_state *ctx) 1353 { 1354 int i, multisync_count = 0; 1355 int pipe_count = dc->res_pool->pipe_count; 1356 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; 1357 1358 for (i = 0; i < pipe_count; i++) { 1359 if (!ctx->res_ctx.pipe_ctx[i].stream || 1360 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 1361 continue; 1362 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) 1363 continue; 1364 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 1365 multisync_count++; 1366 } 1367 1368 if (multisync_count > 0) { 1369 dc->hwss.enable_per_frame_crtc_position_reset( 1370 dc, multisync_count, multisync_pipes); 1371 } 1372 } 1373 1374 static void program_timing_sync( 1375 struct dc *dc, 1376 struct dc_state *ctx) 1377 { 1378 int i, j, k; 1379 int group_index = 0; 1380 int num_group = 0; 1381 int pipe_count = dc->res_pool->pipe_count; 1382 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; 1383 1384 for (i = 0; i < pipe_count; i++) { 1385 if (!ctx->res_ctx.pipe_ctx[i].stream 1386 || ctx->res_ctx.pipe_ctx[i].top_pipe 1387 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) 1388 continue; 1389 1390 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; 1391 } 1392 1393 for (i = 0; i < pipe_count; i++) { 1394 int group_size = 1; 1395 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; 1396 struct pipe_ctx *pipe_set[MAX_PIPES]; 1397 1398 if (!unsynced_pipes[i]) 1399 continue; 1400 1401 pipe_set[0] = unsynced_pipes[i]; 1402 unsynced_pipes[i] = NULL; 1403 1404 /* Add tg to the set, search rest of the tg's for ones with 1405 * same timing, add all tgs with same timing to the group 1406 */ 1407 for (j = i + 1; j < pipe_count; j++) { 1408 if (!unsynced_pipes[j]) 1409 continue; 1410 if (sync_type != TIMING_SYNCHRONIZABLE && 1411 dc->hwss.enable_vblanks_synchronization && 1412 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && 1413 resource_are_vblanks_synchronizable( 1414 unsynced_pipes[j]->stream, 1415 pipe_set[0]->stream)) { 1416 sync_type = VBLANK_SYNCHRONIZABLE; 1417 pipe_set[group_size] = unsynced_pipes[j]; 1418 unsynced_pipes[j] = NULL; 1419 group_size++; 1420 } else 1421 if (sync_type != VBLANK_SYNCHRONIZABLE && 1422 resource_are_streams_timing_synchronizable( 1423 unsynced_pipes[j]->stream, 1424 pipe_set[0]->stream)) { 1425 sync_type = TIMING_SYNCHRONIZABLE; 1426 pipe_set[group_size] = unsynced_pipes[j]; 1427 unsynced_pipes[j] = NULL; 1428 group_size++; 1429 } 1430 } 1431 1432 /* set first unblanked pipe as master */ 1433 for (j = 0; j < group_size; j++) { 1434 bool is_blanked; 1435 1436 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1437 is_blanked = 1438 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1439 else 1440 is_blanked = 1441 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1442 if (!is_blanked) { 1443 if (j == 0) 1444 break; 1445 1446 swap(pipe_set[0], pipe_set[j]); 1447 break; 1448 } 1449 } 1450 1451 for (k = 0; k < group_size; k++) { 1452 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); 1453 1454 status->timing_sync_info.group_id = num_group; 1455 status->timing_sync_info.group_size = group_size; 1456 if (k == 0) 1457 status->timing_sync_info.master = true; 1458 else 1459 status->timing_sync_info.master = false; 1460 1461 } 1462 1463 /* remove any other pipes that are already been synced */ 1464 if (dc->config.use_pipe_ctx_sync_logic) { 1465 /* check pipe's syncd to decide which pipe to be removed */ 1466 for (j = 1; j < group_size; j++) { 1467 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { 1468 group_size--; 1469 pipe_set[j] = pipe_set[group_size]; 1470 j--; 1471 } else 1472 /* link slave pipe's syncd with master pipe */ 1473 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1474 } 1475 } else { 1476 for (j = j + 1; j < group_size; j++) { 1477 bool is_blanked; 1478 1479 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1480 is_blanked = 1481 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1482 else 1483 is_blanked = 1484 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1485 if (!is_blanked) { 1486 group_size--; 1487 pipe_set[j] = pipe_set[group_size]; 1488 j--; 1489 } 1490 } 1491 } 1492 1493 if (group_size > 1) { 1494 if (sync_type == TIMING_SYNCHRONIZABLE) { 1495 dc->hwss.enable_timing_synchronization( 1496 dc, group_index, group_size, pipe_set); 1497 } else 1498 if (sync_type == VBLANK_SYNCHRONIZABLE) { 1499 dc->hwss.enable_vblanks_synchronization( 1500 dc, group_index, group_size, pipe_set); 1501 } 1502 group_index++; 1503 } 1504 num_group++; 1505 } 1506 } 1507 1508 static bool streams_changed(struct dc *dc, 1509 struct dc_stream_state *streams[], 1510 uint8_t stream_count) 1511 { 1512 uint8_t i; 1513 1514 if (stream_count != dc->current_state->stream_count) 1515 return true; 1516 1517 for (i = 0; i < dc->current_state->stream_count; i++) { 1518 if (dc->current_state->streams[i] != streams[i]) 1519 return true; 1520 if (!streams[i]->link->link_state_valid) 1521 return true; 1522 } 1523 1524 return false; 1525 } 1526 1527 bool dc_validate_boot_timing(const struct dc *dc, 1528 const struct dc_sink *sink, 1529 struct dc_crtc_timing *crtc_timing) 1530 { 1531 struct timing_generator *tg; 1532 struct stream_encoder *se = NULL; 1533 1534 struct dc_crtc_timing hw_crtc_timing = {0}; 1535 1536 struct dc_link *link = sink->link; 1537 unsigned int i, enc_inst, tg_inst = 0; 1538 1539 /* Support seamless boot on EDP displays only */ 1540 if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1541 return false; 1542 } 1543 1544 if (dc->debug.force_odm_combine) 1545 return false; 1546 1547 /* Check for enabled DIG to identify enabled display */ 1548 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) 1549 return false; 1550 1551 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1552 1553 if (enc_inst == ENGINE_ID_UNKNOWN) 1554 return false; 1555 1556 for (i = 0; i < dc->res_pool->stream_enc_count; i++) { 1557 if (dc->res_pool->stream_enc[i]->id == enc_inst) { 1558 1559 se = dc->res_pool->stream_enc[i]; 1560 1561 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( 1562 dc->res_pool->stream_enc[i]); 1563 break; 1564 } 1565 } 1566 1567 // tg_inst not found 1568 if (i == dc->res_pool->stream_enc_count) 1569 return false; 1570 1571 if (tg_inst >= dc->res_pool->timing_generator_count) 1572 return false; 1573 1574 tg = dc->res_pool->timing_generators[tg_inst]; 1575 1576 if (!tg->funcs->get_hw_timing) 1577 return false; 1578 1579 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) 1580 return false; 1581 1582 if (crtc_timing->h_total != hw_crtc_timing.h_total) 1583 return false; 1584 1585 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) 1586 return false; 1587 1588 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) 1589 return false; 1590 1591 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) 1592 return false; 1593 1594 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) 1595 return false; 1596 1597 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) 1598 return false; 1599 1600 if (crtc_timing->v_total != hw_crtc_timing.v_total) 1601 return false; 1602 1603 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) 1604 return false; 1605 1606 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) 1607 return false; 1608 1609 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) 1610 return false; 1611 1612 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) 1613 return false; 1614 1615 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1616 return false; 1617 1618 /* block DSC for now, as VBIOS does not currently support DSC timings */ 1619 if (crtc_timing->flags.DSC) 1620 return false; 1621 1622 if (dc_is_dp_signal(link->connector_signal)) { 1623 unsigned int pix_clk_100hz; 1624 uint32_t numOdmPipes = 1; 1625 uint32_t id_src[4] = {0}; 1626 1627 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1628 dc->res_pool->dp_clock_source, 1629 tg_inst, &pix_clk_100hz); 1630 1631 if (tg->funcs->get_optc_source) 1632 tg->funcs->get_optc_source(tg, 1633 &numOdmPipes, &id_src[0], &id_src[1]); 1634 1635 if (numOdmPipes == 2) 1636 pix_clk_100hz *= 2; 1637 if (numOdmPipes == 4) 1638 pix_clk_100hz *= 4; 1639 1640 // Note: In rare cases, HW pixclk may differ from crtc's pixclk 1641 // slightly due to rounding issues in 10 kHz units. 1642 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) 1643 return false; 1644 1645 if (!se->funcs->dp_get_pixel_format) 1646 return false; 1647 1648 if (!se->funcs->dp_get_pixel_format( 1649 se, 1650 &hw_crtc_timing.pixel_encoding, 1651 &hw_crtc_timing.display_color_depth)) 1652 return false; 1653 1654 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) 1655 return false; 1656 1657 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) 1658 return false; 1659 } 1660 1661 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1662 return false; 1663 } 1664 1665 if (is_edp_ilr_optimization_required(link, crtc_timing)) { 1666 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1667 return false; 1668 } 1669 1670 return true; 1671 } 1672 1673 static inline bool should_update_pipe_for_stream( 1674 struct dc_state *context, 1675 struct pipe_ctx *pipe_ctx, 1676 struct dc_stream_state *stream) 1677 { 1678 return (pipe_ctx->stream && pipe_ctx->stream == stream); 1679 } 1680 1681 static inline bool should_update_pipe_for_plane( 1682 struct dc_state *context, 1683 struct pipe_ctx *pipe_ctx, 1684 struct dc_plane_state *plane_state) 1685 { 1686 return (pipe_ctx->plane_state == plane_state); 1687 } 1688 1689 void dc_enable_stereo( 1690 struct dc *dc, 1691 struct dc_state *context, 1692 struct dc_stream_state *streams[], 1693 uint8_t stream_count) 1694 { 1695 int i, j; 1696 struct pipe_ctx *pipe; 1697 1698 for (i = 0; i < MAX_PIPES; i++) { 1699 if (context != NULL) { 1700 pipe = &context->res_ctx.pipe_ctx[i]; 1701 } else { 1702 context = dc->current_state; 1703 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1704 } 1705 1706 for (j = 0; pipe && j < stream_count; j++) { 1707 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 1708 dc->hwss.setup_stereo) 1709 dc->hwss.setup_stereo(pipe, dc); 1710 } 1711 } 1712 } 1713 1714 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 1715 { 1716 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 1717 enable_timing_multisync(dc, context); 1718 program_timing_sync(dc, context); 1719 } 1720 } 1721 1722 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 1723 { 1724 int i; 1725 unsigned int stream_mask = 0; 1726 1727 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1728 if (context->res_ctx.pipe_ctx[i].stream) 1729 stream_mask |= 1 << i; 1730 } 1731 1732 return stream_mask; 1733 } 1734 1735 void dc_z10_restore(const struct dc *dc) 1736 { 1737 if (dc->hwss.z10_restore) 1738 dc->hwss.z10_restore(dc); 1739 } 1740 1741 void dc_z10_save_init(struct dc *dc) 1742 { 1743 if (dc->hwss.z10_save_init) 1744 dc->hwss.z10_save_init(dc); 1745 } 1746 1747 /* 1748 * Applies given context to HW and copy it into current context. 1749 * It's up to the user to release the src context afterwards. 1750 */ 1751 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1752 { 1753 struct dc_bios *dcb = dc->ctx->dc_bios; 1754 enum dc_status result = DC_ERROR_UNEXPECTED; 1755 struct pipe_ctx *pipe; 1756 int i, k, l; 1757 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1758 struct dc_state *old_state; 1759 bool subvp_prev_use = false; 1760 1761 dc_z10_restore(dc); 1762 dc_allow_idle_optimizations(dc, false); 1763 1764 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1765 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1766 1767 /* Check old context for SubVP */ 1768 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 1769 if (subvp_prev_use) 1770 break; 1771 } 1772 1773 for (i = 0; i < context->stream_count; i++) 1774 dc_streams[i] = context->streams[i]; 1775 1776 if (!dcb->funcs->is_accelerated_mode(dcb)) { 1777 disable_vbios_mode_if_required(dc, context); 1778 dc->hwss.enable_accelerated_mode(dc, context); 1779 } 1780 1781 if (context->stream_count > get_seamless_boot_stream_count(context) || 1782 context->stream_count == 0) 1783 dc->hwss.prepare_bandwidth(dc, context); 1784 1785 /* When SubVP is active, all HW programming must be done while 1786 * SubVP lock is acquired 1787 */ 1788 if (dc->hwss.subvp_pipe_control_lock) 1789 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 1790 1791 if (dc->debug.enable_double_buffered_dsc_pg_support) 1792 dc->hwss.update_dsc_pg(dc, context, false); 1793 1794 disable_dangling_plane(dc, context); 1795 /* re-program planes for existing stream, in case we need to 1796 * free up plane resource for later use 1797 */ 1798 if (dc->hwss.apply_ctx_for_surface) { 1799 for (i = 0; i < context->stream_count; i++) { 1800 if (context->streams[i]->mode_changed) 1801 continue; 1802 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1803 dc->hwss.apply_ctx_for_surface( 1804 dc, context->streams[i], 1805 context->stream_status[i].plane_count, 1806 context); /* use new pipe config in new context */ 1807 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1808 dc->hwss.post_unlock_program_front_end(dc, context); 1809 } 1810 } 1811 1812 /* Program hardware */ 1813 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1814 pipe = &context->res_ctx.pipe_ctx[i]; 1815 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 1816 } 1817 1818 result = dc->hwss.apply_ctx_to_hw(dc, context); 1819 1820 if (result != DC_OK) { 1821 /* Application of dc_state to hardware stopped. */ 1822 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 1823 return result; 1824 } 1825 1826 dc_trigger_sync(dc, context); 1827 1828 /* Program all planes within new context*/ 1829 if (dc->hwss.program_front_end_for_ctx) { 1830 dc->hwss.interdependent_update_lock(dc, context, true); 1831 dc->hwss.program_front_end_for_ctx(dc, context); 1832 dc->hwss.interdependent_update_lock(dc, context, false); 1833 dc->hwss.post_unlock_program_front_end(dc, context); 1834 } 1835 1836 if (dc->hwss.commit_subvp_config) 1837 dc->hwss.commit_subvp_config(dc, context); 1838 if (dc->hwss.subvp_pipe_control_lock) 1839 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 1840 1841 for (i = 0; i < context->stream_count; i++) { 1842 const struct dc_link *link = context->streams[i]->link; 1843 1844 if (!context->streams[i]->mode_changed) 1845 continue; 1846 1847 if (dc->hwss.apply_ctx_for_surface) { 1848 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1849 dc->hwss.apply_ctx_for_surface( 1850 dc, context->streams[i], 1851 context->stream_status[i].plane_count, 1852 context); 1853 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1854 dc->hwss.post_unlock_program_front_end(dc, context); 1855 } 1856 1857 /* 1858 * enable stereo 1859 * TODO rework dc_enable_stereo call to work with validation sets? 1860 */ 1861 for (k = 0; k < MAX_PIPES; k++) { 1862 pipe = &context->res_ctx.pipe_ctx[k]; 1863 1864 for (l = 0 ; pipe && l < context->stream_count; l++) { 1865 if (context->streams[l] && 1866 context->streams[l] == pipe->stream && 1867 dc->hwss.setup_stereo) 1868 dc->hwss.setup_stereo(pipe, dc); 1869 } 1870 } 1871 1872 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 1873 context->streams[i]->timing.h_addressable, 1874 context->streams[i]->timing.v_addressable, 1875 context->streams[i]->timing.h_total, 1876 context->streams[i]->timing.v_total, 1877 context->streams[i]->timing.pix_clk_100hz / 10); 1878 } 1879 1880 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 1881 1882 if (context->stream_count > get_seamless_boot_stream_count(context) || 1883 context->stream_count == 0) { 1884 /* Must wait for no flips to be pending before doing optimize bw */ 1885 wait_for_no_pipes_pending(dc, context); 1886 /* pplib is notified if disp_num changed */ 1887 dc->hwss.optimize_bandwidth(dc, context); 1888 } 1889 1890 if (dc->debug.enable_double_buffered_dsc_pg_support) 1891 dc->hwss.update_dsc_pg(dc, context, true); 1892 1893 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 1894 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 1895 else 1896 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 1897 1898 context->stream_mask = get_stream_mask(dc, context); 1899 1900 if (context->stream_mask != dc->current_state->stream_mask) 1901 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 1902 1903 for (i = 0; i < context->stream_count; i++) 1904 context->streams[i]->mode_changed = false; 1905 1906 old_state = dc->current_state; 1907 dc->current_state = context; 1908 1909 dc_release_state(old_state); 1910 1911 dc_retain_state(dc->current_state); 1912 1913 return result; 1914 } 1915 1916 /** 1917 * dc_commit_streams - Commit current stream state 1918 * 1919 * @dc: DC object with the commit state to be configured in the hardware 1920 * @streams: Array with a list of stream state 1921 * @stream_count: Total of streams 1922 * 1923 * Function responsible for commit streams change to the hardware. 1924 * 1925 * Return: 1926 * Return DC_OK if everything work as expected, otherwise, return a dc_status 1927 * code. 1928 */ 1929 enum dc_status dc_commit_streams(struct dc *dc, 1930 struct dc_stream_state *streams[], 1931 uint8_t stream_count) 1932 { 1933 int i; 1934 struct dc_state *context; 1935 enum dc_status res = DC_OK; 1936 1937 if (!streams_changed(dc, streams, stream_count)) 1938 return res; 1939 1940 DC_LOG_DC("%s: %d streams\n", __func__, stream_count); 1941 1942 for (i = 0; i < stream_count; i++) { 1943 struct dc_stream_state *stream = streams[i]; 1944 1945 dc_stream_log(dc, stream); 1946 } 1947 1948 context = dc_create_state(dc); 1949 if (!context) 1950 goto context_alloc_fail; 1951 1952 dc_resource_state_copy_construct_current(dc, context); 1953 1954 /* 1955 * Previous validation was perfomred with fast_validation = true and 1956 * the full DML state required for hardware programming was skipped. 1957 * 1958 * Re-validate here to calculate these parameters / watermarks. 1959 */ 1960 res = dc_validate_global_state(dc, context, false); 1961 if (res != DC_OK) { 1962 DC_LOG_ERROR("DC commit global validation failure: %s (%d)", 1963 dc_status_to_str(res), res); 1964 return res; 1965 } 1966 1967 res = dc_commit_state_no_check(dc, context); 1968 1969 context_alloc_fail: 1970 1971 DC_LOG_DC("%s Finished.\n", __func__); 1972 1973 return (res == DC_OK); 1974 } 1975 1976 /* TODO: When the transition to the new commit sequence is done, remove this 1977 * function in favor of dc_commit_streams. */ 1978 bool dc_commit_state(struct dc *dc, struct dc_state *context) 1979 { 1980 enum dc_status result = DC_ERROR_UNEXPECTED; 1981 int i; 1982 1983 /* TODO: Since change commit sequence can have a huge impact, 1984 * we decided to only enable it for DCN3x. However, as soon as 1985 * we get more confident about this change we'll need to enable 1986 * the new sequence for all ASICs. */ 1987 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 1988 result = dc_commit_streams(dc, context->streams, context->stream_count); 1989 return result == DC_OK; 1990 } 1991 1992 if (!streams_changed(dc, context->streams, context->stream_count)) 1993 return DC_OK; 1994 1995 DC_LOG_DC("%s: %d streams\n", 1996 __func__, context->stream_count); 1997 1998 for (i = 0; i < context->stream_count; i++) { 1999 struct dc_stream_state *stream = context->streams[i]; 2000 2001 dc_stream_log(dc, stream); 2002 } 2003 2004 /* 2005 * Previous validation was perfomred with fast_validation = true and 2006 * the full DML state required for hardware programming was skipped. 2007 * 2008 * Re-validate here to calculate these parameters / watermarks. 2009 */ 2010 result = dc_validate_global_state(dc, context, false); 2011 if (result != DC_OK) { 2012 DC_LOG_ERROR("DC commit global validation failure: %s (%d)", 2013 dc_status_to_str(result), result); 2014 return result; 2015 } 2016 2017 result = dc_commit_state_no_check(dc, context); 2018 2019 return (result == DC_OK); 2020 } 2021 2022 bool dc_acquire_release_mpc_3dlut( 2023 struct dc *dc, bool acquire, 2024 struct dc_stream_state *stream, 2025 struct dc_3dlut **lut, 2026 struct dc_transfer_func **shaper) 2027 { 2028 int pipe_idx; 2029 bool ret = false; 2030 bool found_pipe_idx = false; 2031 const struct resource_pool *pool = dc->res_pool; 2032 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2033 int mpcc_id = 0; 2034 2035 if (pool && res_ctx) { 2036 if (acquire) { 2037 /*find pipe idx for the given stream*/ 2038 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2039 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2040 found_pipe_idx = true; 2041 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2042 break; 2043 } 2044 } 2045 } else 2046 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2047 2048 if (found_pipe_idx) { 2049 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2050 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2051 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2052 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2053 } 2054 } 2055 return ret; 2056 } 2057 2058 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2059 { 2060 int i; 2061 struct pipe_ctx *pipe; 2062 2063 for (i = 0; i < MAX_PIPES; i++) { 2064 pipe = &context->res_ctx.pipe_ctx[i]; 2065 2066 // Don't check flip pending on phantom pipes 2067 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2068 continue; 2069 2070 /* Must set to false to start with, due to OR in update function */ 2071 pipe->plane_state->status.is_flip_pending = false; 2072 dc->hwss.update_pending_status(pipe); 2073 if (pipe->plane_state->status.is_flip_pending) 2074 return true; 2075 } 2076 return false; 2077 } 2078 2079 /* Perform updates here which need to be deferred until next vupdate 2080 * 2081 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2082 * but forcing lut memory to shutdown state is immediate. This causes 2083 * single frame corruption as lut gets disabled mid-frame unless shutdown 2084 * is deferred until after entering bypass. 2085 */ 2086 static void process_deferred_updates(struct dc *dc) 2087 { 2088 int i = 0; 2089 2090 if (dc->debug.enable_mem_low_power.bits.cm) { 2091 ASSERT(dc->dcn_ip->max_num_dpp); 2092 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2093 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2094 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2095 } 2096 } 2097 2098 void dc_post_update_surfaces_to_stream(struct dc *dc) 2099 { 2100 int i; 2101 struct dc_state *context = dc->current_state; 2102 2103 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2104 return; 2105 2106 post_surface_trace(dc); 2107 2108 /* 2109 * Only relevant for DCN behavior where we can guarantee the optimization 2110 * is safe to apply - retain the legacy behavior for DCE. 2111 */ 2112 2113 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2114 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2115 else { 2116 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2117 2118 if (is_flip_pending_in_pipes(dc, context)) 2119 return; 2120 2121 for (i = 0; i < dc->res_pool->pipe_count; i++) 2122 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2123 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2124 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2125 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 2126 } 2127 2128 process_deferred_updates(dc); 2129 2130 dc->hwss.optimize_bandwidth(dc, context); 2131 2132 if (dc->debug.enable_double_buffered_dsc_pg_support) 2133 dc->hwss.update_dsc_pg(dc, context, true); 2134 } 2135 2136 dc->optimized_required = false; 2137 dc->wm_optimized_required = false; 2138 } 2139 2140 static void init_state(struct dc *dc, struct dc_state *context) 2141 { 2142 /* Each context must have their own instance of VBA and in order to 2143 * initialize and obtain IP and SOC the base DML instance from DC is 2144 * initially copied into every context 2145 */ 2146 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2147 } 2148 2149 struct dc_state *dc_create_state(struct dc *dc) 2150 { 2151 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2152 GFP_KERNEL); 2153 2154 if (!context) 2155 return NULL; 2156 2157 init_state(dc, context); 2158 2159 kref_init(&context->refcount); 2160 2161 return context; 2162 } 2163 2164 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2165 { 2166 int i, j; 2167 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2168 2169 if (!new_ctx) 2170 return NULL; 2171 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2172 2173 for (i = 0; i < MAX_PIPES; i++) { 2174 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2175 2176 if (cur_pipe->top_pipe) 2177 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2178 2179 if (cur_pipe->bottom_pipe) 2180 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2181 2182 if (cur_pipe->prev_odm_pipe) 2183 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2184 2185 if (cur_pipe->next_odm_pipe) 2186 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2187 2188 } 2189 2190 for (i = 0; i < new_ctx->stream_count; i++) { 2191 dc_stream_retain(new_ctx->streams[i]); 2192 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2193 dc_plane_state_retain( 2194 new_ctx->stream_status[i].plane_states[j]); 2195 } 2196 2197 kref_init(&new_ctx->refcount); 2198 2199 return new_ctx; 2200 } 2201 2202 void dc_retain_state(struct dc_state *context) 2203 { 2204 kref_get(&context->refcount); 2205 } 2206 2207 static void dc_state_free(struct kref *kref) 2208 { 2209 struct dc_state *context = container_of(kref, struct dc_state, refcount); 2210 dc_resource_state_destruct(context); 2211 kvfree(context); 2212 } 2213 2214 void dc_release_state(struct dc_state *context) 2215 { 2216 kref_put(&context->refcount, dc_state_free); 2217 } 2218 2219 bool dc_set_generic_gpio_for_stereo(bool enable, 2220 struct gpio_service *gpio_service) 2221 { 2222 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2223 struct gpio_pin_info pin_info; 2224 struct gpio *generic; 2225 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2226 GFP_KERNEL); 2227 2228 if (!config) 2229 return false; 2230 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2231 2232 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2233 kfree(config); 2234 return false; 2235 } else { 2236 generic = dal_gpio_service_create_generic_mux( 2237 gpio_service, 2238 pin_info.offset, 2239 pin_info.mask); 2240 } 2241 2242 if (!generic) { 2243 kfree(config); 2244 return false; 2245 } 2246 2247 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2248 2249 config->enable_output_from_mux = enable; 2250 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2251 2252 if (gpio_result == GPIO_RESULT_OK) 2253 gpio_result = dal_mux_setup_config(generic, config); 2254 2255 if (gpio_result == GPIO_RESULT_OK) { 2256 dal_gpio_close(generic); 2257 dal_gpio_destroy_generic_mux(&generic); 2258 kfree(config); 2259 return true; 2260 } else { 2261 dal_gpio_close(generic); 2262 dal_gpio_destroy_generic_mux(&generic); 2263 kfree(config); 2264 return false; 2265 } 2266 } 2267 2268 static bool is_surface_in_context( 2269 const struct dc_state *context, 2270 const struct dc_plane_state *plane_state) 2271 { 2272 int j; 2273 2274 for (j = 0; j < MAX_PIPES; j++) { 2275 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2276 2277 if (plane_state == pipe_ctx->plane_state) { 2278 return true; 2279 } 2280 } 2281 2282 return false; 2283 } 2284 2285 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2286 { 2287 union surface_update_flags *update_flags = &u->surface->update_flags; 2288 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2289 2290 if (!u->plane_info) 2291 return UPDATE_TYPE_FAST; 2292 2293 if (u->plane_info->color_space != u->surface->color_space) { 2294 update_flags->bits.color_space_change = 1; 2295 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2296 } 2297 2298 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2299 update_flags->bits.horizontal_mirror_change = 1; 2300 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2301 } 2302 2303 if (u->plane_info->rotation != u->surface->rotation) { 2304 update_flags->bits.rotation_change = 1; 2305 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2306 } 2307 2308 if (u->plane_info->format != u->surface->format) { 2309 update_flags->bits.pixel_format_change = 1; 2310 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2311 } 2312 2313 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2314 update_flags->bits.stereo_format_change = 1; 2315 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2316 } 2317 2318 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2319 update_flags->bits.per_pixel_alpha_change = 1; 2320 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2321 } 2322 2323 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2324 update_flags->bits.global_alpha_change = 1; 2325 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2326 } 2327 2328 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2329 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2330 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2331 /* During DCC on/off, stutter period is calculated before 2332 * DCC has fully transitioned. This results in incorrect 2333 * stutter period calculation. Triggering a full update will 2334 * recalculate stutter period. 2335 */ 2336 update_flags->bits.dcc_change = 1; 2337 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2338 } 2339 2340 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2341 resource_pixel_format_to_bpp(u->surface->format)) { 2342 /* different bytes per element will require full bandwidth 2343 * and DML calculation 2344 */ 2345 update_flags->bits.bpp_change = 1; 2346 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2347 } 2348 2349 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2350 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2351 update_flags->bits.plane_size_change = 1; 2352 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2353 } 2354 2355 2356 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2357 sizeof(union dc_tiling_info)) != 0) { 2358 update_flags->bits.swizzle_change = 1; 2359 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2360 2361 /* todo: below are HW dependent, we should add a hook to 2362 * DCE/N resource and validated there. 2363 */ 2364 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2365 /* swizzled mode requires RQ to be setup properly, 2366 * thus need to run DML to calculate RQ settings 2367 */ 2368 update_flags->bits.bandwidth_change = 1; 2369 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2370 } 2371 } 2372 2373 /* This should be UPDATE_TYPE_FAST if nothing has changed. */ 2374 return update_type; 2375 } 2376 2377 static enum surface_update_type get_scaling_info_update_type( 2378 const struct dc_surface_update *u) 2379 { 2380 union surface_update_flags *update_flags = &u->surface->update_flags; 2381 2382 if (!u->scaling_info) 2383 return UPDATE_TYPE_FAST; 2384 2385 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2386 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2387 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2388 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2389 || u->scaling_info->scaling_quality.integer_scaling != 2390 u->surface->scaling_quality.integer_scaling 2391 ) { 2392 update_flags->bits.scaling_change = 1; 2393 2394 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2395 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2396 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2397 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2398 /* Making dst rect smaller requires a bandwidth change */ 2399 update_flags->bits.bandwidth_change = 1; 2400 } 2401 2402 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2403 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2404 2405 update_flags->bits.scaling_change = 1; 2406 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2407 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2408 /* Making src rect bigger requires a bandwidth change */ 2409 update_flags->bits.clock_change = 1; 2410 } 2411 2412 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2413 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2414 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2415 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2416 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2417 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2418 update_flags->bits.position_change = 1; 2419 2420 if (update_flags->bits.clock_change 2421 || update_flags->bits.bandwidth_change 2422 || update_flags->bits.scaling_change) 2423 return UPDATE_TYPE_FULL; 2424 2425 if (update_flags->bits.position_change) 2426 return UPDATE_TYPE_MED; 2427 2428 return UPDATE_TYPE_FAST; 2429 } 2430 2431 static enum surface_update_type det_surface_update(const struct dc *dc, 2432 const struct dc_surface_update *u) 2433 { 2434 const struct dc_state *context = dc->current_state; 2435 enum surface_update_type type; 2436 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2437 union surface_update_flags *update_flags = &u->surface->update_flags; 2438 2439 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2440 update_flags->raw = 0xFFFFFFFF; 2441 return UPDATE_TYPE_FULL; 2442 } 2443 2444 update_flags->raw = 0; // Reset all flags 2445 2446 type = get_plane_info_update_type(u); 2447 elevate_update_type(&overall_type, type); 2448 2449 type = get_scaling_info_update_type(u); 2450 elevate_update_type(&overall_type, type); 2451 2452 if (u->flip_addr) { 2453 update_flags->bits.addr_update = 1; 2454 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2455 update_flags->bits.tmz_changed = 1; 2456 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2457 } 2458 } 2459 if (u->in_transfer_func) 2460 update_flags->bits.in_transfer_func_change = 1; 2461 2462 if (u->input_csc_color_matrix) 2463 update_flags->bits.input_csc_change = 1; 2464 2465 if (u->coeff_reduction_factor) 2466 update_flags->bits.coeff_reduction_change = 1; 2467 2468 if (u->gamut_remap_matrix) 2469 update_flags->bits.gamut_remap_change = 1; 2470 2471 if (u->gamma) { 2472 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2473 2474 if (u->plane_info) 2475 format = u->plane_info->format; 2476 else if (u->surface) 2477 format = u->surface->format; 2478 2479 if (dce_use_lut(format)) 2480 update_flags->bits.gamma_change = 1; 2481 } 2482 2483 if (u->lut3d_func || u->func_shaper) 2484 update_flags->bits.lut_3d = 1; 2485 2486 if (u->hdr_mult.value) 2487 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2488 update_flags->bits.hdr_mult = 1; 2489 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2490 } 2491 2492 if (update_flags->bits.in_transfer_func_change) { 2493 type = UPDATE_TYPE_MED; 2494 elevate_update_type(&overall_type, type); 2495 } 2496 2497 if (update_flags->bits.input_csc_change 2498 || update_flags->bits.coeff_reduction_change 2499 || update_flags->bits.lut_3d 2500 || update_flags->bits.gamma_change 2501 || update_flags->bits.gamut_remap_change) { 2502 type = UPDATE_TYPE_FULL; 2503 elevate_update_type(&overall_type, type); 2504 } 2505 2506 return overall_type; 2507 } 2508 2509 static enum surface_update_type check_update_surfaces_for_stream( 2510 struct dc *dc, 2511 struct dc_surface_update *updates, 2512 int surface_count, 2513 struct dc_stream_update *stream_update, 2514 const struct dc_stream_status *stream_status) 2515 { 2516 int i; 2517 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2518 2519 if (dc->idle_optimizations_allowed) 2520 overall_type = UPDATE_TYPE_FULL; 2521 2522 if (stream_status == NULL || stream_status->plane_count != surface_count) 2523 overall_type = UPDATE_TYPE_FULL; 2524 2525 if (stream_update && stream_update->pending_test_pattern) { 2526 overall_type = UPDATE_TYPE_FULL; 2527 } 2528 2529 /* some stream updates require passive update */ 2530 if (stream_update) { 2531 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2532 2533 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2534 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2535 stream_update->integer_scaling_update) 2536 su_flags->bits.scaling = 1; 2537 2538 if (stream_update->out_transfer_func) 2539 su_flags->bits.out_tf = 1; 2540 2541 if (stream_update->abm_level) 2542 su_flags->bits.abm_level = 1; 2543 2544 if (stream_update->dpms_off) 2545 su_flags->bits.dpms_off = 1; 2546 2547 if (stream_update->gamut_remap) 2548 su_flags->bits.gamut_remap = 1; 2549 2550 if (stream_update->wb_update) 2551 su_flags->bits.wb_update = 1; 2552 2553 if (stream_update->dsc_config) 2554 su_flags->bits.dsc_changed = 1; 2555 2556 if (stream_update->mst_bw_update) 2557 su_flags->bits.mst_bw = 1; 2558 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) 2559 su_flags->bits.crtc_timing_adjust = 1; 2560 2561 if (su_flags->raw != 0) 2562 overall_type = UPDATE_TYPE_FULL; 2563 2564 if (stream_update->output_csc_transform || stream_update->output_color_space) 2565 su_flags->bits.out_csc = 1; 2566 } 2567 2568 for (i = 0 ; i < surface_count; i++) { 2569 enum surface_update_type type = 2570 det_surface_update(dc, &updates[i]); 2571 2572 elevate_update_type(&overall_type, type); 2573 } 2574 2575 return overall_type; 2576 } 2577 2578 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect) 2579 { 2580 int view_height, view_width, clip_x, clip_y, clip_width, clip_height; 2581 2582 view_height = src.height; 2583 view_width = src.width; 2584 2585 clip_x = clip_rect.x; 2586 clip_y = clip_rect.y; 2587 2588 clip_width = clip_rect.width; 2589 clip_height = clip_rect.height; 2590 2591 /* check for centered video accounting for off by 1 scaling truncation */ 2592 if ((view_height - clip_y - clip_height <= clip_y + 1) && 2593 (view_width - clip_x - clip_width <= clip_x + 1) && 2594 (view_height - clip_y - clip_height >= clip_y - 1) && 2595 (view_width - clip_x - clip_width >= clip_x - 1)) { 2596 2597 /* when OS scales up/down to letter box, it may end up 2598 * with few blank pixels on the border due to truncating. 2599 * Add offset margin to account for this 2600 */ 2601 if (clip_x <= 4 || clip_y <= 4) 2602 return true; 2603 } 2604 2605 return false; 2606 } 2607 2608 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc, 2609 struct dc_surface_update *srf_updates, int surface_count, 2610 enum surface_update_type update_type) 2611 { 2612 enum surface_update_type new_update_type = update_type; 2613 int i, j; 2614 struct pipe_ctx *pipe = NULL; 2615 struct dc_stream_state *stream; 2616 2617 /* Check that we are in windowed MPO with ODM 2618 * - look for MPO pipe by scanning pipes for first pipe matching 2619 * surface that has moved ( position change ) 2620 * - MPO pipe will have top pipe 2621 * - check that top pipe has ODM pointer 2622 */ 2623 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) { 2624 for (i = 0; i < surface_count; i++) { 2625 if (srf_updates[i].surface && srf_updates[i].scaling_info 2626 && srf_updates[i].surface->update_flags.bits.position_change) { 2627 2628 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2629 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) { 2630 pipe = &dc->current_state->res_ctx.pipe_ctx[j]; 2631 stream = pipe->stream; 2632 break; 2633 } 2634 } 2635 2636 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream 2637 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) { 2638 struct rect old_clip_rect, new_clip_rect; 2639 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle; 2640 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle; 2641 2642 old_clip_rect = srf_updates[i].surface->clip_rect; 2643 new_clip_rect = srf_updates[i].scaling_info->clip_rect; 2644 2645 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2646 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2647 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right; 2648 2649 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2650 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2651 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right; 2652 2653 if (old_clip_rect_left && new_clip_rect_middle) 2654 new_update_type = UPDATE_TYPE_FULL; 2655 else if (old_clip_rect_middle && new_clip_rect_right) 2656 new_update_type = UPDATE_TYPE_FULL; 2657 else if (old_clip_rect_right && new_clip_rect_middle) 2658 new_update_type = UPDATE_TYPE_FULL; 2659 else if (old_clip_rect_middle && new_clip_rect_left) 2660 new_update_type = UPDATE_TYPE_FULL; 2661 } 2662 } 2663 } 2664 } 2665 return new_update_type; 2666 } 2667 2668 /* 2669 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2670 * 2671 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2672 */ 2673 enum surface_update_type dc_check_update_surfaces_for_stream( 2674 struct dc *dc, 2675 struct dc_surface_update *updates, 2676 int surface_count, 2677 struct dc_stream_update *stream_update, 2678 const struct dc_stream_status *stream_status) 2679 { 2680 int i; 2681 enum surface_update_type type; 2682 2683 if (stream_update) 2684 stream_update->stream->update_flags.raw = 0; 2685 for (i = 0; i < surface_count; i++) 2686 updates[i].surface->update_flags.raw = 0; 2687 2688 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2689 if (type == UPDATE_TYPE_FULL) { 2690 if (stream_update) { 2691 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2692 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2693 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2694 } 2695 for (i = 0; i < surface_count; i++) 2696 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2697 } 2698 2699 if (type == UPDATE_TYPE_MED) 2700 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc, 2701 updates, surface_count, type); 2702 2703 if (type == UPDATE_TYPE_FAST) { 2704 // If there's an available clock comparator, we use that. 2705 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2706 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2707 dc->optimized_required = true; 2708 // Else we fallback to mem compare. 2709 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2710 dc->optimized_required = true; 2711 } 2712 2713 dc->optimized_required |= dc->wm_optimized_required; 2714 } 2715 2716 return type; 2717 } 2718 2719 static struct dc_stream_status *stream_get_status( 2720 struct dc_state *ctx, 2721 struct dc_stream_state *stream) 2722 { 2723 uint8_t i; 2724 2725 for (i = 0; i < ctx->stream_count; i++) { 2726 if (stream == ctx->streams[i]) { 2727 return &ctx->stream_status[i]; 2728 } 2729 } 2730 2731 return NULL; 2732 } 2733 2734 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2735 2736 static void copy_surface_update_to_plane( 2737 struct dc_plane_state *surface, 2738 struct dc_surface_update *srf_update) 2739 { 2740 if (srf_update->flip_addr) { 2741 surface->address = srf_update->flip_addr->address; 2742 surface->flip_immediate = 2743 srf_update->flip_addr->flip_immediate; 2744 surface->time.time_elapsed_in_us[surface->time.index] = 2745 srf_update->flip_addr->flip_timestamp_in_us - 2746 surface->time.prev_update_time_in_us; 2747 surface->time.prev_update_time_in_us = 2748 srf_update->flip_addr->flip_timestamp_in_us; 2749 surface->time.index++; 2750 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2751 surface->time.index = 0; 2752 2753 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2754 } 2755 2756 if (srf_update->scaling_info) { 2757 surface->scaling_quality = 2758 srf_update->scaling_info->scaling_quality; 2759 surface->dst_rect = 2760 srf_update->scaling_info->dst_rect; 2761 surface->src_rect = 2762 srf_update->scaling_info->src_rect; 2763 surface->clip_rect = 2764 srf_update->scaling_info->clip_rect; 2765 } 2766 2767 if (srf_update->plane_info) { 2768 surface->color_space = 2769 srf_update->plane_info->color_space; 2770 surface->format = 2771 srf_update->plane_info->format; 2772 surface->plane_size = 2773 srf_update->plane_info->plane_size; 2774 surface->rotation = 2775 srf_update->plane_info->rotation; 2776 surface->horizontal_mirror = 2777 srf_update->plane_info->horizontal_mirror; 2778 surface->stereo_format = 2779 srf_update->plane_info->stereo_format; 2780 surface->tiling_info = 2781 srf_update->plane_info->tiling_info; 2782 surface->visible = 2783 srf_update->plane_info->visible; 2784 surface->per_pixel_alpha = 2785 srf_update->plane_info->per_pixel_alpha; 2786 surface->global_alpha = 2787 srf_update->plane_info->global_alpha; 2788 surface->global_alpha_value = 2789 srf_update->plane_info->global_alpha_value; 2790 surface->dcc = 2791 srf_update->plane_info->dcc; 2792 surface->layer_index = 2793 srf_update->plane_info->layer_index; 2794 } 2795 2796 if (srf_update->gamma && 2797 (surface->gamma_correction != 2798 srf_update->gamma)) { 2799 memcpy(&surface->gamma_correction->entries, 2800 &srf_update->gamma->entries, 2801 sizeof(struct dc_gamma_entries)); 2802 surface->gamma_correction->is_identity = 2803 srf_update->gamma->is_identity; 2804 surface->gamma_correction->num_entries = 2805 srf_update->gamma->num_entries; 2806 surface->gamma_correction->type = 2807 srf_update->gamma->type; 2808 } 2809 2810 if (srf_update->in_transfer_func && 2811 (surface->in_transfer_func != 2812 srf_update->in_transfer_func)) { 2813 surface->in_transfer_func->sdr_ref_white_level = 2814 srf_update->in_transfer_func->sdr_ref_white_level; 2815 surface->in_transfer_func->tf = 2816 srf_update->in_transfer_func->tf; 2817 surface->in_transfer_func->type = 2818 srf_update->in_transfer_func->type; 2819 memcpy(&surface->in_transfer_func->tf_pts, 2820 &srf_update->in_transfer_func->tf_pts, 2821 sizeof(struct dc_transfer_func_distributed_points)); 2822 } 2823 2824 if (srf_update->func_shaper && 2825 (surface->in_shaper_func != 2826 srf_update->func_shaper)) 2827 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2828 sizeof(*surface->in_shaper_func)); 2829 2830 if (srf_update->lut3d_func && 2831 (surface->lut3d_func != 2832 srf_update->lut3d_func)) 2833 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2834 sizeof(*surface->lut3d_func)); 2835 2836 if (srf_update->hdr_mult.value) 2837 surface->hdr_mult = 2838 srf_update->hdr_mult; 2839 2840 if (srf_update->blend_tf && 2841 (surface->blend_tf != 2842 srf_update->blend_tf)) 2843 memcpy(surface->blend_tf, srf_update->blend_tf, 2844 sizeof(*surface->blend_tf)); 2845 2846 if (srf_update->input_csc_color_matrix) 2847 surface->input_csc_color_matrix = 2848 *srf_update->input_csc_color_matrix; 2849 2850 if (srf_update->coeff_reduction_factor) 2851 surface->coeff_reduction_factor = 2852 *srf_update->coeff_reduction_factor; 2853 2854 if (srf_update->gamut_remap_matrix) 2855 surface->gamut_remap_matrix = 2856 *srf_update->gamut_remap_matrix; 2857 } 2858 2859 static void copy_stream_update_to_stream(struct dc *dc, 2860 struct dc_state *context, 2861 struct dc_stream_state *stream, 2862 struct dc_stream_update *update) 2863 { 2864 struct dc_context *dc_ctx = dc->ctx; 2865 2866 if (update == NULL || stream == NULL) 2867 return; 2868 2869 if (update->src.height && update->src.width) 2870 stream->src = update->src; 2871 2872 if (update->dst.height && update->dst.width) 2873 stream->dst = update->dst; 2874 2875 if (update->out_transfer_func && 2876 stream->out_transfer_func != update->out_transfer_func) { 2877 stream->out_transfer_func->sdr_ref_white_level = 2878 update->out_transfer_func->sdr_ref_white_level; 2879 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2880 stream->out_transfer_func->type = 2881 update->out_transfer_func->type; 2882 memcpy(&stream->out_transfer_func->tf_pts, 2883 &update->out_transfer_func->tf_pts, 2884 sizeof(struct dc_transfer_func_distributed_points)); 2885 } 2886 2887 if (update->hdr_static_metadata) 2888 stream->hdr_static_metadata = *update->hdr_static_metadata; 2889 2890 if (update->abm_level) 2891 stream->abm_level = *update->abm_level; 2892 2893 if (update->periodic_interrupt) 2894 stream->periodic_interrupt = *update->periodic_interrupt; 2895 2896 if (update->gamut_remap) 2897 stream->gamut_remap_matrix = *update->gamut_remap; 2898 2899 /* Note: this being updated after mode set is currently not a use case 2900 * however if it arises OCSC would need to be reprogrammed at the 2901 * minimum 2902 */ 2903 if (update->output_color_space) 2904 stream->output_color_space = *update->output_color_space; 2905 2906 if (update->output_csc_transform) 2907 stream->csc_color_matrix = *update->output_csc_transform; 2908 2909 if (update->vrr_infopacket) 2910 stream->vrr_infopacket = *update->vrr_infopacket; 2911 2912 if (update->allow_freesync) 2913 stream->allow_freesync = *update->allow_freesync; 2914 2915 if (update->vrr_active_variable) 2916 stream->vrr_active_variable = *update->vrr_active_variable; 2917 2918 if (update->crtc_timing_adjust) 2919 stream->adjust = *update->crtc_timing_adjust; 2920 2921 if (update->dpms_off) 2922 stream->dpms_off = *update->dpms_off; 2923 2924 if (update->hfvsif_infopacket) 2925 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 2926 2927 if (update->vtem_infopacket) 2928 stream->vtem_infopacket = *update->vtem_infopacket; 2929 2930 if (update->vsc_infopacket) 2931 stream->vsc_infopacket = *update->vsc_infopacket; 2932 2933 if (update->vsp_infopacket) 2934 stream->vsp_infopacket = *update->vsp_infopacket; 2935 2936 if (update->dither_option) 2937 stream->dither_option = *update->dither_option; 2938 2939 if (update->pending_test_pattern) 2940 stream->test_pattern = *update->pending_test_pattern; 2941 /* update current stream with writeback info */ 2942 if (update->wb_update) { 2943 int i; 2944 2945 stream->num_wb_info = update->wb_update->num_wb_info; 2946 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 2947 for (i = 0; i < stream->num_wb_info; i++) 2948 stream->writeback_info[i] = 2949 update->wb_update->writeback_info[i]; 2950 } 2951 if (update->dsc_config) { 2952 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 2953 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 2954 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 2955 update->dsc_config->num_slices_v != 0); 2956 2957 /* Use temporarry context for validating new DSC config */ 2958 struct dc_state *dsc_validate_context = dc_create_state(dc); 2959 2960 if (dsc_validate_context) { 2961 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 2962 2963 stream->timing.dsc_cfg = *update->dsc_config; 2964 stream->timing.flags.DSC = enable_dsc; 2965 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 2966 stream->timing.dsc_cfg = old_dsc_cfg; 2967 stream->timing.flags.DSC = old_dsc_enabled; 2968 update->dsc_config = NULL; 2969 } 2970 2971 dc_release_state(dsc_validate_context); 2972 } else { 2973 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 2974 update->dsc_config = NULL; 2975 } 2976 } 2977 } 2978 2979 static bool update_planes_and_stream_state(struct dc *dc, 2980 struct dc_surface_update *srf_updates, int surface_count, 2981 struct dc_stream_state *stream, 2982 struct dc_stream_update *stream_update, 2983 enum surface_update_type *new_update_type, 2984 struct dc_state **new_context) 2985 { 2986 struct dc_state *context; 2987 int i, j; 2988 enum surface_update_type update_type; 2989 const struct dc_stream_status *stream_status; 2990 struct dc_context *dc_ctx = dc->ctx; 2991 2992 stream_status = dc_stream_get_status(stream); 2993 2994 if (!stream_status) { 2995 if (surface_count) /* Only an error condition if surf_count non-zero*/ 2996 ASSERT(false); 2997 2998 return false; /* Cannot commit surface to stream that is not committed */ 2999 } 3000 3001 context = dc->current_state; 3002 3003 update_type = dc_check_update_surfaces_for_stream( 3004 dc, srf_updates, surface_count, stream_update, stream_status); 3005 3006 /* update current stream with the new updates */ 3007 copy_stream_update_to_stream(dc, context, stream, stream_update); 3008 3009 /* do not perform surface update if surface has invalid dimensions 3010 * (all zero) and no scaling_info is provided 3011 */ 3012 if (surface_count > 0) { 3013 for (i = 0; i < surface_count; i++) { 3014 if ((srf_updates[i].surface->src_rect.width == 0 || 3015 srf_updates[i].surface->src_rect.height == 0 || 3016 srf_updates[i].surface->dst_rect.width == 0 || 3017 srf_updates[i].surface->dst_rect.height == 0) && 3018 (!srf_updates[i].scaling_info || 3019 srf_updates[i].scaling_info->src_rect.width == 0 || 3020 srf_updates[i].scaling_info->src_rect.height == 0 || 3021 srf_updates[i].scaling_info->dst_rect.width == 0 || 3022 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3023 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3024 return false; 3025 } 3026 } 3027 } 3028 3029 if (update_type >= update_surface_trace_level) 3030 update_surface_trace(dc, srf_updates, surface_count); 3031 3032 if (update_type >= UPDATE_TYPE_FULL) { 3033 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3034 3035 for (i = 0; i < surface_count; i++) 3036 new_planes[i] = srf_updates[i].surface; 3037 3038 /* initialize scratch memory for building context */ 3039 context = dc_create_state(dc); 3040 if (context == NULL) { 3041 DC_ERROR("Failed to allocate new validate context!\n"); 3042 return false; 3043 } 3044 3045 dc_resource_state_copy_construct( 3046 dc->current_state, context); 3047 3048 /* For each full update, remove all existing phantom pipes first. 3049 * Ensures that we have enough pipes for newly added MPO planes 3050 */ 3051 if (dc->res_pool->funcs->remove_phantom_pipes) 3052 dc->res_pool->funcs->remove_phantom_pipes(dc, context); 3053 3054 /*remove old surfaces from context */ 3055 if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3056 3057 BREAK_TO_DEBUGGER(); 3058 goto fail; 3059 } 3060 3061 /* add surface to context */ 3062 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3063 3064 BREAK_TO_DEBUGGER(); 3065 goto fail; 3066 } 3067 } 3068 3069 /* save update parameters into surface */ 3070 for (i = 0; i < surface_count; i++) { 3071 struct dc_plane_state *surface = srf_updates[i].surface; 3072 3073 copy_surface_update_to_plane(surface, &srf_updates[i]); 3074 3075 if (update_type >= UPDATE_TYPE_MED) { 3076 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3077 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3078 3079 if (pipe_ctx->plane_state != surface) 3080 continue; 3081 3082 resource_build_scaling_params(pipe_ctx); 3083 } 3084 } 3085 } 3086 3087 if (update_type == UPDATE_TYPE_FULL) { 3088 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3089 BREAK_TO_DEBUGGER(); 3090 goto fail; 3091 } 3092 } 3093 3094 *new_context = context; 3095 *new_update_type = update_type; 3096 3097 return true; 3098 3099 fail: 3100 dc_release_state(context); 3101 3102 return false; 3103 3104 } 3105 3106 static void commit_planes_do_stream_update(struct dc *dc, 3107 struct dc_stream_state *stream, 3108 struct dc_stream_update *stream_update, 3109 enum surface_update_type update_type, 3110 struct dc_state *context) 3111 { 3112 int j; 3113 3114 // Stream updates 3115 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3116 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3117 3118 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { 3119 3120 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3121 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3122 3123 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3124 stream_update->vrr_infopacket || 3125 stream_update->vsc_infopacket || 3126 stream_update->vsp_infopacket || 3127 stream_update->hfvsif_infopacket || 3128 stream_update->vtem_infopacket) { 3129 resource_build_info_frame(pipe_ctx); 3130 dc->hwss.update_info_frame(pipe_ctx); 3131 3132 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3133 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3134 } 3135 3136 if (stream_update->hdr_static_metadata && 3137 stream->use_dynamic_meta && 3138 dc->hwss.set_dmdata_attributes && 3139 pipe_ctx->stream->dmdata_address.quad_part != 0) 3140 dc->hwss.set_dmdata_attributes(pipe_ctx); 3141 3142 if (stream_update->gamut_remap) 3143 dc_stream_set_gamut_remap(dc, stream); 3144 3145 if (stream_update->output_csc_transform) 3146 dc_stream_program_csc_matrix(dc, stream); 3147 3148 if (stream_update->dither_option) { 3149 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3150 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3151 &pipe_ctx->stream->bit_depth_params); 3152 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3153 &stream->bit_depth_params, 3154 &stream->clamping); 3155 while (odm_pipe) { 3156 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3157 &stream->bit_depth_params, 3158 &stream->clamping); 3159 odm_pipe = odm_pipe->next_odm_pipe; 3160 } 3161 } 3162 3163 3164 /* Full fe update*/ 3165 if (update_type == UPDATE_TYPE_FAST) 3166 continue; 3167 3168 if (stream_update->dsc_config) 3169 dp_update_dsc_config(pipe_ctx); 3170 3171 if (stream_update->mst_bw_update) { 3172 if (stream_update->mst_bw_update->is_increase) 3173 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 3174 else 3175 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 3176 } 3177 3178 if (stream_update->pending_test_pattern) { 3179 dc_link_dp_set_test_pattern(stream->link, 3180 stream->test_pattern.type, 3181 stream->test_pattern.color_space, 3182 stream->test_pattern.p_link_settings, 3183 stream->test_pattern.p_custom_pattern, 3184 stream->test_pattern.cust_pattern_size); 3185 } 3186 3187 if (stream_update->dpms_off) { 3188 if (*stream_update->dpms_off) { 3189 core_link_disable_stream(pipe_ctx); 3190 /* for dpms, keep acquired resources*/ 3191 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3192 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3193 3194 dc->optimized_required = true; 3195 3196 } else { 3197 if (get_seamless_boot_stream_count(context) == 0) 3198 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3199 core_link_enable_stream(dc->current_state, pipe_ctx); 3200 } 3201 } 3202 3203 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3204 bool should_program_abm = true; 3205 3206 // if otg funcs defined check if blanked before programming 3207 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3208 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3209 should_program_abm = false; 3210 3211 if (should_program_abm) { 3212 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3213 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3214 } else { 3215 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3216 pipe_ctx->stream_res.abm, stream->abm_level); 3217 } 3218 } 3219 } 3220 } 3221 } 3222 } 3223 3224 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3225 { 3226 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3227 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3228 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3229 return true; 3230 3231 return false; 3232 } 3233 3234 void dc_dmub_update_dirty_rect(struct dc *dc, 3235 int surface_count, 3236 struct dc_stream_state *stream, 3237 struct dc_surface_update *srf_updates, 3238 struct dc_state *context) 3239 { 3240 union dmub_rb_cmd cmd; 3241 struct dc_context *dc_ctx = dc->ctx; 3242 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3243 unsigned int i, j; 3244 unsigned int panel_inst = 0; 3245 3246 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3247 return; 3248 3249 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3250 return; 3251 3252 memset(&cmd, 0x0, sizeof(cmd)); 3253 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3254 cmd.update_dirty_rect.header.sub_type = 0; 3255 cmd.update_dirty_rect.header.payload_bytes = 3256 sizeof(cmd.update_dirty_rect) - 3257 sizeof(cmd.update_dirty_rect.header); 3258 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3259 for (i = 0; i < surface_count; i++) { 3260 struct dc_plane_state *plane_state = srf_updates[i].surface; 3261 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3262 3263 if (!srf_updates[i].surface || !flip_addr) 3264 continue; 3265 /* Do not send in immediate flip mode */ 3266 if (srf_updates[i].surface->flip_immediate) 3267 continue; 3268 3269 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3270 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3271 sizeof(flip_addr->dirty_rects)); 3272 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3273 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3274 3275 if (pipe_ctx->stream != stream) 3276 continue; 3277 if (pipe_ctx->plane_state != plane_state) 3278 continue; 3279 3280 update_dirty_rect->panel_inst = panel_inst; 3281 update_dirty_rect->pipe_idx = j; 3282 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); 3283 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); 3284 } 3285 } 3286 } 3287 3288 static void commit_planes_for_stream(struct dc *dc, 3289 struct dc_surface_update *srf_updates, 3290 int surface_count, 3291 struct dc_stream_state *stream, 3292 struct dc_stream_update *stream_update, 3293 enum surface_update_type update_type, 3294 struct dc_state *context) 3295 { 3296 int i, j; 3297 struct pipe_ctx *top_pipe_to_program = NULL; 3298 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3299 bool subvp_prev_use = false; 3300 3301 // Once we apply the new subvp context to hardware it won't be in the 3302 // dc->current_state anymore, so we have to cache it before we apply 3303 // the new SubVP context 3304 subvp_prev_use = false; 3305 3306 3307 dc_z10_restore(dc); 3308 3309 if (update_type == UPDATE_TYPE_FULL) { 3310 /* wait for all double-buffer activity to clear on all pipes */ 3311 int pipe_idx; 3312 3313 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { 3314 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; 3315 3316 if (!pipe_ctx->stream) 3317 continue; 3318 3319 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) 3320 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); 3321 } 3322 } 3323 3324 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 3325 /* Optimize seamless boot flag keeps clocks and watermarks high until 3326 * first flip. After first flip, optimization is required to lower 3327 * bandwidth. Important to note that it is expected UEFI will 3328 * only light up a single display on POST, therefore we only expect 3329 * one stream with seamless boot flag set. 3330 */ 3331 if (stream->apply_seamless_boot_optimization) { 3332 stream->apply_seamless_boot_optimization = false; 3333 3334 if (get_seamless_boot_stream_count(context) == 0) 3335 dc->optimized_required = true; 3336 } 3337 } 3338 3339 if (update_type == UPDATE_TYPE_FULL) { 3340 dc_allow_idle_optimizations(dc, false); 3341 3342 if (get_seamless_boot_stream_count(context) == 0) 3343 dc->hwss.prepare_bandwidth(dc, context); 3344 3345 if (dc->debug.enable_double_buffered_dsc_pg_support) 3346 dc->hwss.update_dsc_pg(dc, context, false); 3347 3348 context_clock_trace(dc, context); 3349 } 3350 3351 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3352 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3353 3354 if (!pipe_ctx->top_pipe && 3355 !pipe_ctx->prev_odm_pipe && 3356 pipe_ctx->stream && 3357 pipe_ctx->stream == stream) { 3358 top_pipe_to_program = pipe_ctx; 3359 } 3360 } 3361 3362 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3363 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3364 3365 // Check old context for SubVP 3366 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3367 if (subvp_prev_use) 3368 break; 3369 } 3370 3371 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3372 struct pipe_ctx *mpcc_pipe; 3373 struct pipe_ctx *odm_pipe; 3374 3375 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3376 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3377 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3378 } 3379 3380 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3381 if (top_pipe_to_program && 3382 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3383 if (should_use_dmub_lock(stream->link)) { 3384 union dmub_hw_lock_flags hw_locks = { 0 }; 3385 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3386 3387 hw_locks.bits.lock_dig = 1; 3388 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3389 3390 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3391 true, 3392 &hw_locks, 3393 &inst_flags); 3394 } else 3395 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3396 top_pipe_to_program->stream_res.tg); 3397 } 3398 3399 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3400 if (dc->hwss.subvp_pipe_control_lock) 3401 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3402 dc->hwss.interdependent_update_lock(dc, context, true); 3403 3404 } else { 3405 if (dc->hwss.subvp_pipe_control_lock) 3406 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3407 /* Lock the top pipe while updating plane addrs, since freesync requires 3408 * plane addr update event triggers to be synchronized. 3409 * top_pipe_to_program is expected to never be NULL 3410 */ 3411 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3412 } 3413 3414 if (update_type != UPDATE_TYPE_FAST) { 3415 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3416 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3417 3418 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || 3419 subvp_prev_use) { 3420 // If old context or new context has phantom pipes, apply 3421 // the phantom timings now. We can't change the phantom 3422 // pipe configuration safely without driver acquiring 3423 // the DMCUB lock first. 3424 dc->hwss.apply_ctx_to_hw(dc, context); 3425 break; 3426 } 3427 } 3428 } 3429 3430 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3431 3432 if (update_type != UPDATE_TYPE_FAST) { 3433 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3434 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3435 3436 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || 3437 subvp_prev_use) { 3438 // If old context or new context has phantom pipes, apply 3439 // the phantom timings now. We can't change the phantom 3440 // pipe configuration safely without driver acquiring 3441 // the DMCUB lock first. 3442 dc->hwss.apply_ctx_to_hw(dc, context); 3443 break; 3444 } 3445 } 3446 } 3447 3448 // Stream updates 3449 if (stream_update) 3450 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3451 3452 if (surface_count == 0) { 3453 /* 3454 * In case of turning off screen, no need to program front end a second time. 3455 * just return after program blank. 3456 */ 3457 if (dc->hwss.apply_ctx_for_surface) 3458 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 3459 if (dc->hwss.program_front_end_for_ctx) 3460 dc->hwss.program_front_end_for_ctx(dc, context); 3461 3462 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3463 dc->hwss.interdependent_update_lock(dc, context, false); 3464 } else { 3465 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3466 } 3467 dc->hwss.post_unlock_program_front_end(dc, context); 3468 3469 if (update_type != UPDATE_TYPE_FAST) 3470 if (dc->hwss.commit_subvp_config) 3471 dc->hwss.commit_subvp_config(dc, context); 3472 3473 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3474 * move the SubVP lock to after the phantom pipes have been setup 3475 */ 3476 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3477 if (dc->hwss.subvp_pipe_control_lock) 3478 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3479 } else { 3480 if (dc->hwss.subvp_pipe_control_lock) 3481 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3482 } 3483 3484 return; 3485 } 3486 3487 if (!IS_DIAG_DC(dc->ctx->dce_environment)) { 3488 for (i = 0; i < surface_count; i++) { 3489 struct dc_plane_state *plane_state = srf_updates[i].surface; 3490 /*set logical flag for lock/unlock use*/ 3491 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3492 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3493 if (!pipe_ctx->plane_state) 3494 continue; 3495 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3496 continue; 3497 pipe_ctx->plane_state->triplebuffer_flips = false; 3498 if (update_type == UPDATE_TYPE_FAST && 3499 dc->hwss.program_triplebuffer != NULL && 3500 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3501 /*triple buffer for VUpdate only*/ 3502 pipe_ctx->plane_state->triplebuffer_flips = true; 3503 } 3504 } 3505 if (update_type == UPDATE_TYPE_FULL) { 3506 /* force vsync flip when reconfiguring pipes to prevent underflow */ 3507 plane_state->flip_immediate = false; 3508 } 3509 } 3510 } 3511 3512 // Update Type FULL, Surface updates 3513 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3514 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3515 3516 if (!pipe_ctx->top_pipe && 3517 !pipe_ctx->prev_odm_pipe && 3518 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 3519 struct dc_stream_status *stream_status = NULL; 3520 3521 if (!pipe_ctx->plane_state) 3522 continue; 3523 3524 /* Full fe update*/ 3525 if (update_type == UPDATE_TYPE_FAST) 3526 continue; 3527 3528 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 3529 3530 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3531 /*turn off triple buffer for full update*/ 3532 dc->hwss.program_triplebuffer( 3533 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3534 } 3535 stream_status = 3536 stream_get_status(context, pipe_ctx->stream); 3537 3538 if (dc->hwss.apply_ctx_for_surface) 3539 dc->hwss.apply_ctx_for_surface( 3540 dc, pipe_ctx->stream, stream_status->plane_count, context); 3541 } 3542 } 3543 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3544 dc->hwss.program_front_end_for_ctx(dc, context); 3545 if (dc->debug.validate_dml_output) { 3546 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3547 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3548 if (cur_pipe->stream == NULL) 3549 continue; 3550 3551 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3552 cur_pipe->plane_res.hubp, dc->ctx, 3553 &context->res_ctx.pipe_ctx[i].rq_regs, 3554 &context->res_ctx.pipe_ctx[i].dlg_regs, 3555 &context->res_ctx.pipe_ctx[i].ttu_regs); 3556 } 3557 } 3558 } 3559 3560 // Update Type FAST, Surface updates 3561 if (update_type == UPDATE_TYPE_FAST) { 3562 if (dc->hwss.set_flip_control_gsl) 3563 for (i = 0; i < surface_count; i++) { 3564 struct dc_plane_state *plane_state = srf_updates[i].surface; 3565 3566 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3567 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3568 3569 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3570 continue; 3571 3572 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3573 continue; 3574 3575 // GSL has to be used for flip immediate 3576 dc->hwss.set_flip_control_gsl(pipe_ctx, 3577 pipe_ctx->plane_state->flip_immediate); 3578 } 3579 } 3580 3581 /* Perform requested Updates */ 3582 for (i = 0; i < surface_count; i++) { 3583 struct dc_plane_state *plane_state = srf_updates[i].surface; 3584 3585 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3586 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3587 3588 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3589 continue; 3590 3591 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3592 continue; 3593 3594 /*program triple buffer after lock based on flip type*/ 3595 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3596 /*only enable triplebuffer for fast_update*/ 3597 dc->hwss.program_triplebuffer( 3598 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3599 } 3600 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3601 dc->hwss.update_plane_addr(dc, pipe_ctx); 3602 } 3603 } 3604 3605 } 3606 3607 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3608 dc->hwss.interdependent_update_lock(dc, context, false); 3609 } else { 3610 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3611 } 3612 3613 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3614 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3615 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3616 top_pipe_to_program->stream_res.tg, 3617 CRTC_STATE_VACTIVE); 3618 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3619 top_pipe_to_program->stream_res.tg, 3620 CRTC_STATE_VBLANK); 3621 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3622 top_pipe_to_program->stream_res.tg, 3623 CRTC_STATE_VACTIVE); 3624 3625 if (should_use_dmub_lock(stream->link)) { 3626 union dmub_hw_lock_flags hw_locks = { 0 }; 3627 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3628 3629 hw_locks.bits.lock_dig = 1; 3630 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3631 3632 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3633 false, 3634 &hw_locks, 3635 &inst_flags); 3636 } else 3637 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 3638 top_pipe_to_program->stream_res.tg); 3639 } 3640 3641 if (update_type != UPDATE_TYPE_FAST) 3642 dc->hwss.post_unlock_program_front_end(dc, context); 3643 if (update_type != UPDATE_TYPE_FAST) 3644 if (dc->hwss.commit_subvp_config) 3645 dc->hwss.commit_subvp_config(dc, context); 3646 3647 if (update_type != UPDATE_TYPE_FAST) 3648 if (dc->hwss.commit_subvp_config) 3649 dc->hwss.commit_subvp_config(dc, context); 3650 3651 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3652 * move the SubVP lock to after the phantom pipes have been setup 3653 */ 3654 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3655 if (dc->hwss.subvp_pipe_control_lock) 3656 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3657 } else { 3658 if (dc->hwss.subvp_pipe_control_lock) 3659 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3660 } 3661 3662 // Fire manual trigger only when bottom plane is flipped 3663 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3664 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3665 3666 if (!pipe_ctx->plane_state) 3667 continue; 3668 3669 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 3670 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 3671 !pipe_ctx->plane_state->update_flags.bits.addr_update || 3672 pipe_ctx->plane_state->skip_manual_trigger) 3673 continue; 3674 3675 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 3676 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 3677 } 3678 } 3679 3680 /* Determines if the incoming context requires a applying transition state with unnecessary 3681 * pipe splitting and ODM disabled, due to hardware limitations. In a case where 3682 * the OPP associated with an MPCC might change due to plane additions, this function 3683 * returns true. 3684 */ 3685 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, 3686 struct dc_stream_state *stream, 3687 int surface_count, 3688 bool *is_plane_addition) 3689 { 3690 3691 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); 3692 bool force_minimal_pipe_splitting = false; 3693 3694 *is_plane_addition = false; 3695 3696 if (cur_stream_status && 3697 dc->current_state->stream_count > 0 && 3698 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { 3699 /* determine if minimal transition is required due to MPC*/ 3700 if (surface_count > 0) { 3701 if (cur_stream_status->plane_count > surface_count) { 3702 force_minimal_pipe_splitting = true; 3703 } else if (cur_stream_status->plane_count < surface_count) { 3704 force_minimal_pipe_splitting = true; 3705 *is_plane_addition = true; 3706 } 3707 } 3708 } 3709 3710 if (cur_stream_status && 3711 dc->current_state->stream_count == 1 && 3712 dc->debug.enable_single_display_2to1_odm_policy) { 3713 /* determine if minimal transition is required due to dynamic ODM*/ 3714 if (surface_count > 0) { 3715 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { 3716 force_minimal_pipe_splitting = true; 3717 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { 3718 force_minimal_pipe_splitting = true; 3719 *is_plane_addition = true; 3720 } 3721 } 3722 } 3723 3724 /* For SubVP when adding MPO video we need to add a minimal transition. 3725 */ 3726 if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) { 3727 /* determine if minimal transition is required due to SubVP*/ 3728 if (surface_count > 0) { 3729 if (cur_stream_status->plane_count > surface_count) { 3730 force_minimal_pipe_splitting = true; 3731 } else if (cur_stream_status->plane_count < surface_count) { 3732 force_minimal_pipe_splitting = true; 3733 *is_plane_addition = true; 3734 } 3735 } 3736 } 3737 3738 return force_minimal_pipe_splitting; 3739 } 3740 3741 static bool commit_minimal_transition_state(struct dc *dc, 3742 struct dc_state *transition_base_context) 3743 { 3744 struct dc_state *transition_context = dc_create_state(dc); 3745 enum pipe_split_policy tmp_mpc_policy; 3746 bool temp_dynamic_odm_policy; 3747 bool temp_subvp_policy; 3748 enum dc_status ret = DC_ERROR_UNEXPECTED; 3749 unsigned int i, j; 3750 unsigned int pipe_in_use = 0; 3751 3752 if (!transition_context) 3753 return false; 3754 3755 /* check current pipes in use*/ 3756 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3757 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 3758 3759 if (pipe->plane_state) 3760 pipe_in_use++; 3761 } 3762 3763 /* When the OS add a new surface if we have been used all of pipes with odm combine 3764 * and mpc split feature, it need use commit_minimal_transition_state to transition safely. 3765 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need 3766 * call it again. Otherwise return true to skip. 3767 * 3768 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially 3769 * enter/exit MPO when DCN still have enough resources. 3770 */ 3771 if (pipe_in_use != dc->res_pool->pipe_count) { 3772 dc_release_state(transition_context); 3773 return true; 3774 } 3775 3776 if (!dc->config.is_vmin_only_asic) { 3777 tmp_mpc_policy = dc->debug.pipe_split_policy; 3778 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 3779 } 3780 3781 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 3782 dc->debug.enable_single_display_2to1_odm_policy = false; 3783 3784 temp_subvp_policy = dc->debug.force_disable_subvp; 3785 dc->debug.force_disable_subvp = true; 3786 3787 dc_resource_state_copy_construct(transition_base_context, transition_context); 3788 3789 //commit minimal state 3790 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) { 3791 for (i = 0; i < transition_context->stream_count; i++) { 3792 struct dc_stream_status *stream_status = &transition_context->stream_status[i]; 3793 3794 for (j = 0; j < stream_status->plane_count; j++) { 3795 struct dc_plane_state *plane_state = stream_status->plane_states[j]; 3796 3797 /* force vsync flip when reconfiguring pipes to prevent underflow 3798 * and corruption 3799 */ 3800 plane_state->flip_immediate = false; 3801 } 3802 } 3803 3804 ret = dc_commit_state_no_check(dc, transition_context); 3805 } 3806 3807 /*always release as dc_commit_state_no_check retains in good case*/ 3808 dc_release_state(transition_context); 3809 3810 /*restore previous pipe split and odm policy*/ 3811 if (!dc->config.is_vmin_only_asic) 3812 dc->debug.pipe_split_policy = tmp_mpc_policy; 3813 3814 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy; 3815 dc->debug.force_disable_subvp = temp_subvp_policy; 3816 3817 if (ret != DC_OK) { 3818 /*this should never happen*/ 3819 BREAK_TO_DEBUGGER(); 3820 return false; 3821 } 3822 3823 /*force full surface update*/ 3824 for (i = 0; i < dc->current_state->stream_count; i++) { 3825 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 3826 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 3827 } 3828 } 3829 3830 return true; 3831 } 3832 3833 bool dc_update_planes_and_stream(struct dc *dc, 3834 struct dc_surface_update *srf_updates, int surface_count, 3835 struct dc_stream_state *stream, 3836 struct dc_stream_update *stream_update) 3837 { 3838 struct dc_state *context; 3839 enum surface_update_type update_type; 3840 int i; 3841 3842 /* In cases where MPO and split or ODM are used transitions can 3843 * cause underflow. Apply stream configuration with minimal pipe 3844 * split first to avoid unsupported transitions for active pipes. 3845 */ 3846 bool force_minimal_pipe_splitting; 3847 bool is_plane_addition; 3848 3849 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 3850 dc, 3851 stream, 3852 surface_count, 3853 &is_plane_addition); 3854 3855 /* on plane addition, minimal state is the current one */ 3856 if (force_minimal_pipe_splitting && is_plane_addition && 3857 !commit_minimal_transition_state(dc, dc->current_state)) 3858 return false; 3859 3860 if (!update_planes_and_stream_state( 3861 dc, 3862 srf_updates, 3863 surface_count, 3864 stream, 3865 stream_update, 3866 &update_type, 3867 &context)) 3868 return false; 3869 3870 /* on plane removal, minimal state is the new one */ 3871 if (force_minimal_pipe_splitting && !is_plane_addition) { 3872 if (!commit_minimal_transition_state(dc, context)) { 3873 dc_release_state(context); 3874 return false; 3875 } 3876 3877 update_type = UPDATE_TYPE_FULL; 3878 } 3879 3880 commit_planes_for_stream( 3881 dc, 3882 srf_updates, 3883 surface_count, 3884 stream, 3885 stream_update, 3886 update_type, 3887 context); 3888 3889 if (dc->current_state != context) { 3890 3891 /* Since memory free requires elevated IRQL, an interrupt 3892 * request is generated by mem free. If this happens 3893 * between freeing and reassigning the context, our vsync 3894 * interrupt will call into dc and cause a memory 3895 * corruption BSOD. Hence, we first reassign the context, 3896 * then free the old context. 3897 */ 3898 3899 struct dc_state *old = dc->current_state; 3900 3901 dc->current_state = context; 3902 dc_release_state(old); 3903 3904 // clear any forced full updates 3905 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3906 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 3907 3908 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 3909 pipe_ctx->plane_state->force_full_update = false; 3910 } 3911 } 3912 return true; 3913 } 3914 3915 void dc_commit_updates_for_stream(struct dc *dc, 3916 struct dc_surface_update *srf_updates, 3917 int surface_count, 3918 struct dc_stream_state *stream, 3919 struct dc_stream_update *stream_update, 3920 struct dc_state *state) 3921 { 3922 const struct dc_stream_status *stream_status; 3923 enum surface_update_type update_type; 3924 struct dc_state *context; 3925 struct dc_context *dc_ctx = dc->ctx; 3926 int i, j; 3927 3928 stream_status = dc_stream_get_status(stream); 3929 context = dc->current_state; 3930 3931 update_type = dc_check_update_surfaces_for_stream( 3932 dc, srf_updates, surface_count, stream_update, stream_status); 3933 3934 if (update_type >= update_surface_trace_level) 3935 update_surface_trace(dc, srf_updates, surface_count); 3936 3937 3938 if (update_type >= UPDATE_TYPE_FULL) { 3939 3940 /* initialize scratch memory for building context */ 3941 context = dc_create_state(dc); 3942 if (context == NULL) { 3943 DC_ERROR("Failed to allocate new validate context!\n"); 3944 return; 3945 } 3946 3947 dc_resource_state_copy_construct(state, context); 3948 3949 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3950 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3951 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3952 3953 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) 3954 new_pipe->plane_state->force_full_update = true; 3955 } 3956 } else if (update_type == UPDATE_TYPE_FAST) { 3957 /* 3958 * Previous frame finished and HW is ready for optimization. 3959 */ 3960 dc_post_update_surfaces_to_stream(dc); 3961 } 3962 3963 3964 for (i = 0; i < surface_count; i++) { 3965 struct dc_plane_state *surface = srf_updates[i].surface; 3966 3967 copy_surface_update_to_plane(surface, &srf_updates[i]); 3968 3969 if (update_type >= UPDATE_TYPE_MED) { 3970 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3971 struct pipe_ctx *pipe_ctx = 3972 &context->res_ctx.pipe_ctx[j]; 3973 3974 if (pipe_ctx->plane_state != surface) 3975 continue; 3976 3977 resource_build_scaling_params(pipe_ctx); 3978 } 3979 } 3980 } 3981 3982 copy_stream_update_to_stream(dc, context, stream, stream_update); 3983 3984 if (update_type >= UPDATE_TYPE_FULL) { 3985 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3986 DC_ERROR("Mode validation failed for stream update!\n"); 3987 dc_release_state(context); 3988 return; 3989 } 3990 } 3991 3992 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); 3993 3994 commit_planes_for_stream( 3995 dc, 3996 srf_updates, 3997 surface_count, 3998 stream, 3999 stream_update, 4000 update_type, 4001 context); 4002 /*update current_State*/ 4003 if (dc->current_state != context) { 4004 4005 struct dc_state *old = dc->current_state; 4006 4007 dc->current_state = context; 4008 dc_release_state(old); 4009 4010 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4011 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4012 4013 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4014 pipe_ctx->plane_state->force_full_update = false; 4015 } 4016 } 4017 4018 /* Legacy optimization path for DCE. */ 4019 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { 4020 dc_post_update_surfaces_to_stream(dc); 4021 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 4022 } 4023 4024 return; 4025 4026 } 4027 4028 uint8_t dc_get_current_stream_count(struct dc *dc) 4029 { 4030 return dc->current_state->stream_count; 4031 } 4032 4033 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 4034 { 4035 if (i < dc->current_state->stream_count) 4036 return dc->current_state->streams[i]; 4037 return NULL; 4038 } 4039 4040 enum dc_irq_source dc_interrupt_to_irq_source( 4041 struct dc *dc, 4042 uint32_t src_id, 4043 uint32_t ext_id) 4044 { 4045 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 4046 } 4047 4048 /* 4049 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 4050 */ 4051 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 4052 { 4053 4054 if (dc == NULL) 4055 return false; 4056 4057 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 4058 } 4059 4060 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 4061 { 4062 dal_irq_service_ack(dc->res_pool->irqs, src); 4063 } 4064 4065 void dc_power_down_on_boot(struct dc *dc) 4066 { 4067 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 4068 dc->hwss.power_down_on_boot) 4069 dc->hwss.power_down_on_boot(dc); 4070 } 4071 4072 void dc_set_power_state( 4073 struct dc *dc, 4074 enum dc_acpi_cm_power_state power_state) 4075 { 4076 struct kref refcount; 4077 struct display_mode_lib *dml; 4078 4079 if (!dc->current_state) 4080 return; 4081 4082 switch (power_state) { 4083 case DC_ACPI_CM_POWER_STATE_D0: 4084 dc_resource_state_construct(dc, dc->current_state); 4085 4086 dc_z10_restore(dc); 4087 4088 if (dc->ctx->dmub_srv) 4089 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); 4090 4091 dc->hwss.init_hw(dc); 4092 4093 if (dc->hwss.init_sys_ctx != NULL && 4094 dc->vm_pa_config.valid) { 4095 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 4096 } 4097 4098 break; 4099 default: 4100 ASSERT(dc->current_state->stream_count == 0); 4101 /* Zero out the current context so that on resume we start with 4102 * clean state, and dc hw programming optimizations will not 4103 * cause any trouble. 4104 */ 4105 dml = kzalloc(sizeof(struct display_mode_lib), 4106 GFP_KERNEL); 4107 4108 ASSERT(dml); 4109 if (!dml) 4110 return; 4111 4112 /* Preserve refcount */ 4113 refcount = dc->current_state->refcount; 4114 /* Preserve display mode lib */ 4115 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib)); 4116 4117 dc_resource_state_destruct(dc->current_state); 4118 memset(dc->current_state, 0, 4119 sizeof(*dc->current_state)); 4120 4121 dc->current_state->refcount = refcount; 4122 dc->current_state->bw_ctx.dml = *dml; 4123 4124 kfree(dml); 4125 4126 break; 4127 } 4128 } 4129 4130 void dc_resume(struct dc *dc) 4131 { 4132 uint32_t i; 4133 4134 for (i = 0; i < dc->link_count; i++) 4135 core_link_resume(dc->links[i]); 4136 } 4137 4138 bool dc_is_dmcu_initialized(struct dc *dc) 4139 { 4140 struct dmcu *dmcu = dc->res_pool->dmcu; 4141 4142 if (dmcu) 4143 return dmcu->funcs->is_dmcu_initialized(dmcu); 4144 return false; 4145 } 4146 4147 bool dc_is_oem_i2c_device_present( 4148 struct dc *dc, 4149 size_t slave_address) 4150 { 4151 if (dc->res_pool->oem_device) 4152 return dce_i2c_oem_device_present( 4153 dc->res_pool, 4154 dc->res_pool->oem_device, 4155 slave_address); 4156 4157 return false; 4158 } 4159 4160 bool dc_submit_i2c( 4161 struct dc *dc, 4162 uint32_t link_index, 4163 struct i2c_command *cmd) 4164 { 4165 4166 struct dc_link *link = dc->links[link_index]; 4167 struct ddc_service *ddc = link->ddc; 4168 return dce_i2c_submit_command( 4169 dc->res_pool, 4170 ddc->ddc_pin, 4171 cmd); 4172 } 4173 4174 bool dc_submit_i2c_oem( 4175 struct dc *dc, 4176 struct i2c_command *cmd) 4177 { 4178 struct ddc_service *ddc = dc->res_pool->oem_device; 4179 if (ddc) 4180 return dce_i2c_submit_command( 4181 dc->res_pool, 4182 ddc->ddc_pin, 4183 cmd); 4184 4185 return false; 4186 } 4187 4188 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) 4189 { 4190 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { 4191 BREAK_TO_DEBUGGER(); 4192 return false; 4193 } 4194 4195 dc_sink_retain(sink); 4196 4197 dc_link->remote_sinks[dc_link->sink_count] = sink; 4198 dc_link->sink_count++; 4199 4200 return true; 4201 } 4202 4203 /* 4204 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link 4205 * 4206 * EDID length is in bytes 4207 */ 4208 struct dc_sink *dc_link_add_remote_sink( 4209 struct dc_link *link, 4210 const uint8_t *edid, 4211 int len, 4212 struct dc_sink_init_data *init_data) 4213 { 4214 struct dc_sink *dc_sink; 4215 enum dc_edid_status edid_status; 4216 4217 if (len > DC_MAX_EDID_BUFFER_SIZE) { 4218 dm_error("Max EDID buffer size breached!\n"); 4219 return NULL; 4220 } 4221 4222 if (!init_data) { 4223 BREAK_TO_DEBUGGER(); 4224 return NULL; 4225 } 4226 4227 if (!init_data->link) { 4228 BREAK_TO_DEBUGGER(); 4229 return NULL; 4230 } 4231 4232 dc_sink = dc_sink_create(init_data); 4233 4234 if (!dc_sink) 4235 return NULL; 4236 4237 memmove(dc_sink->dc_edid.raw_edid, edid, len); 4238 dc_sink->dc_edid.length = len; 4239 4240 if (!link_add_remote_sink_helper( 4241 link, 4242 dc_sink)) 4243 goto fail_add_sink; 4244 4245 edid_status = dm_helpers_parse_edid_caps( 4246 link, 4247 &dc_sink->dc_edid, 4248 &dc_sink->edid_caps); 4249 4250 /* 4251 * Treat device as no EDID device if EDID 4252 * parsing fails 4253 */ 4254 if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) { 4255 dc_sink->dc_edid.length = 0; 4256 dm_error("Bad EDID, status%d!\n", edid_status); 4257 } 4258 4259 return dc_sink; 4260 4261 fail_add_sink: 4262 dc_sink_release(dc_sink); 4263 return NULL; 4264 } 4265 4266 /* 4267 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link 4268 * 4269 * Note that this just removes the struct dc_sink - it doesn't 4270 * program hardware or alter other members of dc_link 4271 */ 4272 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) 4273 { 4274 int i; 4275 4276 if (!link->sink_count) { 4277 BREAK_TO_DEBUGGER(); 4278 return; 4279 } 4280 4281 for (i = 0; i < link->sink_count; i++) { 4282 if (link->remote_sinks[i] == sink) { 4283 dc_sink_release(sink); 4284 link->remote_sinks[i] = NULL; 4285 4286 /* shrink array to remove empty place */ 4287 while (i < link->sink_count - 1) { 4288 link->remote_sinks[i] = link->remote_sinks[i+1]; 4289 i++; 4290 } 4291 link->remote_sinks[i] = NULL; 4292 link->sink_count--; 4293 return; 4294 } 4295 } 4296 } 4297 4298 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) 4299 { 4300 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; 4301 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; 4302 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; 4303 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; 4304 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; 4305 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; 4306 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; 4307 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; 4308 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; 4309 } 4310 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 4311 { 4312 if (dc->hwss.set_clock) 4313 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 4314 return DC_ERROR_UNEXPECTED; 4315 } 4316 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 4317 { 4318 if (dc->hwss.get_clock) 4319 dc->hwss.get_clock(dc, clock_type, clock_cfg); 4320 } 4321 4322 /* enable/disable eDP PSR without specify stream for eDP */ 4323 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 4324 { 4325 int i; 4326 bool allow_active; 4327 4328 for (i = 0; i < dc->current_state->stream_count ; i++) { 4329 struct dc_link *link; 4330 struct dc_stream_state *stream = dc->current_state->streams[i]; 4331 4332 link = stream->link; 4333 if (!link) 4334 continue; 4335 4336 if (link->psr_settings.psr_feature_enabled) { 4337 if (enable && !link->psr_settings.psr_allow_active) { 4338 allow_active = true; 4339 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 4340 return false; 4341 } else if (!enable && link->psr_settings.psr_allow_active) { 4342 allow_active = false; 4343 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 4344 return false; 4345 } 4346 } 4347 } 4348 4349 return true; 4350 } 4351 4352 void dc_allow_idle_optimizations(struct dc *dc, bool allow) 4353 { 4354 if (dc->debug.disable_idle_power_optimizations) 4355 return; 4356 4357 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 4358 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 4359 return; 4360 4361 if (allow == dc->idle_optimizations_allowed) 4362 return; 4363 4364 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) 4365 dc->idle_optimizations_allowed = allow; 4366 } 4367 4368 /* set min and max memory clock to lowest and highest DPM level, respectively */ 4369 void dc_unlock_memory_clock_frequency(struct dc *dc) 4370 { 4371 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4372 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 4373 4374 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4375 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4376 } 4377 4378 /* set min memory clock to the min required for current mode, max to maxDPM */ 4379 void dc_lock_memory_clock_frequency(struct dc *dc) 4380 { 4381 if (dc->clk_mgr->funcs->get_memclk_states_from_smu) 4382 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 4383 4384 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4385 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 4386 4387 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4388 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4389 } 4390 4391 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 4392 { 4393 struct dc_state *context = dc->current_state; 4394 struct hubp *hubp; 4395 struct pipe_ctx *pipe; 4396 int i; 4397 4398 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4399 pipe = &context->res_ctx.pipe_ctx[i]; 4400 4401 if (pipe->stream != NULL) { 4402 dc->hwss.disable_pixel_data(dc, pipe, true); 4403 4404 // wait for double buffer 4405 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4406 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 4407 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4408 4409 hubp = pipe->plane_res.hubp; 4410 hubp->funcs->set_blank_regs(hubp, true); 4411 } 4412 } 4413 4414 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 4415 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 4416 4417 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4418 pipe = &context->res_ctx.pipe_ctx[i]; 4419 4420 if (pipe->stream != NULL) { 4421 dc->hwss.disable_pixel_data(dc, pipe, false); 4422 4423 hubp = pipe->plane_res.hubp; 4424 hubp->funcs->set_blank_regs(hubp, false); 4425 } 4426 } 4427 } 4428 4429 4430 /** 4431 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 4432 * @dc: pointer to dc of the dm calling this 4433 * @enable: True = transition to DC mode, false = transition back to AC mode 4434 * 4435 * Some SoCs define additional clock limits when in DC mode, DM should 4436 * invoke this function when the platform undergoes a power source transition 4437 * so DC can apply/unapply the limit. This interface may be disruptive to 4438 * the onscreen content. 4439 * 4440 * Context: Triggered by OS through DM interface, or manually by escape calls. 4441 * Need to hold a dclock when doing so. 4442 * 4443 * Return: none (void function) 4444 * 4445 */ 4446 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 4447 { 4448 uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; 4449 unsigned int softMax, maxDPM, funcMin; 4450 bool p_state_change_support; 4451 4452 if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) 4453 return; 4454 4455 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 4456 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; 4457 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 4458 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 4459 4460 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 4461 if (p_state_change_support) { 4462 if (funcMin <= softMax) 4463 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 4464 // else: No-Op 4465 } else { 4466 if (funcMin <= softMax) 4467 blank_and_force_memclk(dc, true, softMax); 4468 // else: No-Op 4469 } 4470 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 4471 if (p_state_change_support) { 4472 if (funcMin <= softMax) 4473 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 4474 // else: No-Op 4475 } else { 4476 if (funcMin <= softMax) 4477 blank_and_force_memclk(dc, true, maxDPM); 4478 // else: No-Op 4479 } 4480 } 4481 dc->clk_mgr->dc_mode_softmax_enabled = enable; 4482 } 4483 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 4484 struct dc_cursor_attributes *cursor_attr) 4485 { 4486 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) 4487 return true; 4488 return false; 4489 } 4490 4491 /* cleanup on driver unload */ 4492 void dc_hardware_release(struct dc *dc) 4493 { 4494 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); 4495 4496 if (dc->hwss.hardware_release) 4497 dc->hwss.hardware_release(dc); 4498 } 4499 4500 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) 4501 { 4502 if (dc->current_state) 4503 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; 4504 } 4505 4506 /* 4507 ***************************************************************************** 4508 * Function: dc_is_dmub_outbox_supported - 4509 * 4510 * @brief 4511 * Checks whether DMUB FW supports outbox notifications, if supported 4512 * DM should register outbox interrupt prior to actually enabling interrupts 4513 * via dc_enable_dmub_outbox 4514 * 4515 * @param 4516 * [in] dc: dc structure 4517 * 4518 * @return 4519 * True if DMUB FW supports outbox notifications, False otherwise 4520 ***************************************************************************** 4521 */ 4522 bool dc_is_dmub_outbox_supported(struct dc *dc) 4523 { 4524 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 4525 if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && 4526 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 4527 !dc->debug.dpia_debug.bits.disable_dpia) 4528 return true; 4529 4530 if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 && 4531 !dc->debug.dpia_debug.bits.disable_dpia) 4532 return true; 4533 4534 /* dmub aux needs dmub notifications to be enabled */ 4535 return dc->debug.enable_dmub_aux_for_legacy_ddc; 4536 } 4537 4538 /* 4539 ***************************************************************************** 4540 * Function: dc_enable_dmub_notifications 4541 * 4542 * @brief 4543 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 4544 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. 4545 * This API shall be removed after switching. 4546 * 4547 * @param 4548 * [in] dc: dc structure 4549 * 4550 * @return 4551 * True if DMUB FW supports outbox notifications, False otherwise 4552 ***************************************************************************** 4553 */ 4554 bool dc_enable_dmub_notifications(struct dc *dc) 4555 { 4556 return dc_is_dmub_outbox_supported(dc); 4557 } 4558 4559 /** 4560 ***************************************************************************** 4561 * Function: dc_enable_dmub_outbox 4562 * 4563 * @brief 4564 * Enables DMUB unsolicited notifications to x86 via outbox 4565 * 4566 * @param 4567 * [in] dc: dc structure 4568 * 4569 * @return 4570 * None 4571 ***************************************************************************** 4572 */ 4573 void dc_enable_dmub_outbox(struct dc *dc) 4574 { 4575 struct dc_context *dc_ctx = dc->ctx; 4576 4577 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 4578 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); 4579 } 4580 4581 /** 4582 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 4583 * Sets port index appropriately for legacy DDC 4584 * @dc: dc structure 4585 * @link_index: link index 4586 * @payload: aux payload 4587 * 4588 * Returns: True if successful, False if failure 4589 */ 4590 bool dc_process_dmub_aux_transfer_async(struct dc *dc, 4591 uint32_t link_index, 4592 struct aux_payload *payload) 4593 { 4594 uint8_t action; 4595 union dmub_rb_cmd cmd = {0}; 4596 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4597 4598 ASSERT(payload->length <= 16); 4599 4600 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 4601 cmd.dp_aux_access.header.payload_bytes = 0; 4602 /* For dpia, ddc_pin is set to NULL */ 4603 if (!dc->links[link_index]->ddc->ddc_pin) 4604 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 4605 else 4606 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 4607 4608 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 4609 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 4610 cmd.dp_aux_access.aux_control.timeout = 0; 4611 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 4612 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 4613 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 4614 4615 /* set aux action */ 4616 if (payload->i2c_over_aux) { 4617 if (payload->write) { 4618 if (payload->mot) 4619 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 4620 else 4621 action = DP_AUX_REQ_ACTION_I2C_WRITE; 4622 } else { 4623 if (payload->mot) 4624 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 4625 else 4626 action = DP_AUX_REQ_ACTION_I2C_READ; 4627 } 4628 } else { 4629 if (payload->write) 4630 action = DP_AUX_REQ_ACTION_DPCD_WRITE; 4631 else 4632 action = DP_AUX_REQ_ACTION_DPCD_READ; 4633 } 4634 4635 cmd.dp_aux_access.aux_control.dpaux.action = action; 4636 4637 if (payload->length && payload->write) { 4638 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 4639 payload->data, 4640 payload->length 4641 ); 4642 } 4643 4644 dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 4645 dc_dmub_srv_cmd_execute(dmub_srv); 4646 dc_dmub_srv_wait_idle(dmub_srv); 4647 4648 return true; 4649 } 4650 4651 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 4652 uint8_t dpia_port_index) 4653 { 4654 uint8_t index, link_index = 0xFF; 4655 4656 for (index = 0; index < dc->link_count; index++) { 4657 /* ddc_hw_inst has dpia port index for dpia links 4658 * and ddc instance for legacy links 4659 */ 4660 if (!dc->links[index]->ddc->ddc_pin) { 4661 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 4662 link_index = index; 4663 break; 4664 } 4665 } 4666 } 4667 ASSERT(link_index != 0xFF); 4668 return link_index; 4669 } 4670 4671 /** 4672 ***************************************************************************** 4673 * Function: dc_process_dmub_set_config_async 4674 * 4675 * @brief 4676 * Submits set_config command to dmub via inbox message 4677 * 4678 * @param 4679 * [in] dc: dc structure 4680 * [in] link_index: link index 4681 * [in] payload: aux payload 4682 * [out] notify: set_config immediate reply 4683 * 4684 * @return 4685 * True if successful, False if failure 4686 ***************************************************************************** 4687 */ 4688 bool dc_process_dmub_set_config_async(struct dc *dc, 4689 uint32_t link_index, 4690 struct set_config_cmd_payload *payload, 4691 struct dmub_notification *notify) 4692 { 4693 union dmub_rb_cmd cmd = {0}; 4694 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4695 bool is_cmd_complete = true; 4696 4697 /* prepare SET_CONFIG command */ 4698 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 4699 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 4700 4701 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 4702 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 4703 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 4704 4705 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { 4706 /* command is not processed by dmub */ 4707 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 4708 return is_cmd_complete; 4709 } 4710 4711 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 4712 if (cmd.set_config_access.header.ret_status == 1) 4713 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 4714 else 4715 /* cmd pending, will receive notification via outbox */ 4716 is_cmd_complete = false; 4717 4718 return is_cmd_complete; 4719 } 4720 4721 /** 4722 ***************************************************************************** 4723 * Function: dc_process_dmub_set_mst_slots 4724 * 4725 * @brief 4726 * Submits mst slot allocation command to dmub via inbox message 4727 * 4728 * @param 4729 * [in] dc: dc structure 4730 * [in] link_index: link index 4731 * [in] mst_alloc_slots: mst slots to be allotted 4732 * [out] mst_slots_in_use: mst slots in use returned in failure case 4733 * 4734 * @return 4735 * DC_OK if successful, DC_ERROR if failure 4736 ***************************************************************************** 4737 */ 4738 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 4739 uint32_t link_index, 4740 uint8_t mst_alloc_slots, 4741 uint8_t *mst_slots_in_use) 4742 { 4743 union dmub_rb_cmd cmd = {0}; 4744 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4745 4746 /* prepare MST_ALLOC_SLOTS command */ 4747 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 4748 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 4749 4750 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 4751 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 4752 4753 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) 4754 /* command is not processed by dmub */ 4755 return DC_ERROR_UNEXPECTED; 4756 4757 /* command processed by dmub, if ret_status is 1 */ 4758 if (cmd.set_config_access.header.ret_status != 1) 4759 /* command processing error */ 4760 return DC_ERROR_UNEXPECTED; 4761 4762 /* command processed and we have a status of 2, mst not enabled in dpia */ 4763 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 4764 return DC_FAIL_UNSUPPORTED_1; 4765 4766 /* previously configured mst alloc and used slots did not match */ 4767 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 4768 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 4769 return DC_NOT_SUPPORTED; 4770 } 4771 4772 return DC_OK; 4773 } 4774 4775 /** 4776 ***************************************************************************** 4777 * Function: dc_process_dmub_dpia_hpd_int_enable 4778 * 4779 * @brief 4780 * Submits dpia hpd int enable command to dmub via inbox message 4781 * 4782 * @param 4783 * [in] dc: dc structure 4784 * [in] hpd_int_enable: 1 for hpd int enable, 0 to disable 4785 * 4786 * @return 4787 * None 4788 ***************************************************************************** 4789 */ 4790 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 4791 uint32_t hpd_int_enable) 4792 { 4793 union dmub_rb_cmd cmd = {0}; 4794 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4795 4796 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 4797 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 4798 4799 dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 4800 dc_dmub_srv_cmd_execute(dmub_srv); 4801 dc_dmub_srv_wait_idle(dmub_srv); 4802 4803 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 4804 } 4805 4806 /** 4807 * dc_disable_accelerated_mode - disable accelerated mode 4808 * @dc: dc structure 4809 */ 4810 void dc_disable_accelerated_mode(struct dc *dc) 4811 { 4812 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 4813 } 4814 4815 4816 /** 4817 ***************************************************************************** 4818 * dc_notify_vsync_int_state() - notifies vsync enable/disable state 4819 * @dc: dc structure 4820 * @stream: stream where vsync int state changed 4821 * @enable: whether vsync is enabled or disabled 4822 * 4823 * Called when vsync is enabled/disabled 4824 * Will notify DMUB to start/stop ABM interrupts after steady state is reached 4825 * 4826 ***************************************************************************** 4827 */ 4828 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 4829 { 4830 int i; 4831 int edp_num; 4832 struct pipe_ctx *pipe = NULL; 4833 struct dc_link *link = stream->sink->link; 4834 struct dc_link *edp_links[MAX_NUM_EDP]; 4835 4836 4837 if (link->psr_settings.psr_feature_enabled) 4838 return; 4839 4840 /*find primary pipe associated with stream*/ 4841 for (i = 0; i < MAX_PIPES; i++) { 4842 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4843 4844 if (pipe->stream == stream && pipe->stream_res.tg) 4845 break; 4846 } 4847 4848 if (i == MAX_PIPES) { 4849 ASSERT(0); 4850 return; 4851 } 4852 4853 get_edp_links(dc, edp_links, &edp_num); 4854 4855 /* Determine panel inst */ 4856 for (i = 0; i < edp_num; i++) { 4857 if (edp_links[i] == link) 4858 break; 4859 } 4860 4861 if (i == edp_num) { 4862 return; 4863 } 4864 4865 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 4866 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 4867 } 4868 /* 4869 * dc_extended_blank_supported: Decide whether extended blank is supported 4870 * 4871 * Extended blank is a freesync optimization feature to be enabled in the future. 4872 * During the extra vblank period gained from freesync, we have the ability to enter z9/z10. 4873 * 4874 * @param [in] dc: Current DC state 4875 * @return: Indicate whether extended blank is supported (true or false) 4876 */ 4877 bool dc_extended_blank_supported(struct dc *dc) 4878 { 4879 return dc->debug.extended_blank_optimization && !dc->debug.disable_z10 4880 && dc->caps.zstate_support && dc->caps.is_apu; 4881 } 4882