1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "core_types.h"

#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "basics/conversion.h"
#include "dcn10_cm_common.h"

#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8

/* Black-level offsets: 0 for RGB/luma, mid-range for chroma channels. */
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000

/* Helpers consumed by the REG_SET/REG_GET/REG_UPDATE macros: register
 * address, DC context and shift/mask tables are all resolved through the
 * local variable "dpp" (struct dcn10_dpp *) in the enclosing function.
 */
#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

/* Element count of a fixed-size array. */
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

/* One input-CSC preset: the 3x4 coefficient/offset matrix (12 raw
 * register words) to program for a given input color space.
 */
struct dcn10_input_csc_matrix {
	enum dc_color_space color_space;
	uint16_t regval[12];
};

/* Selector values for the scaler coefficient RAM filter type. */
enum dcn10_coef_filter_type_sel {
	SCL_COEF_LUMA_VERT_FILTER = 0,
	SCL_COEF_LUMA_HORZ_FILTER = 1,
	SCL_COEF_CHROMA_VERT_FILTER = 2,
	SCL_COEF_CHROMA_HORZ_FILTER = 3,
	SCL_COEF_ALPHA_VERT_FILTER = 4,
	SCL_COEF_ALPHA_HORZ_FILTER = 5
};

enum dscl_autocal_mode {
	AUTOCAL_MODE_OFF = 0,

	/* Autocal calculate the scaling ratio and initial phase and the
	 * DSCL_MODE_SEL must be set to 1
	 */
	AUTOCAL_MODE_AUTOSCALE = 1,
	/* Autocal perform auto centering without replication and the
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOCENTER = 2,
	/* Autocal perform auto centering and auto replication and the
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOREPLICATE = 3
};

enum dscl_mode_sel {
	DSCL_MODE_SCALING_444_BYPASS = 0,
	DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
	DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
	DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
	DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
	DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
	DSCL_MODE_DSCL_BYPASS = 6
};

/* Which gamut-remap coefficient set to program/select; the numeric mode
 * written to CM_GAMUT_REMAP_MODE is derived from this in
 * program_gamut_remap().
 */
enum gamut_remap_select {
	GAMUT_REMAP_BYPASS = 0,
	GAMUT_REMAP_COEFF,
	GAMUT_REMAP_COMA_COEFF,
	GAMUT_REMAP_COMB_COEFF
};

/* Input CSC presets, looked up by color space in dpp1_program_input_csc(). */
static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] =
{
	{COLOR_SPACE_SRGB,
		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
	{COLOR_SPACE_SRGB_LIMITED,
		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
	{COLOR_SPACE_YCBCR601,
		{0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
				0, 0x2000, 0x38b4, 0xe3a6} },
	{COLOR_SPACE_YCBCR601_LIMITED,
		{0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
				0, 0x2568, 0x40de, 0xdd3a} },
	{COLOR_SPACE_YCBCR709,
		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
				0x2000, 0x3b61, 0xe24f} },

	{COLOR_SPACE_YCBCR709_LIMITED,
		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
				0x2568, 0x43ee, 0xdbb2} }
};

/* Program the requested gamut-remap coefficient set (COEFF, COMA or COMB)
 * with the 12 raw register words in @regval, then point
 * CM_GAMUT_REMAP_MODE at that set. A NULL @regval or an explicit
 * GAMUT_REMAP_BYPASS request writes mode 0 (bypass) and touches nothing
 * else.
 */
static void program_gamut_remap(
		struct dcn10_dpp *dpp,
		const uint16_t *regval,
		enum gamut_remap_select select)
{
	uint16_t selection = 0;
	struct color_matrices_reg gam_regs;

	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
				CM_GAMUT_REMAP_MODE, 0);
		return;
	}
	/* Map the coefficient-set selector onto the hardware mode value. */
	switch (select) {
	case GAMUT_REMAP_COEFF:
		selection = 1;
		break;
	case GAMUT_REMAP_COMA_COEFF:
		selection = 2;
		break;
	case GAMUT_REMAP_COMB_COEFF:
		selection = 3;
		break;
	default:
		break;
	}

	/* C11/C12 shift+mask pair; the cm helper derives the layout of the
	 * remaining coefficient pairs from these.
	 */
	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;


	if (select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (select == GAMUT_REMAP_COMA_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);
	}

	/* Activate the coefficient set that was just written. */
	REG_SET(
			CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, selection);

}

/* Apply (or bypass) the SW gamut/temperature adjustment: convert the
 * fixed31_32 temperature matrix to raw register words and program the
 * primary COEFF set. Any non-SW adjust type is treated as bypass.
 */
void dpp1_cm_set_gamut_remap(
	struct dpp *dpp_base,
	const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	int i = 0;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
	}
}

/* Write an output CSC matrix into whichever coefficient set (OCSC or
 * COMB) is currently inactive, then switch modes — this double buffers
 * the update so it takes effect on a frame boundary.
 */
static void dpp1_cm_program_color_matrix(
		struct dcn10_dpp *dpp,
		const uint16_t *regval)
{
	uint32_t ocsc_mode;
	uint32_t cur_mode;
	struct color_matrices_reg gam_regs;

	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* determine which CSC matrix (ocsc or comb) we are using
	 * currently. select the alternate set to double buffer
	 * the CSC update so CSC is updated on frame boundary
	 */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);

	/* Mode 4 selects the OCSC set, mode 5 the COMB set; pick the one
	 * not currently live.
	 */
	if (cur_mode != 4)
		ocsc_mode = 4;
	else
		ocsc_mode = 5;


	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;

	if (ocsc_mode == 4) {

		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

	}

	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);

}

/* Program the default output CSC for @colorspace using the matrix table
 * looked up via find_color_matrix().
 */
void dpp1_cm_set_output_csc_default(
		struct dpp *dpp_base,
		enum dc_color_space colorspace)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	const uint16_t *regval = NULL;
	int arr_size;

	regval = find_color_matrix(colorspace, &arr_size);
	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	dpp1_cm_program_color_matrix(dpp, regval);
}

/* Fill the shift/mask portion of an xfer_func_reg for the regamma
 * (RGAM) transfer-function RAMs.
 */
static void dpp1_cm_get_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	/* NOTE(review): region fields use RAMA names while the end/start
	 * fields use RAMB names — presumably the shift/mask values are
	 * identical for both RAMs; confirm against the register headers.
	 */
	reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}

/* Fill the shift/mask portion of an xfer_func_reg for the degamma
 * (DGAM) transfer-function RAMs; mirrors dpp1_cm_get_reg_field().
 */
static void dpp1_cm_get_degamma_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}

/* Program an explicit (caller-supplied) output CSC matrix. */
void dpp1_cm_set_output_csc_adjustment(
		struct dpp *dpp_base,
		const uint16_t *regval)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	dpp1_cm_program_color_matrix(dpp, regval);
}

/* Power the regamma LUT memory on or off. The force-off field is
 * written 0 to power on and 1 to force the RAM off.
 */
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
	bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
			RGAM_MEM_PWR_FORCE, power_on == true ? 0:1);

}

/* Stream @num PWL entries into the regamma LUT. Each entry is six
 * sequential writes to the same data register: base R/G/B followed by
 * delta R/G/B. The write index auto-advances from wherever
 * dpp1_cm_configure_regamma_lut() left it (index 0).
 */
void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num)
{
	uint32_t i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	for (i = 0 ; i < num; i++) {
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);

	}

}

/* Prepare the regamma LUT for writing: enable all three channel write
 * masks, select RAM A (0) or RAM B (1), and reset the write index to 0.
 */
void dpp1_cm_configure_regamma_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
	REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
}

/*program re gamma RAM A*/
void dpp1_cm_program_regamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);

}

/*program re gamma RAM B*/
void dpp1_cm_program_regamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B); 438 gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B); 439 gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G); 440 gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G); 441 gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R); 442 gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R); 443 gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1); 444 gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33); 445 446 cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); 447 } 448 449 void dpp1_program_input_csc( 450 struct dpp *dpp_base, 451 enum dc_color_space color_space, 452 enum dcn10_input_csc_select input_select, 453 const struct out_csc_color_matrix *tbl_entry) 454 { 455 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 456 int i; 457 int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix); 458 const uint16_t *regval = NULL; 459 uint32_t cur_select = 0; 460 enum dcn10_input_csc_select select; 461 struct color_matrices_reg gam_regs; 462 463 if (input_select == INPUT_CSC_SELECT_BYPASS) { 464 REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); 465 return; 466 } 467 468 if (tbl_entry == NULL) { 469 for (i = 0; i < arr_size; i++) 470 if (dcn10_input_csc_matrix[i].color_space == color_space) { 471 regval = dcn10_input_csc_matrix[i].regval; 472 break; 473 } 474 475 if (regval == NULL) { 476 BREAK_TO_DEBUGGER(); 477 return; 478 } 479 } else { 480 regval = tbl_entry->regval; 481 } 482 483 /* determine which CSC matrix (icsc or coma) we are using 484 * currently. 
select the alternate set to double buffer 485 * the CSC update so CSC is updated on frame boundary 486 */ 487 REG_SET(CM_TEST_DEBUG_INDEX, 0, 488 CM_TEST_DEBUG_INDEX, 9); 489 490 REG_GET(CM_TEST_DEBUG_DATA, 491 CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); 492 493 if (cur_select != INPUT_CSC_SELECT_ICSC) 494 select = INPUT_CSC_SELECT_ICSC; 495 else 496 select = INPUT_CSC_SELECT_COMA; 497 498 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; 499 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; 500 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; 501 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; 502 503 if (select == INPUT_CSC_SELECT_ICSC) { 504 505 gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); 506 gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); 507 508 } else { 509 510 gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); 511 gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); 512 513 } 514 515 cm_helper_program_color_matrices( 516 dpp->base.ctx, 517 regval, 518 &gam_regs); 519 520 REG_SET(CM_ICSC_CONTROL, 0, 521 CM_ICSC_MODE, select); 522 } 523 524 //keep here for now, decide multi dce support later 525 void dpp1_program_bias_and_scale( 526 struct dpp *dpp_base, 527 struct dc_bias_and_scale *params) 528 { 529 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 530 531 REG_SET_2(CM_BNS_VALUES_R, 0, 532 CM_BNS_SCALE_R, params->scale_red, 533 CM_BNS_BIAS_R, params->bias_red); 534 535 REG_SET_2(CM_BNS_VALUES_G, 0, 536 CM_BNS_SCALE_G, params->scale_green, 537 CM_BNS_BIAS_G, params->bias_green); 538 539 REG_SET_2(CM_BNS_VALUES_B, 0, 540 CM_BNS_SCALE_B, params->scale_blue, 541 CM_BNS_BIAS_B, params->bias_blue); 542 543 } 544 545 /*program de gamma RAM B*/ 546 void dpp1_program_degamma_lutb_settings( 547 struct dpp *dpp_base, 548 const struct pwl_params *params) 549 { 550 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 551 struct xfer_func_reg gam_regs; 552 553 dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); 554 555 gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); 556 
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);


	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

/*program de gamma RAM A*/
void dpp1_program_degamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

/* Power the shared degamma/igam LUT memory on (disable=0) or keep the
 * power-down disabled bit set (disable=1) — 0 is written when power_on
 * is requested.
 */
void dpp1_power_on_degamma_lut(
	struct dpp *dpp_base,
	bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
			SHARED_MEM_PWR_DIS, power_on == true ? 0:1);

}

/* Take the CM block out of bypass and set its output rounding mode. */
static void dpp1_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
	REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
}

/* Select the degamma mode: bypass (0), the built-in sRGB ROM (1) or
 * the built-in xvYCC ROM (2). Unknown modes trap to the debugger.
 */
void dpp1_set_degamma(
		struct dpp *dpp_base,
		enum ipp_degamma_mode mode)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	dpp1_enable_cm_block(dpp_base);

	switch (mode) {
	case IPP_DEGAMMA_MODE_BYPASS:
		/* Setting de gamma bypass for now */
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
		break;
	case IPP_DEGAMMA_MODE_HW_sRGB:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
		break;
	case IPP_DEGAMMA_MODE_HW_xvYCC:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}

/* Switch the live degamma LUT to RAM A (mode 3) or RAM B (mode 4). */
void dpp1_degamma_ram_select(
		struct dpp *dpp_base,
		bool use_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	if (use_ram_a)
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
	else
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);

}

/* Report which degamma RAM is live. Returns true with *ram_a_inuse set
 * when the status indicates DGAM RAM A (9) or RAM B (10); returns false
 * (leaving *ram_a_inuse untouched) for any other status.
 */
static bool dpp1_degamma_ram_inuse(
		struct dpp *dpp_base,
		bool *ram_a_inuse)
{
	bool ret = false;
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	if (status_reg == 9) {
		*ram_a_inuse = true;
		ret = true;
	} else if (status_reg == 10) {
		*ram_a_inuse = false;
		ret = true;
	}
	return ret;
}

/* Stream @num degamma PWL entries into RAM A or RAM B. Disables host
 * access to the IGAM LUT first (the RW control register is shared),
 * enables all channel write masks, selects the target RAM, resets the
 * index, then writes base R/G/B and delta R/G/B per entry.
 */
void dpp1_program_degamma_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;

	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
			CM_DGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
			is_ram_a == true ? 0:1);

	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
	for (i = 0 ; i < num; i++) {
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
	}
}

/* Full degamma PWL update: power the LUT RAM on, program the settings
 * and entries into whichever RAM is NOT currently live, then flip the
 * live selection to the freshly written RAM (ping-pong update).
 */
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
		const struct pwl_params *params)
{
	bool is_ram_a = true;

	dpp1_power_on_degamma_lut(dpp_base, true);
	dpp1_enable_cm_block(dpp_base);
	dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
	if (is_ram_a == true)
		dpp1_program_degamma_lutb_settings(dpp_base, params);
	else
		dpp1_program_degamma_luta_settings(dpp_base, params);

	dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
			params->hw_points_num, !is_ram_a);
	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
}

/* Put the whole DPP pixel pipeline into a pass-through configuration:
 * ARGB8888 input, no expansion, CM block bypassed (when the hardware
 * has the bypass field), degamma off.
 */
void dpp1_full_bypass(struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	/* Input pixel format: ARGB8888 */
	REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
			CNVC_SURFACE_PIXEL_FORMAT, 0x8);

	/* Zero expansion */
	REG_SET_3(FORMAT_CONTROL, 0,
			CNVC_BYPASS, 0,
			FORMAT_CONTROL__ALPHA_EN, 0,
			FORMAT_EXPANSION_MODE, 0);

	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
	if (dpp->tf_mask->CM_BYPASS_EN)
		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);

	/* Setting degamma bypass for now */
	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}

/* Report which input-gamma RAM is live, decoding the combined
 * IGAM/DGAM config status. Returns false for unrecognized status values
 * (*ram_a_inuse is then left untouched).
 */
static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
							bool *ram_a_inuse)
{
	bool in_use = false;
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	// 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
	if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
		*ram_a_inuse = true;
		in_use = true;
	// 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
	} else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
		*ram_a_inuse = false;
		in_use = true;
	}
	return in_use;
}

/*
 * Input gamma LUT currently supports 256 values only. This means input color
 * can have a maximum of 8 bits per channel (= 256 possible values) in order to
 * have a one-to-one mapping with the LUT. Truncation will occur with color
 * values greater than 8 bits.
 *
 * In the future, this function should support additional input gamma methods,
 * such as piecewise linear mapping, and input gamma bypass.
 */
void dpp1_program_input_lut(
		struct dpp *dpp_base,
		const struct dc_gamma *gamma)
{
	int i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	bool rama_occupied = false;
	uint32_t ram_num;
	// Power on LUT memory.
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
	dpp1_enable_cm_block(dpp_base);
	// Determine whether to use RAM A or RAM B
	dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
	if (!rama_occupied)
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
	else
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
	// RW mode is 256-entry LUT
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
	// IGAM Input format should be 8 bits per channel.
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
	// Do not mask any R,G,B values
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
	// LUT-256, unsigned, integer, new u0.12 format
	REG_UPDATE_3(
		CM_IGAM_CONTROL,
		CM_IGAM_LUT_FORMAT_R, 3,
		CM_IGAM_LUT_FORMAT_G, 3,
		CM_IGAM_LUT_FORMAT_B, 3);
	// Start at index 0 of IGAM LUT
	REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
	for (i = 0; i < gamma->num_entries; i++) {
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.red[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.green[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.blue[i]));
	}
	// Power off LUT memory
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
	// Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
	// NOTE(review): ram_num is written here but never used; presumably the
	// read-back serves to post/confirm the mode change — verify.
	REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}

/* Program the HDR multiplier coefficient applied in the CM block. */
void dpp1_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}