1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include "dm_services.h"
27
28 #include "core_types.h"
29
30 #include "reg_helper.h"
31 #include "dcn10/dcn10_dpp.h"
32 #include "basics/conversion.h"
33 #include "dcn10/dcn10_cm_common.h"
34
/* Scaler filter coefficient table dimensions */
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8

/* Default black-level offsets for bias/scale (RGB/luma vs chroma midpoint) */
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000

/* Register accessors consumed by the reg_helper macros below.
 * NOTE: they expand against a local variable named "dpp"
 * (struct dcn10_dpp *), which therefore must be in scope at every
 * REG_SET/REG_UPDATE/REG_GET call site in this file.
 */
#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

/* Element count of a fixed-size array (same idea as kernel ARRAY_SIZE) */
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
53
54
/* Selects which scaler coefficient bank (per plane/direction) is addressed
 * when programming filter taps.
 */
enum dcn10_coef_filter_type_sel {
	SCL_COEF_LUMA_VERT_FILTER = 0,
	SCL_COEF_LUMA_HORZ_FILTER = 1,
	SCL_COEF_CHROMA_VERT_FILTER = 2,
	SCL_COEF_CHROMA_HORZ_FILTER = 3,
	SCL_COEF_ALPHA_VERT_FILTER = 4,
	SCL_COEF_ALPHA_HORZ_FILTER = 5
};
63
/* DSCL auto-calibration modes; each mode constrains the value that
 * DSCL_MODE_SEL must be programmed with (see per-value comments).
 */
enum dscl_autocal_mode {
	AUTOCAL_MODE_OFF = 0,

	/* Autocal calculate the scaling ratio and initial phase and the
	 * DSCL_MODE_SEL must be set to 1
	 */
	AUTOCAL_MODE_AUTOSCALE = 1,
	/* Autocal perform auto centering without replication and the
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOCENTER = 2,
	/* Autocal perform auto centering and auto replication and the
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOREPLICATE = 3
};
80
/* DSCL datapath mode: which components are scaled vs bypassed, and the
 * full-bypass option for the whole scaler block.
 */
enum dscl_mode_sel {
	DSCL_MODE_SCALING_444_BYPASS = 0,
	DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
	DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
	DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
	DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
	DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
	DSCL_MODE_DSCL_BYPASS = 6
};
90
program_gamut_remap(struct dcn10_dpp * dpp,const uint16_t * regval,enum gamut_remap_select select)91 static void program_gamut_remap(
92 struct dcn10_dpp *dpp,
93 const uint16_t *regval,
94 enum gamut_remap_select select)
95 {
96 uint16_t selection = 0;
97 struct color_matrices_reg gam_regs;
98
99 if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
100 REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
101 CM_GAMUT_REMAP_MODE, 0);
102 return;
103 }
104 switch (select) {
105 case GAMUT_REMAP_COEFF:
106 selection = 1;
107 break;
108 case GAMUT_REMAP_COMA_COEFF:
109 selection = 2;
110 break;
111 case GAMUT_REMAP_COMB_COEFF:
112 selection = 3;
113 break;
114 default:
115 break;
116 }
117
118 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
119 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
120 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
121 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
122
123
124 if (select == GAMUT_REMAP_COEFF) {
125 gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
126 gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
127
128 cm_helper_program_color_matrices(
129 dpp->base.ctx,
130 regval,
131 &gam_regs);
132
133 } else if (select == GAMUT_REMAP_COMA_COEFF) {
134
135 gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
136 gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
137
138 cm_helper_program_color_matrices(
139 dpp->base.ctx,
140 regval,
141 &gam_regs);
142
143 } else {
144
145 gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
146 gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
147
148 cm_helper_program_color_matrices(
149 dpp->base.ctx,
150 regval,
151 &gam_regs);
152 }
153
154 REG_SET(
155 CM_GAMUT_REMAP_CONTROL, 0,
156 CM_GAMUT_REMAP_MODE, selection);
157
158 }
159
dpp1_cm_set_gamut_remap(struct dpp * dpp_base,const struct dpp_grph_csc_adjustment * adjust)160 void dpp1_cm_set_gamut_remap(
161 struct dpp *dpp_base,
162 const struct dpp_grph_csc_adjustment *adjust)
163 {
164 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
165 int i = 0;
166
167 if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
168 /* Bypass if type is bypass or hw */
169 program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
170 else {
171 struct fixed31_32 arr_matrix[12];
172 uint16_t arr_reg_val[12];
173
174 for (i = 0; i < 12; i++)
175 arr_matrix[i] = adjust->temperature_matrix[i];
176
177 convert_float_matrix(
178 arr_reg_val, arr_matrix, 12);
179
180 program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
181 }
182 }
183
/*
 * Read back the active gamut remap mode and, when a coefficient set is
 * selected, its 12 coefficients in HW register format. On bypass, regval
 * is left untouched.
 */
static void read_gamut_remap(struct dcn10_dpp *dpp,
			uint16_t *regval,
			enum gamut_remap_select *select)
{
	struct color_matrices_reg gam_regs;
	uint32_t selection;

	REG_GET(CM_GAMUT_REMAP_CONTROL,
			CM_GAMUT_REMAP_MODE, &selection);
	*select = selection;

	/* shift/mask layout is shared by all three coefficient sets */
	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;

	switch (*select) {
	case GAMUT_REMAP_COEFF:
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
		break;
	case GAMUT_REMAP_COMA_COEFF:
		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
		break;
	case GAMUT_REMAP_COMB_COEFF:
		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
		break;
	default:
		/* bypass (or unknown mode): nothing to read */
		return;
	}

	cm_helper_read_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);
}
232
dpp1_cm_get_gamut_remap(struct dpp * dpp_base,struct dpp_grph_csc_adjustment * adjust)233 void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
234 struct dpp_grph_csc_adjustment *adjust)
235 {
236 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
237 uint16_t arr_reg_val[12] = {0};
238 enum gamut_remap_select select;
239
240 read_gamut_remap(dpp, arr_reg_val, &select);
241
242 if (select == GAMUT_REMAP_BYPASS) {
243 adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
244 return;
245 }
246
247 adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
248 convert_hw_matrix(adjust->temperature_matrix,
249 arr_reg_val, ARRAY_SIZE(arr_reg_val));
250 }
251
/*
 * Load a 3x4 output CSC matrix into whichever of the two coefficient sets
 * (OCSC or COMB) is currently inactive, then switch CM_OCSC_MODE to that
 * set. Double-buffering this way makes the CSC update latch on a frame
 * boundary instead of mid-frame.
 */
static void dpp1_cm_program_color_matrix(
		struct dcn10_dpp *dpp,
		const uint16_t *regval)
{
	uint32_t ocsc_mode;
	uint32_t cur_mode;
	struct color_matrices_reg gam_regs;

	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* determine which CSC matrix (ocsc or comb) we are using
	 * currently. select the alternate set to double buffer
	 * the CSC update so CSC is updated on frame boundary
	 */
	/* debug bus index 9 exposes the currently active OCSC mode */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);

	/* mode 4 = OCSC coefficient set, mode 5 = COMB set; pick the other */
	if (cur_mode != 4)
		ocsc_mode = 4;
	else
		ocsc_mode = 5;


	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;

	if (ocsc_mode == 4) {

		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

	}

	/* write coefficients first, then flip the active mode */
	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);

}
306
dpp1_cm_set_output_csc_default(struct dpp * dpp_base,enum dc_color_space colorspace)307 void dpp1_cm_set_output_csc_default(
308 struct dpp *dpp_base,
309 enum dc_color_space colorspace)
310 {
311 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
312 const uint16_t *regval = NULL;
313 int arr_size;
314
315 regval = find_color_matrix(colorspace, &arr_size);
316 if (regval == NULL) {
317 BREAK_TO_DEBUGGER();
318 return;
319 }
320
321 dpp1_cm_program_color_matrix(dpp, regval);
322 }
323
/*
 * Fill the generic xfer_func_reg shift/mask fields from this DPP's regamma
 * (RGAM) register layout, so cm_helper_program_xfer_func() can be shared
 * across RAM A/B. RAMA fields cover region 0/1 layout; RAMB blue-channel
 * fields are used as the template for the end/start control fields.
 */
static void dpp1_cm_get_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
350
/*
 * Same as dpp1_cm_get_reg_field() but for the degamma (DGAM) register
 * layout: fills the shared xfer_func_reg shift/mask fields used by
 * cm_helper_program_xfer_func().
 */
static void dpp1_cm_get_degamma_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
/*
 * Program a caller-supplied output CSC matrix (HW register format) using
 * the double-buffered OCSC/COMB path.
 */
void dpp1_cm_set_output_csc_adjustment(
		struct dpp *dpp_base,
		const uint16_t *regval)
{
	dpp1_cm_program_color_matrix(TO_DCN10_DPP(dpp_base), regval);
}
385
/*
 * Power the regamma LUT memory on or off.
 * RGAM_MEM_PWR_FORCE = 0 releases the power force (memory powered),
 * 1 forces the memory off. (Idiom fix: dropped the redundant
 * "== true" comparison on the bool parameter.)
 */
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
				  bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
		RGAM_MEM_PWR_FORCE, power_on ? 0 : 1);
}
395
/*
 * Write a regamma PWL into the currently selected LUT RAM (see
 * dpp1_cm_configure_regamma_lut for RAM selection and index reset).
 * Each point is six sequential data-port writes in fixed order:
 * R, G, B base values, then R, G, B deltas. The writes are batched as a
 * register sequence and submitted at the end.
 */
void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num)
{
	uint32_t i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SEQ_START();

	for (i = 0 ; i < num; i++) {
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
	}

	/* flush the batched sequence and wait for completion */
	REG_SEQ_SUBMIT();
	REG_SEQ_WAIT_DONE();
}
418
/*
 * Prepare the regamma LUT for writing: enable writes on all three color
 * channels (mask 7 = R|G|B), select the target RAM (0 = RAM A, 1 = RAM B),
 * and reset the write index to 0. (Idiom fix: dropped the redundant
 * "== true" comparison on the bool parameter.)
 */
void dpp1_cm_configure_regamma_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_SEL, is_ram_a ? 0 : 1);
	REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
}
431
/*
 * Program regamma RAM A control/region registers from the PWL parameters.
 * The shift/mask layout comes from dpp1_cm_get_reg_field(); only the RAM A
 * register addresses are filled in here before handing off to the shared
 * cm_helper_program_xfer_func().
 */
void dpp1_cm_program_regamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);

}
460
/*
 * Program regamma RAM B control/region registers from the PWL parameters.
 * Mirror image of dpp1_cm_program_regamma_luta_settings() for the B bank.
 */
void dpp1_cm_program_regamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
488
dpp1_program_input_csc(struct dpp * dpp_base,enum dc_color_space color_space,enum dcn10_input_csc_select input_select,const struct out_csc_color_matrix * tbl_entry)489 void dpp1_program_input_csc(
490 struct dpp *dpp_base,
491 enum dc_color_space color_space,
492 enum dcn10_input_csc_select input_select,
493 const struct out_csc_color_matrix *tbl_entry)
494 {
495 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
496 int i;
497 int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
498 const uint16_t *regval = NULL;
499 uint32_t cur_select = 0;
500 enum dcn10_input_csc_select select;
501 struct color_matrices_reg gam_regs;
502
503 if (input_select == INPUT_CSC_SELECT_BYPASS) {
504 REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
505 return;
506 }
507
508 if (tbl_entry == NULL) {
509 for (i = 0; i < arr_size; i++)
510 if (dpp_input_csc_matrix[i].color_space == color_space) {
511 regval = dpp_input_csc_matrix[i].regval;
512 break;
513 }
514
515 if (regval == NULL) {
516 BREAK_TO_DEBUGGER();
517 return;
518 }
519 } else {
520 regval = tbl_entry->regval;
521 }
522
523 /* determine which CSC matrix (icsc or coma) we are using
524 * currently. select the alternate set to double buffer
525 * the CSC update so CSC is updated on frame boundary
526 */
527 REG_SET(CM_TEST_DEBUG_INDEX, 0,
528 CM_TEST_DEBUG_INDEX, 9);
529
530 REG_GET(CM_TEST_DEBUG_DATA,
531 CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);
532
533 if (cur_select != INPUT_CSC_SELECT_ICSC)
534 select = INPUT_CSC_SELECT_ICSC;
535 else
536 select = INPUT_CSC_SELECT_COMA;
537
538 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
539 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
540 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
541 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
542
543 if (select == INPUT_CSC_SELECT_ICSC) {
544
545 gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
546 gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
547
548 } else {
549
550 gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
551 gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
552
553 }
554
555 cm_helper_program_color_matrices(
556 dpp->base.ctx,
557 regval,
558 &gam_regs);
559
560 REG_SET(CM_ICSC_CONTROL, 0,
561 CM_ICSC_MODE, select);
562 }
563
//keep here for now, decide multi dce support later
/*
 * Program the CM bias-and-scale stage: per-channel scale and bias values
 * are written verbatim (HW register format expected from the caller).
 */
void dpp1_program_bias_and_scale(
	struct dpp *dpp_base,
	struct dc_bias_and_scale *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET_2(CM_BNS_VALUES_R, 0,
			CM_BNS_SCALE_R, params->scale_red,
			CM_BNS_BIAS_R, params->bias_red);

	REG_SET_2(CM_BNS_VALUES_G, 0,
			CM_BNS_SCALE_G, params->scale_green,
			CM_BNS_BIAS_G, params->bias_green);

	REG_SET_2(CM_BNS_VALUES_B, 0,
			CM_BNS_SCALE_B, params->scale_blue,
			CM_BNS_BIAS_B, params->bias_blue);

}
584
/*
 * Program degamma RAM B control/region registers from the PWL parameters.
 * Shift/mask layout comes from dpp1_cm_get_degamma_reg_field(); note the
 * degamma region range ends at REGION_14_15 (fewer regions than regamma).
 */
void dpp1_program_degamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);


	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
613
/*
 * Program degamma RAM A control/region registers from the PWL parameters.
 * Mirror image of dpp1_program_degamma_lutb_settings() for the A bank.
 */
void dpp1_program_degamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
641
/*
 * Power the shared degamma/ingamma LUT memory on or off.
 * SHARED_MEM_PWR_DIS = 1 disables the power-down (memory on), 0 re-enables
 * power-down.
 */
void dpp1_power_on_degamma_lut(
	struct dpp *dpp_base,
	bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	uint32_t pwr_dis = power_on ? 0 : 1;

	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, pwr_dis);
}
652
/*
 * Take the color module out of bypass: set the CM output round/truncate
 * mode to 8 and clear CM_BYPASS_EN so the CM pipeline processes pixels.
 */
static void dpp1_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
	REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
}
661
dpp1_set_degamma(struct dpp * dpp_base,enum ipp_degamma_mode mode)662 void dpp1_set_degamma(
663 struct dpp *dpp_base,
664 enum ipp_degamma_mode mode)
665 {
666 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
667 dpp1_enable_cm_block(dpp_base);
668
669 switch (mode) {
670 case IPP_DEGAMMA_MODE_BYPASS:
671 /* Setting de gamma bypass for now */
672 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
673 break;
674 case IPP_DEGAMMA_MODE_HW_sRGB:
675 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
676 break;
677 case IPP_DEGAMMA_MODE_HW_xvYCC:
678 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
679 break;
680 case IPP_DEGAMMA_MODE_USER_PWL:
681 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
682 break;
683 default:
684 BREAK_TO_DEBUGGER();
685 break;
686 }
687 }
688
/*
 * Switch the active degamma PWL RAM: mode 3 selects RAM A, mode 4 RAM B.
 */
void dpp1_degamma_ram_select(
		struct dpp *dpp_base,
		bool use_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, use_ram_a ? 3 : 4);
}
701
/*
 * Query which degamma PWL RAM the HW is currently using.
 * Returns true and sets *ram_a_inuse when the status is conclusive
 * (9 = RAM A, 10 = RAM B); returns false otherwise, leaving *ram_a_inuse
 * unchanged.
 */
static bool dpp1_degamma_ram_inuse(
		struct dpp *dpp_base,
		bool *ram_a_inuse)
{
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	switch (status_reg) {
	case 9:
		*ram_a_inuse = true;
		return true;
	case 10:
		*ram_a_inuse = false;
		return true;
	default:
		return false;
	}
}
722
/*
 * Write a degamma PWL into RAM A (is_ram_a) or RAM B (!is_ram_a).
 * Sequence: disable host access to the IGAM LUT, enable writes on all
 * three channels (mask 7), select the target RAM, reset the index, then
 * stream six data words per point (R/G/B bases followed by R/G/B deltas).
 */
void dpp1_program_degamma_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;

	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
		   CM_DGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
		   is_ram_a == true ? 0:1);

	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
	for (i = 0 ; i < num; i++) {
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_DGAM_LUT_DATA, 0,
			CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
			CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
			CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
	}
}
752
/*
 * Program a user degamma PWL using ping-pong RAM selection: write the
 * curve into whichever RAM is NOT currently in use, then switch to it.
 *
 * NOTE(review): the return value of dpp1_degamma_ram_inuse() is ignored;
 * if neither RAM reports in-use, is_ram_a keeps its default of true, so
 * RAM B is written and selected. Presumably intentional as a cold-start
 * default — confirm.
 */
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
				 const struct pwl_params *params)
{
	bool is_ram_a = true;

	dpp1_power_on_degamma_lut(dpp_base, true);
	dpp1_enable_cm_block(dpp_base);
	dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
	/* write settings and data into the idle RAM, then flip to it */
	if (is_ram_a == true)
		dpp1_program_degamma_lutb_settings(dpp_base, params);
	else
		dpp1_program_degamma_luta_settings(dpp_base, params);

	dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
				 params->hw_points_num, !is_ram_a);
	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
}
770
/*
 * Put the DPP into full bypass: fixed ARGB8888 input format, no format
 * conversion, color module bypassed, degamma LUT disabled. Handles both
 * register variants (CM_BYPASS_EN vs CM_BYPASS) depending on which field
 * this ASIC's mask table defines.
 */
void dpp1_full_bypass(struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	/* Input pixel format: ARGB8888 */
	REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
			CNVC_SURFACE_PIXEL_FORMAT, 0x8);

	/* Zero expansion */
	REG_SET_3(FORMAT_CONTROL, 0,
			CNVC_BYPASS, 0,
			FORMAT_CONTROL__ALPHA_EN, 0,
			FORMAT_EXPANSION_MODE, 0);

	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
	if (dpp->tf_mask->CM_BYPASS_EN)
		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
	else
		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);

	/* Setting degamma bypass for now */
	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}
794
/*
 * Query which input-gamma (IGAM) LUT RAM the HW is currently using.
 * Returns true and sets *ram_a_inuse when the config status is
 * conclusive; returns false (leaving *ram_a_inuse unchanged) otherwise.
 */
static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
							bool *ram_a_inuse)
{
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	switch (status_reg) {
	case 1: /* IGAM_RAMA */
	case 3: /* IGAM_RAMA & DGAM_ROMA */
	case 4: /* IGAM_RAMA & DGAM_ROMB */
		*ram_a_inuse = true;
		return true;
	case 2: /* IGAM_RAMB */
	case 5: /* IGAM_RAMB & DGAM_ROMA */
	case 6: /* IGAM_RAMB & DGAM_ROMB */
		*ram_a_inuse = false;
		return true;
	default:
		return false;
	}
}
816
817 /*
818 * Input gamma LUT currently supports 256 values only. This means input color
819 * can have a maximum of 8 bits per channel (= 256 possible values) in order to
820 * have a one-to-one mapping with the LUT. Truncation will occur with color
821 * values greater than 8 bits.
822 *
823 * In the future, this function should support additional input gamma methods,
824 * such as piecewise linear mapping, and input gamma bypass.
825 */
dpp1_program_input_lut(struct dpp * dpp_base,const struct dc_gamma * gamma)826 void dpp1_program_input_lut(
827 struct dpp *dpp_base,
828 const struct dc_gamma *gamma)
829 {
830 int i;
831 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
832 bool rama_occupied = false;
833 uint32_t ram_num;
834 // Power on LUT memory.
835 REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
836 dpp1_enable_cm_block(dpp_base);
837 // Determine whether to use RAM A or RAM B
838 dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
839 if (!rama_occupied)
840 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
841 else
842 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
843 // RW mode is 256-entry LUT
844 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
845 // IGAM Input format should be 8 bits per channel.
846 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
847 // Do not mask any R,G,B values
848 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
849 // LUT-256, unsigned, integer, new u0.12 format
850 REG_UPDATE_3(
851 CM_IGAM_CONTROL,
852 CM_IGAM_LUT_FORMAT_R, 3,
853 CM_IGAM_LUT_FORMAT_G, 3,
854 CM_IGAM_LUT_FORMAT_B, 3);
855 // Start at index 0 of IGAM LUT
856 REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
857 for (i = 0; i < gamma->num_entries; i++) {
858 REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
859 dc_fixpt_round(
860 gamma->entries.red[i]));
861 REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
862 dc_fixpt_round(
863 gamma->entries.green[i]));
864 REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
865 dc_fixpt_round(
866 gamma->entries.blue[i]));
867 }
868 // Power off LUT memory
869 REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
870 // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
871 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
872 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
873 }
874
/*
 * Program the CM HDR multiplier coefficient. The multiplier value is
 * written verbatim; its fixed-point format is defined by the HW register
 * (assumed pre-converted by the caller — TODO confirm format).
 */
void dpp1_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
883