/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"

#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

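/* Enable the color management block; the cm_in_bypass debug option forces
 * it into bypass instead.
 */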
static void dpp3_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	unsigned int cm_bypass_mode = 0;

	// debug option: put CM in bypass mode
	if (dpp_base->ctx->dc->debug.cm_in_bypass)
		cm_bypass_mode = 1;

	REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
}

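/* Report which gamma-correction LUT the hardware is currently using:
 * bypass, RAM A or RAM B.
 */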
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
	enum dc_lut_mode mode = LUT_BYPASS;
	uint32_t state_mode;
	uint32_t lut_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);

	if (state_mode == 2) {//Programmable RAM LUT
		REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
		if (lut_mode == 0)
			mode = LUT_RAM_A;
		else
			mode = LUT_RAM_B;
	}

	return mode;
}

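/* Write the PWL base values into the gamma-correction LUT. The LUT index
 * auto-increments, so values are written back-to-back; if the R, G and B
 * curves are identical a single pass suffices, otherwise each color channel
 * is written separately via the write color mask.
 */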
static void dpp3_program_gammcor_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;

	/*fill in the LUT with all base values to be used by pwl module
	 * HW auto increments the LUT index: back-to-back write
	 */
	if (is_rgb_equal(rgb,  num)) {
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

	} else {
		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
	}
}

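/* Power the GAMCOR LUT memory up or down. When the CM low-power feature is
 * enabled, power-down is deferred to the optimized register-write pass.
 */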
static void dpp3_power_on_gamcor_lut(
		struct dpp *dpp_base,
	bool power_on)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
		if (power_on) {
			REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
			REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
		} else {
			dpp_base->ctx->dc->optimized_required = true;
			dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
		}
	} else
		REG_SET(CM_MEM_PWR_CTRL, 0,
				GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
}

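/* Program the CM de-alpha enable and additive blending controls. */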
void dpp3_program_cm_dealpha(
		struct dpp *dpp_base,
	uint32_t enable, uint32_t additive_blending)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET_2(CM_DEALPHA, 0,
			CM_DEALPHA_EN, enable,
			CM_DEALPHA_ABLND, additive_blending);
}

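/* Program the CM bias values for the Cr/R, Y/G and Cb/B channels. */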
void dpp3_program_cm_bias(
	struct dpp *dpp_base,
	struct CM_bias_params *bias_params)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
	REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
			CM_BIAS_Y_G, bias_params->cm_bias_y_g,
			CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
}

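/* Fill in the shift/mask pairs the common CM helper uses to program the
 * GAMCOR region and segment register fields.
 */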
static void dpp3_gamcor_reg_field(
		struct dcn3_dpp *dpp,
		struct dcn3_xfer_func_reg *reg)
{

	reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
	reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;

	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
}

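/* Prepare the GAMCOR LUT for writing: enable all color channels, select
 * RAM A or RAM B as the host-write target and reset the LUT index.
 */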
static void dpp3_configure_gamcor_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 0:1);
	REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
}


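/* Program the gamma-correction transfer function. The inactive LUT bank
 * (RAM A or RAM B) is programmed and then selected for the next frame;
 * a NULL params pointer puts GAMCOR into bypass.
 */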
bool dpp3_program_gamcor_lut(
	struct dpp *dpp_base, const struct pwl_params *params)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	struct dcn3_xfer_func_reg gam_regs;

	dpp3_enable_cm_block(dpp_base);

	if (params == NULL) { //bypass if we have no pwl data
		REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
		if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
			dpp3_power_on_gamcor_lut(dpp_base, false);
		return false;
	}
	dpp3_power_on_gamcor_lut(dpp_base, true);
	REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);

	current_mode = dpp30_get_gamcor_current(dpp_base);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;

	dpp3_power_on_gamcor_lut(dpp_base, true);
	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);

	if (next_mode == LUT_RAM_B) {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b =  REG(CM_GAMCOR_RAMB_OFFSET_B);
		gam_regs.offset_g =  REG(CM_GAMCOR_RAMB_OFFSET_G);
		gam_regs.offset_r =  REG(CM_GAMCOR_RAMB_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
	} else {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b =  REG(CM_GAMCOR_RAMA_OFFSET_B);
		gam_regs.offset_g =  REG(CM_GAMCOR_RAMA_OFFSET_G);
		gam_regs.offset_r =  REG(CM_GAMCOR_RAMA_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
	}

	//get register fields
	dpp3_gamcor_reg_field(dpp, &gam_regs);

	//program register set for LUTA/LUTB
	cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);

	dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
				 next_mode == LUT_RAM_A);

	//select Gamma LUT to use for next frame
	REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1);

	return true;
}

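/* Program the HDR multiplier applied in the CM block. */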
void dpp3_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}


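/* Program the gamut remap matrix into coefficient set A or B, or put the
 * gamut remap block into bypass when no coefficients are provided.
 */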
static void program_gamut_remap(
		struct dcn3_dpp *dpp,
		const uint16_t *regval,
		int select)
{
	uint16_t selection = 0;
	struct color_matrices_reg gam_regs;

	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
				CM_GAMUT_REMAP_MODE, 0);
		return;
	}
	switch (select) {
	case GAMUT_REMAP_COEFF:
		selection = 1;
		break;
		/*this corresponds to GAMUT_REMAP coefficients set B
		 *we don't have common coefficient sets in dcn3ag/dcn3
		 */
	case GAMUT_REMAP_COMA_COEFF:
		selection = 2;
		break;
	default:
		break;
	}

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;


	if (select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else  if (select == GAMUT_REMAP_COMA_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	}
	//select coefficient set to use
	REG_SET(
			CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, selection);
}

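/* Apply a software gamut remap adjustment: convert the fixed-point
 * temperature matrix to register format and program the coefficient set
 * that is not currently active. Bypass and hardware adjustments disable
 * gamut remap.
 */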
void dpp3_cm_set_gamut_remap(
	struct dpp *dpp_base,
	const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	int i = 0;
	int gamut_mode;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		//current coefficient set in use
		REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);

		if (gamut_mode == 0)
			gamut_mode = 1; //use coefficient set A
		else if (gamut_mode == 1)
			gamut_mode = 2;
		else
			gamut_mode = 1;

		//follow dcn2 approach for now - using only coefficient set A
		program_gamut_remap(dpp, arr_reg_val, gamut_mode);
	}
}

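/* Read back the currently selected gamut remap coefficient set, if any,
 * along with which set is in use.
 */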
static void read_gamut_remap(struct dcn3_dpp *dpp,
			     uint16_t *regval,
			     int *select)
{
	struct color_matrices_reg gam_regs;
	uint32_t selection;

	//current coefficient set in use
	REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection);

	*select = selection;

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;

	if (*select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_read_color_matrices(dpp->base.ctx,
					      regval,
					      &gam_regs);

	} else if (*select == GAMUT_REMAP_COMA_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);

		cm_helper_read_color_matrices(dpp->base.ctx,
					      regval,
					      &gam_regs);
	}
}

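/* Return the current gamut remap adjustment: bypass, or the software
 * matrix converted back from the hardware coefficient format.
 */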
void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
			     struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	uint16_t arr_reg_val[12] = {0};
	int select;

	read_gamut_remap(dpp, arr_reg_val, &select);

	if (select == GAMUT_REMAP_BYPASS) {
		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
		return;
	}

	adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
	convert_hw_matrix(adjust->temperature_matrix,
			  arr_reg_val, ARRAY_SIZE(arr_reg_val));
}