/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"

#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

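/*
 * Enable the DPP color management (CM) block, or put it in bypass when the
 * cm_in_bypass debug option is set.
 */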
static void dpp3_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	unsigned int cm_bypass_mode = 0;

	// debug option: put CM in bypass mode
	if (dpp_base->ctx->dc->debug.cm_in_bypass)
		cm_bypass_mode = 1;

	REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
}

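/*
 * Report which gamma-correction (GAMCOR) LUT the hardware is currently
 * using: bypass, RAM A or RAM B.
 */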
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
	/* default to bypass: covers state_mode 0 and any unexpected value,
	 * so the function never returns an uninitialized mode
	 */
	enum dc_lut_mode mode = LUT_BYPASS;
	uint32_t state_mode;
	uint32_t lut_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);

	if (state_mode == 2) {//Programmable RAM LUT
		REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
		if (lut_mode == 0)
			mode = LUT_RAM_A;
		else
			mode = LUT_RAM_B;
	}

	return mode;
}

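/*
 * Write the PWL base values into the GAMCOR LUT. When the R, G and B curves
 * are identical a single pass programs all channels at once; otherwise each
 * color channel is written separately via the write-color mask.
 */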
static void dpp3_program_gammcor_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;

	/*fill in the LUT with all base values to be used by pwl module
	 * HW auto increments the LUT index: back-to-back write
	 */
	if (is_rgb_equal(rgb, num)) {
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

	} else {
		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
	}
}

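/*
 * Power the GAMCOR LUT memory up or down. With CM memory low-power enabled,
 * power-down is deferred (flagged on the DPP) and optimized_required is set
 * so the write can be applied later; otherwise the memory power state is
 * programmed immediately.
 */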
static void dpp3_power_on_gamcor_lut(
		struct dpp *dpp_base,
		bool power_on)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
		if (power_on) {
			REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
			REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
		} else {
			dpp_base->ctx->dc->optimized_required = true;
			dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
		}
	} else
		REG_SET(CM_MEM_PWR_CTRL, 0,
				GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
}

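/* Program CM de-alpha enable and its additive blending option. */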
void dpp3_program_cm_dealpha(
		struct dpp *dpp_base,
		uint32_t enable, uint32_t additive_blending)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET_2(CM_DEALPHA, 0,
			CM_DEALPHA_EN, enable,
			CM_DEALPHA_ABLND, additive_blending);
}

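/* Program the CM bias values for the Cr/R, Y/G and Cb/B channels. */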
void dpp3_program_cm_bias(
		struct dpp *dpp_base,
		struct CM_bias_params *bias_params)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
	REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
			CM_BIAS_Y_G, bias_params->cm_bias_y_g,
			CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
}

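/*
 * Fill in the shift/mask pairs for the GAMCOR transfer-function register
 * fields; RAM A and RAM B share the same field layout, so the RAMA
 * definitions are used for both.
 */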
static void dpp3_gamcor_reg_field(
		struct dcn3_dpp *dpp,
		struct dcn3_xfer_func_reg *reg)
{
	reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
	reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;

	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
}

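/*
 * Prepare the GAMCOR LUT for host writes: enable all color channels in the
 * write mask, select RAM A or RAM B as the host-write target, and reset the
 * LUT write index to 0.
 */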
static void dpp3_configure_gamcor_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 0:1);
	REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
}

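/*
 * Program the gamma-correction LUT from the given PWL parameters. The RAM
 * bank not currently in use is powered on, written, and then selected for
 * the next frame (double buffering). When no PWL data is provided the block
 * is left in bypass and false is returned.
 */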
bool dpp3_program_gamcor_lut(
	struct dpp *dpp_base, const struct pwl_params *params)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	struct dcn3_xfer_func_reg gam_regs;

	dpp3_enable_cm_block(dpp_base);

	if (params == NULL) { //bypass if we have no pwl data
		REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
		if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
			dpp3_power_on_gamcor_lut(dpp_base, false);
		return false;
	}
	dpp3_power_on_gamcor_lut(dpp_base, true);
	REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);

	current_mode = dpp30_get_gamcor_current(dpp_base);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;

	dpp3_power_on_gamcor_lut(dpp_base, true);
	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);

	if (next_mode == LUT_RAM_B) {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B);
		gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G);
		gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
	} else {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B);
		gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G);
		gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
	}

	//get register fields
	dpp3_gamcor_reg_field(dpp, &gam_regs);

	//program register set for LUTA/LUTB
	cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);

	dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
			next_mode == LUT_RAM_A);

	//select Gamma LUT to use for next frame
	REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1);

	return true;
}

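/* Program the CM HDR multiplier coefficient. */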
void dpp3_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}

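/*
 * Program the gamut remap matrix into coefficient set A or B, or put the
 * block into bypass when no coefficients are given, then select the mode
 * to use.
 */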
static void program_gamut_remap(
		struct dcn3_dpp *dpp,
		const uint16_t *regval,
		int select)
{
	uint16_t selection = 0;
	struct color_matrices_reg gam_regs;

	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
				CM_GAMUT_REMAP_MODE, 0);
		return;
	}
	switch (select) {
	case GAMUT_REMAP_COEFF:
		selection = 1;
		break;
	/*this corresponds to GAMUT_REMAP coefficients set B
	 *we don't have common coefficient sets in dcn3ag/dcn3
	 */
	case GAMUT_REMAP_COMA_COEFF:
		selection = 2;
		break;
	default:
		break;
	}

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;

	if (select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (select == GAMUT_REMAP_COMA_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);
	}

	//select coefficient set to use
	REG_SET(
			CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, selection);
}

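/*
 * Apply a SW gamut adjustment: convert the fixed-point temperature matrix
 * to register format and program a coefficient set chosen based on the one
 * currently in use; any other adjustment type puts gamut remap in bypass.
 */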
void dpp3_cm_set_gamut_remap(
		struct dpp *dpp_base,
		const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	int i = 0;
	int gamut_mode;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		//current coefficient set in use
		REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);

		if (gamut_mode == 0)
			gamut_mode = 1; //use coefficient set A
		else if (gamut_mode == 1)
			gamut_mode = 2;
		else
			gamut_mode = 1;

		//follow dcn2 approach for now - using only coefficient set A
		program_gamut_remap(dpp, arr_reg_val, gamut_mode);
	}
}