Line data Source code
1 : /*
2 : * Copyright 2017 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 : #include <linux/module.h>
24 : #include <linux/slab.h>
25 : #include <linux/fb.h>
26 : #include "linux/delay.h"
27 : #include <linux/types.h>
28 : #include <linux/pci.h>
29 :
30 : #include "smumgr.h"
31 : #include "pp_debug.h"
32 : #include "ci_smumgr.h"
33 : #include "ppsmc.h"
34 : #include "smu7_hwmgr.h"
35 : #include "hardwaremanager.h"
36 : #include "ppatomctrl.h"
37 : #include "cgs_common.h"
38 : #include "atombios.h"
39 : #include "pppcielanes.h"
40 : #include "smu7_smumgr.h"
41 :
42 : #include "smu/smu_7_0_1_d.h"
43 : #include "smu/smu_7_0_1_sh_mask.h"
44 :
45 : #include "dce/dce_8_0_d.h"
46 : #include "dce/dce_8_0_sh_mask.h"
47 :
48 : #include "bif/bif_4_1_d.h"
49 : #include "bif/bif_4_1_sh_mask.h"
50 :
51 : #include "gca/gfx_7_2_d.h"
52 : #include "gca/gfx_7_2_sh_mask.h"
53 :
54 : #include "gmc/gmc_7_1_d.h"
55 : #include "gmc/gmc_7_1_sh_mask.h"
56 :
57 : #include "processpptables.h"
58 :
59 : #define MC_CG_ARB_FREQ_F0 0x0a
60 : #define MC_CG_ARB_FREQ_F1 0x0b
61 : #define MC_CG_ARB_FREQ_F2 0x0c
62 : #define MC_CG_ARB_FREQ_F3 0x0d
63 :
64 : #define SMC_RAM_END 0x40000
65 :
66 : #define CISLAND_MINIMUM_ENGINE_CLOCK 800
67 : #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
68 :
/*
 * Per-ASIC power-tune parameter sets, selected by PCI device ID in
 * ci_initialize_power_tune_defaults().  The two trailing arrays feed the
 * BAPMTI_R / BAPMTI_RC thermal-interface matrices in
 * ci_populate_bapm_parameters_in_dpm_table(); the scalar fields supply
 * svi_load_line_*, tdc_* limits, dte_ambient_temp_base and
 * bapm_temp_gradient (field order defined by struct ci_pt_defaults in
 * ci_smumgr.h -- confirm there before touching these literals).
 */
static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

/* Hawaii PRO: same BAPMTI tables as XT, different bapm_temp_gradient. */
static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

/* Bonaire XT: also the fallback for unrecognized device IDs. */
static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};


static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
93 :
94 :
95 0 : static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
96 : uint32_t smc_addr, uint32_t limit)
97 : {
98 0 : if ((0 != (3 & smc_addr))
99 0 : || ((smc_addr + 3) >= limit)) {
100 0 : pr_err("smc_addr invalid \n");
101 : return -EINVAL;
102 : }
103 :
104 0 : cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
105 0 : PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
106 : return 0;
107 : }
108 :
/*
 * Copy @byte_count bytes from @src into SMC SRAM starting at
 * @smc_start_address (must be dword-aligned; the whole range must stay
 * below @limit).  The SMC is accessed one big-endian dword at a time;
 * a trailing partial dword is merged with the bytes already in SRAM via
 * a read-modify-write.  Returns 0 on success or a negative error code.
 */
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid \n");
		return -EINVAL;
	}

	addr = smc_start_address;

	/* Full dwords: pack 4 source bytes MSB-first and write directly. */
	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* Tail of 1-3 bytes: merge with the existing SRAM contents. */
	if (0 != byte_count) {

		data = 0;

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		/* Read the current dword so untouched low bytes are preserved. */
		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		/* Number of bit positions the new bytes must move up. */
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		/* Keep the original low bytes below the newly written ones. */
		data |= (original_data & ~((~0UL) << extra_shift));

		/* Re-select the address: the read above consumed the window. */
		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}
176 :
177 :
/*
 * Write a 4-byte jump instruction at SMC address 0 so the SMC starts
 * executing the loaded firmware when released from reset.
 *
 * Returns 0 on success or the error from the SRAM copy.  The original
 * code dropped the copy result and always returned 0; propagating it
 * lets callers notice a failed upload.
 */
static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	/* Opcode bytes, written MSB-first by ci_copy_bytes_to_smc(). */
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	return ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
}
186 :
/*
 * The SMC is considered running when its clock is not gated
 * (ck_disable == 0) and its program counter is at or above 0x20100
 * (presumably past the boot ROM -- firmware convention, not verifiable
 * from this file).
 */
static bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}
194 :
195 0 : static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
196 : uint32_t *value, uint32_t limit)
197 : {
198 : int result;
199 :
200 0 : result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
201 :
202 0 : if (result)
203 : return result;
204 :
205 0 : *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
206 0 : return 0;
207 : }
208 :
/*
 * Post @msg to the SMC mailbox and busy-wait for a response.
 * NOTE(review): a response other than 1 is only logged; the function
 * still returns 0, so callers cannot observe mailbox failures.
 */
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret;

	/* Clear the previous response before posting the new message. */
	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	/* Spin until the SMC writes a non-zero response code. */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	/* 1 appears to be the SMC success code; everything else is logged only. */
	if (ret != 1)
		dev_info(adev->dev,
			"failed to send message %x ret is %d\n", msg,ret);

	return 0;
}
227 :
/*
 * Load @parameter into the SMC argument register, then send @msg.
 * The argument write must precede the message post.  Returns the result
 * of ci_send_msg_to_smc() (currently always 0).
 */
static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(hwmgr, msg);
}
234 :
/*
 * Select the per-ASIC power-tune parameter set by PCI device ID and
 * cache it in the SMU backend for the later ci_populate_* calls.
 * Unrecognized devices fall back to the Bonaire XT set.
 */
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	dev_id = adev->pdev->device;

	switch (dev_id) {
	/* Hawaii PRO variants */
	case 0x67BA:
	case 0x67B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	/* Hawaii XT variants */
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	/* Saturn variants */
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	/* Bonaire family, and the catch-all default */
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}
277 :
278 : static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
279 : struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
280 : uint32_t clock, uint32_t *vol)
281 : {
282 0 : uint32_t i = 0;
283 :
284 0 : if (allowed_clock_voltage_table->count == 0)
285 : return -EINVAL;
286 :
287 0 : for (i = 0; i < allowed_clock_voltage_table->count; i++) {
288 0 : if (allowed_clock_voltage_table->entries[i].clk >= clock) {
289 0 : *vol = allowed_clock_voltage_table->entries[i].v;
290 : return 0;
291 : }
292 : }
293 :
294 0 : *vol = allowed_clock_voltage_table->entries[i - 1].v;
295 : return 0;
296 : }
297 :
/*
 * Compute the SPLL register values for the requested engine @clock and
 * store them, plus the post divider, into @sclk.  Dividers come from the
 * VBIOS; spread spectrum is layered on when the platform supports it.
 * (The &dividers argument below was mangled to an HTML entity in the
 * extracted source and has been restored.)
 */
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		/* Spread spectrum is looked up by VCO frequency; a lookup
		 * failure simply leaves SS disabled for this level. */
		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency        = clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}
371 :
372 : static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
373 : const struct phm_phase_shedding_limits_table *pl,
374 : uint32_t sclk, uint32_t *p_shed)
375 : {
376 : unsigned int i;
377 :
378 : /* use the minimum phase shedding */
379 : *p_shed = 1;
380 :
381 0 : for (i = 0; i < pl->count; i++) {
382 0 : if (sclk < pl->entries[i].Sclk) {
383 0 : *p_shed = i;
384 : break;
385 : }
386 : }
387 : }
388 :
389 : static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
390 : uint32_t clock_insr)
391 : {
392 : uint8_t i;
393 : uint32_t temp;
394 0 : uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
395 :
396 0 : if (clock < min) {
397 0 : pr_info("Engine clock can't satisfy stutter requirement!\n");
398 : return 0;
399 : }
400 0 : for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
401 0 : temp = clock >> i;
402 :
403 0 : if (temp >= min || i == 0)
404 : break;
405 : }
406 : return i;
407 : }
408 :
/*
 * Fill one SMU7_Discrete_GraphicsLevel for engine clock @clock: SPLL
 * settings via ci_calculate_sclk_params(), minimum VDDC from the
 * SCLK/VDDC dependency table, activity/hysteresis policy from the
 * current profile, optional phase shedding and deep-sleep divider, then
 * convert multi-byte fields to SMC byte order.
 */
static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* NOTE(review): this result is overwritten below without being checked. */
	result = ci_calculate_sclk_params(hwmgr, clock, level);

	/* populate graphics levels */
	result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
			(uint32_t *)(&level->MinVddc));
	if (result) {
		pr_err("vdd_dep_on_sclk table is NULL\n");
		return result;
	}

	level->SclkFrequency = clock;
	level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(hwmgr,
				hwmgr->dyn_state.vddc_phase_shed_limits_table,
				clock,
				&level->MinVddcPhases);

	level->ActivityLevel = data->current_profile_setting.sclk_activity;
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	/* this level can be used for throttling.*/
	level->EnabledForThrottle = 1;
	level->UpH = data->current_profile_setting.sclk_up_hyst;
	level->DownH = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownH = 0;
	level->PowerThrottle = 0;


	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId =
				ci_get_sleep_divider_id_from_clock(clock,
						CISLAND_MINIMUM_ENGINE_CLOCK);

	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* result is necessarily 0 here (the error path returned above). */
	if (0 == result) {
		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	}

	return result;
}
472 :
/*
 * Build every graphics DPM level from the SCLK DPM table and upload the
 * whole level array into SMC SRAM.  Also derives the SCLK DPM enable
 * mask and level count.  Returns 0 on success or a negative error code.
 */
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
		/* Deep sleep is kept only on the two lowest levels;
		 * higher ones get their divider cleared. */
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		/* The top level uses the HIGH display watermark. */
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	/* Only the lowest level is pre-enabled for activity monitoring. */
	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				   (u8 *)levels, array_size,
				   SMC_RAM_END);

	return result;

}
513 :
/*
 * Program the SVI load-line fuse fields (PmFuses DW6) from the per-ASIC
 * power-tune defaults; trim and offset use fixed constants.
 */
static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}
526 :
/*
 * Program the package TDC limit fuses (PmFuses DW7): the limit from the
 * CAC/DTP table scaled by 256 (appears to be 8.8 fixed point), plus the
 * per-ASIC throttle-release percentage and MAWt window.
 */
static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	/* CONVERT_FROM_HOST_TO_SMC_US swaps in place and yields the value. */
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}
542 :
/*
 * Program PmFuses DW8 (TdcWaterfallCtl).  The SRAM read only verifies
 * the fuse table is reachable: the value read into @temp is not used,
 * and the per-ASIC default is written instead.
 */
static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}
561 :
562 : static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
563 : {
564 : uint16_t tmp;
565 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
566 :
567 0 : if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
568 0 : || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
569 : tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
570 : else
571 0 : tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
572 :
573 0 : smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
574 :
575 : return 0;
576 : }
577 :
/*
 * Fill the BapmVddCVid{Hi,Lo,Hi2}Sidd fuse arrays (PmFuses DW0-DW3)
 * from the CAC leakage table.  With the EVV cap the table supplies
 * three explicit voltages per entry; otherwise the Vddc/Leakage pair is
 * converted.  The table must exist, hold at most 8 entries, and match
 * the SCLK/VDDC dependency table in length.
 */
static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}
606 :
607 0 : static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
608 : {
609 : int i;
610 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
611 0 : uint8_t *vid = smu_data->power_tune_table.VddCVid;
612 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
613 :
614 0 : PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
615 : "There should never be more than 8 entries for VddcVid!!!",
616 : return -EINVAL);
617 :
618 0 : for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
619 0 : vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
620 :
621 : return 0;
622 : }
623 :
624 0 : static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
625 : {
626 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
627 0 : u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
628 0 : u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
629 : int i, min, max;
630 :
631 0 : min = max = hi_vid[0];
632 0 : for (i = 0; i < 8; i++) {
633 0 : if (0 != hi_vid[i]) {
634 0 : if (min > hi_vid[i])
635 0 : min = hi_vid[i];
636 0 : if (max < hi_vid[i])
637 0 : max = hi_vid[i];
638 : }
639 :
640 0 : if (0 != lo_vid[i]) {
641 0 : if (min > lo_vid[i])
642 0 : min = lo_vid[i];
643 0 : if (max < lo_vid[i])
644 0 : max = lo_vid[i];
645 : }
646 : }
647 :
648 0 : if ((min == 0) || (max == 0))
649 : return -EINVAL;
650 0 : smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
651 0 : smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
652 :
653 : return 0;
654 : }
655 :
/*
 * Program the base-leakage Hi/Lo SIDD fuses from the CAC/DTP table.
 * usHigh/LowCACLeakage are divided by 100 then scaled by 256
 * (percent -> 8.8 fixed point, presumably -- confirm against the table
 * definition) and byte-swapped for the SMC.
 */
static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint16_t HiSidd;
	uint16_t LoSidd;
	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

	return 0;
}
673 :
/*
 * Build the SMU7_Discrete_PmFuses image in host memory and upload it to
 * the SMC at the offset published in the firmware header.  Only runs
 * when PowerContainment is enabled; otherwise returns 0 untouched.
 * Sub-populate errors are OR-combined and reported before the upload.
 */
static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		/* DW0 - DW3 */
		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		/* DW4 - DW5 */
		ret |= ci_populate_vddc_vid(hwmgr);
		/* DW6 */
		ret |= ci_populate_svi_load_line(hwmgr);
		/* DW7 */
		ret |= ci_populate_tdc_limit(hwmgr);
		/* DW8 */
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		/* Upload the assembled fuse image in one shot. */
		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}
715 :
/*
 * Populate the BAPM/DTE section of the SMC DPM table: TDP targets
 * (scaled by 256), thermal limits, optional PPM package-power limits,
 * and the per-ASIC BAPMTI_R / BAPMTI_RC thermal-interface matrices,
 * all converted to SMC byte order.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct  phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	/* PPM limits are optional; clear them when no table is present. */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	/* Walk the flat per-ASIC tables in iteration/source/sink order. */
	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
764 :
/*
 * Derive the "standard" Hi/Lo SIDD voltages for @tab->value.  Both
 * default to the raw voltage; when a CAC leakage table exists, the
 * SCLK/VDDC dependency table is searched for this voltage (exact match
 * first, then the first entry >= it) and the corresponding CAC
 * Vddc/Leakage pair is used, clamped to the last CAC entry when the
 * index runs past that table.  Always returns 0 unless the dependency
 * table is missing (-EINVAL).
 */
static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
		uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	/* First pass: exact voltage match against the dependency table. */
	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	/* Second pass: fall back to the first entry at or above the voltage. */
	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}
820 :
/*
 * Convert one voltage-table entry into an SMU7_Discrete_VoltageLevel:
 * the voltage itself plus Hi/Lo SIDD values (falling back to the raw
 * voltage if the SIDD lookup fails), all in SMC byte order.
 * Always returns 0.
 */
static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab,
		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
	int result;

	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
			&smc_voltage_tab->StdVoltageHiSidd,
			&smc_voltage_tab->StdVoltageLoSidd);
	if (result) {
		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
	}

	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

	return 0;
}
841 :
842 0 : static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
843 : SMU7_Discrete_DpmTable *table)
844 : {
845 : unsigned int count;
846 : int result;
847 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
848 :
849 0 : table->VddcLevelCount = data->vddc_voltage_table.count;
850 0 : for (count = 0; count < table->VddcLevelCount; count++) {
851 0 : result = ci_populate_smc_voltage_table(hwmgr,
852 : &(data->vddc_voltage_table.entries[count]),
853 : &(table->VddcLevel[count]));
854 0 : PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
855 :
856 : /* GPIO voltage control */
857 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
858 0 : table->VddcLevel[count].Smio = (uint8_t) count;
859 0 : table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
860 0 : table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
861 : } else {
862 0 : table->VddcLevel[count].Smio = 0;
863 : }
864 : }
865 :
866 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
867 :
868 0 : return 0;
869 : }
870 :
871 0 : static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
872 : SMU7_Discrete_DpmTable *table)
873 : {
874 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
875 : uint32_t count;
876 : int result;
877 :
878 0 : table->VddciLevelCount = data->vddci_voltage_table.count;
879 :
880 0 : for (count = 0; count < table->VddciLevelCount; count++) {
881 0 : result = ci_populate_smc_voltage_table(hwmgr,
882 : &(data->vddci_voltage_table.entries[count]),
883 : &(table->VddciLevel[count]));
884 0 : PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
885 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
886 0 : table->VddciLevel[count].Smio = (uint8_t) count;
887 0 : table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
888 0 : table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
889 : } else {
890 0 : table->VddciLevel[count].Smio = 0;
891 : }
892 : }
893 :
894 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
895 :
896 0 : return 0;
897 : }
898 :
899 0 : static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
900 : SMU7_Discrete_DpmTable *table)
901 : {
902 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
903 : uint32_t count;
904 : int result;
905 :
906 0 : table->MvddLevelCount = data->mvdd_voltage_table.count;
907 :
908 0 : for (count = 0; count < table->MvddLevelCount; count++) {
909 0 : result = ci_populate_smc_voltage_table(hwmgr,
910 : &(data->mvdd_voltage_table.entries[count]),
911 : &table->MvddLevel[count]);
912 0 : PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
913 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
914 0 : table->MvddLevel[count].Smio = (uint8_t) count;
915 0 : table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
916 0 : table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
917 : } else {
918 0 : table->MvddLevel[count].Smio = 0;
919 : }
920 : }
921 :
922 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
923 :
924 0 : return 0;
925 : }
926 :
927 :
928 0 : static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
929 : SMU7_Discrete_DpmTable *table)
930 : {
931 : int result;
932 :
933 0 : result = ci_populate_smc_vddc_table(hwmgr, table);
934 0 : PP_ASSERT_WITH_CODE(0 == result,
935 : "can not populate VDDC voltage table to SMC", return -EINVAL);
936 :
937 0 : result = ci_populate_smc_vdd_ci_table(hwmgr, table);
938 0 : PP_ASSERT_WITH_CODE(0 == result,
939 : "can not populate VDDCI voltage table to SMC", return -EINVAL);
940 :
941 0 : result = ci_populate_smc_mvdd_table(hwmgr, table);
942 0 : PP_ASSERT_WITH_CODE(0 == result,
943 : "can not populate MVDD voltage table to SMC", return -EINVAL);
944 :
945 : return 0;
946 : }
947 :
/*
 * Fill the SMC ULV (ultra-low-voltage) state from the power-play tables.
 *
 * Reads the ULV voltage via pp_tables_get_response_times(); a value of 0
 * means ULV is unsupported and data->ulv_supported is cleared. Otherwise
 * the VDDC offset below the lowest SCLK/VDDC dependency entry is computed,
 * either as a raw voltage offset (SMIO/GPIO path, CI backup path) or as a
 * VID-scaled offset (SVI2 path).
 *
 * Returns 0 on success, or the error from pp_tables_get_response_times().
 */
static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU7_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	/* ULV voltage of 0 in the pptable means the platform has no ULV state. */
	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else  /* used in SVI2 Mode */
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
						* VOLTAGE_VID_OFFSET_SCALE2
						/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	/* Byte-swap the multi-byte fields for the SMC. VddcOffsetVid is a
	 * single byte and needs no conversion.
	 */
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}
991 :
992 : static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
993 : SMU7_Discrete_Ulv *ulv_level)
994 : {
995 0 : return ci_populate_ulv_level(hwmgr, ulv_level);
996 : }
997 :
/*
 * Populate the SMC PCIe link DPM levels from the driver's pcie_speed_table.
 *
 * Note the loop runs to count INCLUSIVE: the entry at index
 * pcie_speed_table.count is reserved for the PCIe boot level (see comment
 * below), so count+1 LinkLevel entries are written while LinkLevelCount
 * itself is set to count. Also records the PCIe DPM enable mask.
 * Always returns 0.
 */
static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

	/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		/* Down/Up hysteresis thresholds, stored SMC byte order. */
		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
1023 :
/*
 * Compute the MPLL/DLL register values for one memory clock level.
 *
 * Starts from the clock registers captured at init, asks the VBIOS for the
 * MPLL dividers for @memory_clock, then patches in the feedback/post
 * dividers, GDDR5 YCLK routing, optional memory spread-spectrum values,
 * and the DLL speed/power-down bits before storing everything into @mclk.
 *
 * @strobe_mode: passed through to the VBIOS divider query.
 * @dllStateOn:  controls the MRDCK0/1_PDNB power-down bits.
 *
 * Returns 0 on success, or the VBIOS query error.
 */
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	/* Program the MPLL bandwidth control and feedback dividers. */
	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
			MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	/* GDDR5 additionally needs the DQ-side YCLK routing. */
	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* Spread-spectrum step (CLKS) and amount (CLKV). */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	/* DLL speed and per-channel power-down state. */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
1119 :
/*
 * Map a memory clock (10 kHz units) to a 4-bit MC frequency-ratio index.
 *
 * In strobe mode the usable window is 12500..47500 with 2500-wide bins
 * based at 10000; otherwise it is 65000..135000 with 5000-wide bins based
 * at 60000. Clocks below the window map to 0x00, above it to 0x0f.
 */
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t low, high, base, step;

	if (strobe_mode) {
		low = 12500;
		high = 47500;
		base = 10000;
		step = 2500;
	} else {
		low = 65000;
		high = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < low)
		return 0x00;
	if (memory_clock > high)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
1143 :
/*
 * Map a DDR3 memory clock (10 kHz units) to a 4-bit MC frequency-ratio
 * index: 0 below 10000, 0x0f at or above 80000, otherwise 5000-wide bins
 * based at 10000 with a +1 offset.
 */
static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1157 :
1158 : static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1159 : uint32_t memory_clock, uint32_t *p_shed)
1160 : {
1161 : unsigned int i;
1162 :
1163 : *p_shed = 1;
1164 :
1165 0 : for (i = 0; i < pl->count; i++) {
1166 0 : if (memory_clock < pl->entries[i].Mclk) {
1167 0 : *p_shed = i;
1168 : break;
1169 : }
1170 : }
1171 :
1172 : return 0;
1173 : }
1174 :
1175 0 : static int ci_populate_single_memory_level(
1176 : struct pp_hwmgr *hwmgr,
1177 : uint32_t memory_clock,
1178 : SMU7_Discrete_MemoryLevel *memory_level
1179 : )
1180 : {
1181 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1182 0 : int result = 0;
1183 : bool dll_state_on;
1184 0 : uint32_t mclk_edc_wr_enable_threshold = 40000;
1185 0 : uint32_t mclk_edc_enable_threshold = 40000;
1186 0 : uint32_t mclk_strobe_mode_threshold = 40000;
1187 :
1188 0 : if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
1189 0 : result = ci_get_dependency_volt_by_clk(hwmgr,
1190 : hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
1191 0 : PP_ASSERT_WITH_CODE((0 == result),
1192 : "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
1193 : }
1194 :
1195 0 : if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1196 0 : result = ci_get_dependency_volt_by_clk(hwmgr,
1197 : hwmgr->dyn_state.vddci_dependency_on_mclk,
1198 : memory_clock,
1199 : &memory_level->MinVddci);
1200 0 : PP_ASSERT_WITH_CODE((0 == result),
1201 : "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
1202 : }
1203 :
1204 0 : if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
1205 0 : result = ci_get_dependency_volt_by_clk(hwmgr,
1206 : hwmgr->dyn_state.mvdd_dependency_on_mclk,
1207 : memory_clock,
1208 : &memory_level->MinMvdd);
1209 0 : PP_ASSERT_WITH_CODE((0 == result),
1210 : "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
1211 : }
1212 :
1213 0 : memory_level->MinVddcPhases = 1;
1214 :
1215 0 : if (data->vddc_phase_shed_control) {
1216 0 : ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
1217 : memory_clock, &memory_level->MinVddcPhases);
1218 : }
1219 :
1220 0 : memory_level->EnabledForThrottle = 1;
1221 0 : memory_level->EnabledForActivity = 1;
1222 0 : memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
1223 0 : memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
1224 0 : memory_level->VoltageDownH = 0;
1225 :
1226 : /* Indicates maximum activity level for this performance level.*/
1227 0 : memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1228 0 : memory_level->StutterEnable = 0;
1229 0 : memory_level->StrobeEnable = 0;
1230 0 : memory_level->EdcReadEnable = 0;
1231 0 : memory_level->EdcWriteEnable = 0;
1232 0 : memory_level->RttEnable = 0;
1233 :
1234 : /* default set to low watermark. Highest level will be set to high later.*/
1235 0 : memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1236 :
1237 0 : data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
1238 0 : data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
1239 :
1240 : /* stutter mode not support on ci */
1241 :
1242 : /* decide strobe mode*/
1243 0 : memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1244 0 : (memory_clock <= mclk_strobe_mode_threshold);
1245 :
1246 : /* decide EDC mode and memory clock ratio*/
1247 0 : if (data->is_memory_gddr5) {
1248 0 : memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
1249 : memory_level->StrobeEnable);
1250 :
1251 0 : if ((mclk_edc_enable_threshold != 0) &&
1252 : (memory_clock > mclk_edc_enable_threshold)) {
1253 0 : memory_level->EdcReadEnable = 1;
1254 : }
1255 :
1256 0 : if ((mclk_edc_wr_enable_threshold != 0) &&
1257 : (memory_clock > mclk_edc_wr_enable_threshold)) {
1258 0 : memory_level->EdcWriteEnable = 1;
1259 : }
1260 :
1261 0 : if (memory_level->StrobeEnable) {
1262 0 : if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
1263 0 : ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
1264 0 : dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1265 : else
1266 0 : dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1267 : } else
1268 0 : dll_state_on = data->dll_default_on;
1269 : } else {
1270 0 : memory_level->StrobeRatio =
1271 0 : ci_get_ddr3_mclk_frequency_ratio(memory_clock);
1272 0 : dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1273 : }
1274 :
1275 0 : result = ci_calculate_mclk_params(hwmgr,
1276 0 : memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1277 :
1278 0 : if (0 == result) {
1279 0 : memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
1280 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
1281 0 : memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
1282 0 : memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
1283 : /* MCLK frequency in units of 10KHz*/
1284 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1285 : /* Indicates maximum activity level for this performance level.*/
1286 0 : CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1287 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1288 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1289 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1290 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1291 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1292 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1293 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1294 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1295 0 : CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1296 : }
1297 :
1298 : return result;
1299 : }
1300 :
/*
 * Build every SMC memory DPM level from the driver's mclk table and upload
 * the whole MemoryLevel array into SMC RAM at dpm_table_start.
 *
 * After population: level 0 is forced active, Hawaii PRO/XT (device IDs
 * 0x67B0/0x67B1) copy level 0's MinVddci/MinMvdd into level 1, level 0's
 * activity level is pinned to 0x1F, and the highest level gets the high
 * display watermark.
 *
 * Returns 0 on success, or an error from level population / the SMC copy.
 */
static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	dev_id = adev->pdev->device;

	/* Hardware quirk for Hawaii PRO/XT: level 1 inherits level 0's
	 * VDDCI/MVDD minimums.
	 */
	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* Highest memory level drives the high display watermark. */
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	/* Push the finished level array into SMC RAM. */
	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}
1350 :
1351 0 : static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1352 : SMU7_Discrete_VoltageLevel *voltage)
1353 : {
1354 0 : const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1355 :
1356 0 : uint32_t i = 0;
1357 :
1358 0 : if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1359 : /* find mvdd value which clock is more than request */
1360 0 : for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1361 0 : if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1362 : /* Always round to higher voltage. */
1363 0 : voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1364 : break;
1365 : }
1366 : }
1367 :
1368 0 : PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1369 : "MVDD Voltage is outside the supported range.", return -EINVAL);
1370 :
1371 : } else {
1372 : return -EINVAL;
1373 : }
1374 :
1375 : return 0;
1376 : }
1377 :
/*
 * Fill both the SCLK ACPI level (table->ACPILevel) and the memory ACPI
 * level (table->MemoryACPILevel).
 *
 * The SCLK side powers down and resets the SPLL and selects MUX input 4;
 * the memory side force-resets and powers down both DLL channels and
 * enables DLL bypass. Voltages fall back from the ACPI values to the
 * pptable minimums. All multi-byte fields end up in SMC byte order.
 *
 * Returns 0 on success, or the VBIOS divider-query error.
 */
static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMU7_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Prefer the ACPI VDDC; fall back to the pptable minimum. */
	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power down and reset the SPLL while in ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
			CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
			CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
			CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);


	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	/* MVDD for mclk 0; zero if MVDD is not driver-controlled. */
	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
1518 :
1519 0 : static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1520 : SMU7_Discrete_DpmTable *table)
1521 : {
1522 0 : int result = 0;
1523 : uint8_t count;
1524 : struct pp_atomctrl_clock_dividers_vi dividers;
1525 0 : struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1526 : hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1527 :
1528 0 : table->UvdLevelCount = (uint8_t)(uvd_table->count);
1529 :
1530 0 : for (count = 0; count < table->UvdLevelCount; count++) {
1531 0 : table->UvdLevel[count].VclkFrequency =
1532 0 : uvd_table->entries[count].vclk;
1533 0 : table->UvdLevel[count].DclkFrequency =
1534 0 : uvd_table->entries[count].dclk;
1535 0 : table->UvdLevel[count].MinVddc =
1536 0 : uvd_table->entries[count].v * VOLTAGE_SCALE;
1537 0 : table->UvdLevel[count].MinVddcPhases = 1;
1538 :
1539 0 : result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1540 : table->UvdLevel[count].VclkFrequency, ÷rs);
1541 0 : PP_ASSERT_WITH_CODE((0 == result),
1542 : "can not find divide id for Vclk clock", return result);
1543 :
1544 0 : table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1545 :
1546 0 : result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1547 : table->UvdLevel[count].DclkFrequency, ÷rs);
1548 0 : PP_ASSERT_WITH_CODE((0 == result),
1549 : "can not find divide id for Dclk clock", return result);
1550 :
1551 0 : table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1552 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1553 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1554 0 : CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1555 : }
1556 :
1557 : return result;
1558 : }
1559 :
1560 0 : static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1561 : SMU7_Discrete_DpmTable *table)
1562 : {
1563 0 : int result = -EINVAL;
1564 : uint8_t count;
1565 : struct pp_atomctrl_clock_dividers_vi dividers;
1566 0 : struct phm_vce_clock_voltage_dependency_table *vce_table =
1567 : hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1568 :
1569 0 : table->VceLevelCount = (uint8_t)(vce_table->count);
1570 0 : table->VceBootLevel = 0;
1571 :
1572 0 : for (count = 0; count < table->VceLevelCount; count++) {
1573 0 : table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1574 0 : table->VceLevel[count].MinVoltage =
1575 0 : vce_table->entries[count].v * VOLTAGE_SCALE;
1576 0 : table->VceLevel[count].MinPhases = 1;
1577 :
1578 0 : result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1579 : table->VceLevel[count].Frequency, ÷rs);
1580 0 : PP_ASSERT_WITH_CODE((0 == result),
1581 : "can not find divide id for VCE engine clock",
1582 : return result);
1583 :
1584 0 : table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1585 :
1586 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1587 0 : CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1588 : }
1589 : return result;
1590 : }
1591 :
1592 0 : static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1593 : SMU7_Discrete_DpmTable *table)
1594 : {
1595 0 : int result = -EINVAL;
1596 : uint8_t count;
1597 : struct pp_atomctrl_clock_dividers_vi dividers;
1598 0 : struct phm_acp_clock_voltage_dependency_table *acp_table =
1599 : hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1600 :
1601 0 : table->AcpLevelCount = (uint8_t)(acp_table->count);
1602 0 : table->AcpBootLevel = 0;
1603 :
1604 0 : for (count = 0; count < table->AcpLevelCount; count++) {
1605 0 : table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1606 0 : table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1607 0 : table->AcpLevel[count].MinPhases = 1;
1608 :
1609 0 : result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1610 : table->AcpLevel[count].Frequency, ÷rs);
1611 0 : PP_ASSERT_WITH_CODE((0 == result),
1612 : "can not find divide id for engine clock", return result);
1613 :
1614 0 : table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1615 :
1616 0 : CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1617 0 : CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1618 : }
1619 : return result;
1620 : }
1621 :
/*
 * ci_populate_memory_timing_parameters - build one MC ARB DRAM timing entry
 * for a given (engine clock, memory clock) pair.
 *
 * The VBIOS is asked to program the DRAM timings for the pair; the resulting
 * register values are then read back and packed into @arb_regs in the
 * byte order the SMC expects.
 *
 * Returns 0 on success or the atomctrl error code.
 */
static int ci_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	/* Let the VBIOS compute and program timings for this clock pair. */
	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back what the VBIOS just programmed. */
	dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	/* Store in SMC (big-endian) byte order. */
	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1650 :
1651 0 : static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1652 : {
1653 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1654 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1655 0 : int result = 0;
1656 : SMU7_Discrete_MCArbDramTimingTable arb_regs;
1657 : uint32_t i, j;
1658 :
1659 0 : memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1660 :
1661 0 : for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1662 0 : for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1663 0 : result = ci_populate_memory_timing_parameters
1664 : (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1665 : data->dpm_table.mclk_table.dpm_levels[j].value,
1666 0 : &arb_regs.entries[i][j]);
1667 :
1668 0 : if (0 != result)
1669 : break;
1670 : }
1671 : }
1672 :
1673 0 : if (0 == result) {
1674 0 : result = ci_copy_bytes_to_smc(
1675 : hwmgr,
1676 : smu_data->arb_table_start,
1677 : (uint8_t *)&arb_regs,
1678 : sizeof(SMU7_Discrete_MCArbDramTimingTable),
1679 : SMC_RAM_END
1680 : );
1681 : }
1682 :
1683 0 : return result;
1684 : }
1685 :
1686 0 : static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1687 : SMU7_Discrete_DpmTable *table)
1688 : {
1689 0 : int result = 0;
1690 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1691 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1692 :
1693 0 : table->GraphicsBootLevel = 0;
1694 0 : table->MemoryBootLevel = 0;
1695 :
1696 : /* find boot level from dpm table*/
1697 0 : result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1698 : data->vbios_boot_state.sclk_bootup_value,
1699 0 : (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1700 :
1701 0 : if (0 != result) {
1702 0 : smu_data->smc_state_table.GraphicsBootLevel = 0;
1703 0 : pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
1704 0 : result = 0;
1705 : }
1706 :
1707 0 : result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1708 : data->vbios_boot_state.mclk_bootup_value,
1709 0 : (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1710 :
1711 0 : if (0 != result) {
1712 0 : smu_data->smc_state_table.MemoryBootLevel = 0;
1713 0 : pr_err("VBIOS did not find boot engine clock value in dependency table. Using Memory DPM level 0!\n");
1714 0 : result = 0;
1715 : }
1716 :
1717 0 : table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1718 0 : table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1719 0 : table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1720 :
1721 0 : return result;
1722 : }
1723 :
1724 0 : static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1725 : SMU7_Discrete_MCRegisters *mc_reg_table)
1726 : {
1727 0 : const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1728 :
1729 : uint32_t i, j;
1730 :
1731 0 : for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1732 0 : if (smu_data->mc_reg_table.validflag & 1<<j) {
1733 0 : PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1734 : "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1735 0 : mc_reg_table->address[i].s0 =
1736 0 : PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1737 0 : mc_reg_table->address[i].s1 =
1738 0 : PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1739 0 : i++;
1740 : }
1741 : }
1742 :
1743 0 : mc_reg_table->last = (uint8_t)i;
1744 :
1745 : return 0;
1746 : }
1747 :
1748 : static void ci_convert_mc_registers(
1749 : const struct ci_mc_reg_entry *entry,
1750 : SMU7_Discrete_MCRegisterSet *data,
1751 : uint32_t num_entries, uint32_t valid_flag)
1752 : {
1753 : uint32_t i, j;
1754 :
1755 0 : for (i = 0, j = 0; j < num_entries; j++) {
1756 0 : if (valid_flag & 1<<j) {
1757 0 : data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1758 0 : i++;
1759 : }
1760 : }
1761 : }
1762 :
1763 0 : static int ci_convert_mc_reg_table_entry_to_smc(
1764 : struct pp_hwmgr *hwmgr,
1765 : const uint32_t memory_clock,
1766 : SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1767 : )
1768 : {
1769 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1770 0 : uint32_t i = 0;
1771 :
1772 0 : for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1773 0 : if (memory_clock <=
1774 0 : smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1775 : break;
1776 : }
1777 : }
1778 :
1779 0 : if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1780 0 : --i;
1781 :
1782 0 : ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1783 0 : mc_reg_table_data, smu_data->mc_reg_table.last,
1784 0 : smu_data->mc_reg_table.validflag);
1785 :
1786 0 : return 0;
1787 : }
1788 :
1789 0 : static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1790 : SMU7_Discrete_MCRegisters *mc_regs)
1791 : {
1792 0 : int result = 0;
1793 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1794 : int res;
1795 : uint32_t i;
1796 :
1797 0 : for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1798 0 : res = ci_convert_mc_reg_table_entry_to_smc(
1799 : hwmgr,
1800 : data->dpm_table.mclk_table.dpm_levels[i].value,
1801 : &mc_regs->data[i]
1802 : );
1803 :
1804 0 : if (0 != res)
1805 0 : result = res;
1806 : }
1807 :
1808 0 : return result;
1809 : }
1810 :
1811 0 : static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1812 : {
1813 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1814 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1815 : uint32_t address;
1816 : int32_t result;
1817 :
1818 0 : if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1819 : return 0;
1820 :
1821 :
1822 0 : memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1823 :
1824 0 : result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1825 :
1826 0 : if (result != 0)
1827 : return result;
1828 :
1829 0 : address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1830 :
1831 0 : return ci_copy_bytes_to_smc(hwmgr, address,
1832 0 : (uint8_t *)&smu_data->mc_regs.data[0],
1833 0 : sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1834 : SMC_RAM_END);
1835 : }
1836 :
/*
 * ci_populate_initial_mc_reg_table - build the complete MC register table
 * (address pairs plus per-level data) and upload the whole structure to
 * SMC RAM at mc_reg_table_start.
 *
 * Returns 0 on success or the first population/upload error.
 */
static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
	/* Address pairs first, then the per-mclk-level register values. */
	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for the MC register addresses!", return result;);

	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for driver state!", return result;);

	/* Unlike the OD update path, this uploads the entire structure. */
	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}
1854 :
1855 0 : static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1856 : {
1857 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1858 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1859 : uint8_t count, level;
1860 :
1861 0 : count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1862 :
1863 0 : for (level = 0; level < count; level++) {
1864 0 : if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1865 0 : >= data->vbios_boot_state.sclk_bootup_value) {
1866 0 : smu_data->smc_state_table.GraphicsBootLevel = level;
1867 0 : break;
1868 : }
1869 : }
1870 :
1871 0 : count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1872 :
1873 0 : for (level = 0; level < count; level++) {
1874 0 : if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1875 0 : >= data->vbios_boot_state.mclk_bootup_value) {
1876 0 : smu_data->smc_state_table.MemoryBootLevel = level;
1877 0 : break;
1878 : }
1879 : }
1880 :
1881 0 : return 0;
1882 : }
1883 :
1884 : static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1885 : SMU7_Discrete_DpmTable *table)
1886 : {
1887 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1888 :
1889 0 : if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1890 0 : table->SVI2Enable = 1;
1891 : else
1892 0 : table->SVI2Enable = 0;
1893 : return 0;
1894 : }
1895 :
/*
 * ci_start_smc - release the SMC from reset and let it run.
 *
 * Sequence (order matters): point the SMC's start PC at 0, enable its clock,
 * deassert reset, then wait for the firmware to report that its interrupt
 * handling is up before returning.
 *
 * Always returns 0.
 */
static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
	/* set smc instruct start point at 0x0 */
	ci_program_jump_on_start(hwmgr);

	/* enable smc clock */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* take the SMC out of reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* block until the firmware signals it is ready to take interrupts */
	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
				INTERRUPTS_ENABLED, 1);

	return 0;
}
1911 :
1912 0 : static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
1913 : {
1914 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1915 : uint16_t config;
1916 :
1917 0 : config = VR_SVI2_PLANE_1;
1918 0 : table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1919 :
1920 0 : if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1921 0 : config = VR_SVI2_PLANE_2;
1922 0 : table->VRConfig |= config;
1923 : } else {
1924 0 : pr_info("VDDCshould be on SVI2 controller!");
1925 : }
1926 :
1927 0 : if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1928 0 : config = VR_SVI2_PLANE_2;
1929 0 : table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1930 0 : } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1931 0 : config = VR_SMIO_PATTERN_1;
1932 0 : table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1933 : }
1934 :
1935 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1936 0 : config = VR_SMIO_PATTERN_2;
1937 0 : table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1938 : }
1939 :
1940 0 : return 0;
1941 : }
1942 :
/*
 * ci_init_smc_table - build the complete SMU7 discrete DPM table for the
 * boot state and upload it to SMC RAM, then populate the MC register table
 * and PM fuses and start the SMC.
 *
 * The population order below is deliberate: voltage tables and per-domain
 * levels first, then boot levels, BAPM/fan parameters, global intervals and
 * limits, VR configuration, and finally the host-to-SMC endianness swizzle
 * of every multi-byte field just before upload.
 *
 * Returns 0 on success or the first population/upload error.
 */
static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
	u32 i;

	ci_initialize_power_tune_defaults(hwmgr);
	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));

	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
		ci_populate_smc_voltage_tables(hwmgr, table);

	/* System-level feature flags advertised to the SMC firmware. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;


	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (data->ulv_supported) {
		result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
		PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ULV state!", return result);

		/* ULV re-entry delay parameter (magic value from AMD). */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_ULV_PARAMETER, 0x40035);
	}

	/* Per-domain DPM level tables. */
	result = ci_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Graphics Level!", return result);

	result = ci_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Memory Level!", return result);

	result = ci_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Link Level!", return result);

	result = ci_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACPI Level!", return result);

	result = ci_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize VCE Level!", return result);

	result = ci_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACP Level!", return result);

	/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
	/* need to populate the ARB settings for the initial state. */
	result = ci_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to Write ARB settings for the initial state.", return result);

	result = ci_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize UVD Level!", return result);

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	result = ci_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Boot Level!", return result);

	result = ci_populate_smc_initial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);

	result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);

	/* Arbitration/monitoring intervals and thermal thresholds. */
	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;

	/* Thermal limits are converted to the SMC's Q8.8 fixed point. */
	table->TemperatureLimitHigh =
		(data->thermal_temp_setting.temperature_high *
		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->TemperatureLimitLow =
		(data->thermal_temp_setting.temperature_low *
		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;

	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
			"There must be 1 or more PCIE levels defined in PPTable.",
			return -EINVAL);

	table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
	table->PCIeGenInterval = 1;

	result = ci_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);
	data->vr_config = table->VRConfig;

	ci_populate_smc_svi2_config(hwmgr, table);

	for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
		CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;
	/* Route the VR-hot GPIO if the board wires one up. */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	} else {
		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;

	/* Byte-swap every multi-byte field for the big-endian SMC. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
					offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					(uint8_t *)&(table->SystemFlags),
					sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
					SMC_RAM_END);

	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload dpm data to SMC memory!", return result;);

	result = ci_populate_initial_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
		"Failed to populate initialize MC Reg table!", return result);

	result = ci_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate PM fuses to SMC memory!", return result);

	ci_start_smc(hwmgr);

	return 0;
}
2125 :
/*
 * ci_thermal_setup_fan_table - derive the SMC fan-control table from the
 * platform's fan parameters and upload it.
 *
 * Microcode fan control is silently disabled (cap cleared, return 0) when the
 * board has no fan, the fan table location is unknown, or FMAX_DUTY100 reads
 * back as zero.  The two PWM-vs-temperature slopes are computed from the
 * (TMin, PWMMin) -> (TMed, PWMMed) -> (THigh, PWMHigh) piecewise curve.
 *
 * Returns 0 on success or the SMC upload error.
 */
static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
		return 0;

	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	if (0 == ci_data->fan_table_start) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);

	if (0 == duty100) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Minimum fan duty: usPWMMin is in 0.01% units, scale to duty100. */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* Temperature and PWM deltas of the two curve segments. */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Slopes in the SMC's fixed-point format; +50 rounds to nearest. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Temperatures are in 0.01 C; convert to whole degrees, rounded. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Temperature source selection mirrors the current HW setting. */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);

	return res;
}
2200 :
2201 : static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2202 : {
2203 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2204 :
2205 0 : if (data->need_update_smu7_dpm_table &
2206 : (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2207 0 : return ci_program_memory_timing_parameters(hwmgr);
2208 :
2209 : return 0;
2210 : }
2211 :
/*
 * ci_update_sclk_threshold - push runtime DPM updates to the SMC: the low-sclk
 * interrupt threshold (when the notification cap is enabled and a threshold is
 * set), the MC register table, and the memory timing parameters.
 *
 * Returns 0 on success; a failed memory-timing reprogram is logged but its
 * error code is still returned via "result".
 */
static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	int result = 0;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification)
		&& (data->low_sclk_interrupt_threshold != 0)) {
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		/* SMC fields are big-endian. */
		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);

		/* Patch just the LowSclkInterruptT field in SMC RAM. */
		result = ci_copy_bytes_to_smc(
				hwmgr,
				smu_data->dpm_table_start +
				offsetof(SMU7_Discrete_DpmTable,
					LowSclkInterruptT),
				(uint8_t *)&low_sclk_interrupt_threshold,
				sizeof(uint32_t),
				SMC_RAM_END);
	}

	result = ci_update_and_upload_mc_reg_table(hwmgr);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);

	result = ci_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((result == 0),
			"Failed to program memory timing parameters!",
			);

	return result;
}
2249 :
/*
 * ci_get_offsetof - translate a generic smumgr (table type, member) pair into
 * the byte offset of the corresponding field in the CI SMU7 firmware structs.
 *
 * Returns the offset, or 0 for an unknown pair (logged at debug level).
 * NOTE(review): 0 is also a valid offset (e.g. HandshakeDisables is the first
 * SoftRegisters field), so callers cannot distinguish "first field" from
 * "not found" — confirm callers never rely on that distinction.
 */
static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
{
	switch (type) {
	case SMU_SoftRegisters:
		switch (member) {
		case HandshakeDisables:
			return offsetof(SMU7_SoftRegisters, HandshakeDisables);
		case VoltageChangeTimeout:
			return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
		case AverageGraphicsActivity:
			return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
		case AverageMemoryActivity:
			return offsetof(SMU7_SoftRegisters, AverageMemoryA);
		case PreVBlankGap:
			return offsetof(SMU7_SoftRegisters, PreVBlankGap);
		case VBlankTimeout:
			return offsetof(SMU7_SoftRegisters, VBlankTimeout);
		case DRAM_LOG_ADDR_H:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
		case DRAM_LOG_ADDR_L:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
		case DRAM_LOG_PHY_ADDR_H:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
		case DRAM_LOG_PHY_ADDR_L:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
		case DRAM_LOG_BUFF_SIZE:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
		}
		break;
	case SMU_Discrete_DpmTable:
		switch (member) {
		case LowSclkInterruptThreshold:
			return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
		}
		break;
	}
	pr_debug("can't get the offset of type %x member %x\n", type, member);
	return 0;
}
2289 :
2290 0 : static uint32_t ci_get_mac_definition(uint32_t value)
2291 : {
2292 : switch (value) {
2293 : case SMU_MAX_LEVELS_GRAPHICS:
2294 : return SMU7_MAX_LEVELS_GRAPHICS;
2295 : case SMU_MAX_LEVELS_MEMORY:
2296 : return SMU7_MAX_LEVELS_MEMORY;
2297 : case SMU_MAX_LEVELS_LINK:
2298 : return SMU7_MAX_LEVELS_LINK;
2299 : case SMU_MAX_ENTRIES_SMIO:
2300 : return SMU7_MAX_ENTRIES_SMIO;
2301 : case SMU_MAX_LEVELS_VDDC:
2302 : return SMU7_MAX_LEVELS_VDDC;
2303 : case SMU_MAX_LEVELS_VDDCI:
2304 : return SMU7_MAX_LEVELS_VDDCI;
2305 : case SMU_MAX_LEVELS_MVDD:
2306 : return SMU7_MAX_LEVELS_MVDD;
2307 : }
2308 :
2309 : pr_debug("can't get the mac of %x\n", value);
2310 : return 0;
2311 : }
2312 :
2313 0 : static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2314 : {
2315 : uint32_t byte_count, start_addr;
2316 : uint8_t *src;
2317 : uint32_t data;
2318 :
2319 0 : struct cgs_firmware_info info = {0};
2320 :
2321 0 : cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2322 :
2323 0 : hwmgr->is_kicker = info.is_kicker;
2324 0 : hwmgr->smu_version = info.version;
2325 0 : byte_count = info.image_size;
2326 0 : src = (uint8_t *)info.kptr;
2327 0 : start_addr = info.ucode_start_address;
2328 :
2329 0 : if (byte_count > SMC_RAM_END) {
2330 0 : pr_err("SMC address is beyond the SMC RAM area.\n");
2331 0 : return -EINVAL;
2332 : }
2333 :
2334 0 : cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2335 0 : PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2336 :
2337 0 : for (; byte_count >= 4; byte_count -= 4) {
2338 0 : data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2339 0 : cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2340 0 : src += 4;
2341 : }
2342 0 : PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2343 :
2344 0 : if (0 != byte_count) {
2345 0 : pr_err("SMC size must be divisible by 4\n");
2346 0 : return -EINVAL;
2347 : }
2348 :
2349 : return 0;
2350 : }
2351 :
/*
 * ci_upload_firmware - load the SMU microcode unless the SMC is already
 * running.
 *
 * Sequence before the upload: wait for the boot sequence to finish, enable
 * the prefetcher, stop the SMC clock and hold the SMC in reset so the RAM
 * can be written safely.
 *
 * Returns 0 if the SMC was already running or the load result otherwise.
 */
static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
{
	if (ci_is_smc_ram_running(hwmgr)) {
		pr_info("smc is running, no need to load smc firmware\n");
		return 0;
	}
	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
			boot_seq_done, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
			pre_fetcher_en, 1);

	/* Gate the SMC clock and assert reset while its RAM is rewritten. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	return ci_load_smc_ucode(hwmgr);
}
2367 :
/*
 * ci_process_firmware_header - upload the firmware if needed, then read the
 * firmware header out of SMC RAM to learn where the DPM table, soft
 * registers, MC register table, fan table and ARB table live, plus the
 * firmware version.
 *
 * Returns 0 when every required header field was read, -EINVAL when the
 * firmware upload fails, or 1 when any accumulated read failed.
 */
static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	uint32_t tmp = 0;
	int result;
	bool error = false;

	if (ci_upload_firmware(hwmgr))
		return -EINVAL;

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, DpmTable),
				&tmp, SMC_RAM_END);

	if (0 == result)
		ci_data->dpm_table_start = tmp;

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, SoftRegisters),
				&tmp, SMC_RAM_END);

	/* Soft-register base is cached both in the hwmgr and SMU backends. */
	if (0 == result) {
		data->soft_regs_start = tmp;
		ci_data->soft_regs_start = tmp;
	}

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, mcRegisterTable),
				&tmp, SMC_RAM_END);

	/*
	 * NOTE(review): unlike every other header field, a failed
	 * mcRegisterTable read is not folded into "error" here — confirm
	 * whether the MC register table is genuinely optional or whether an
	 * "error |= (0 != result);" is missing.
	 */
	if (0 == result)
		ci_data->mc_reg_table_start = tmp;

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, FanTable),
				&tmp, SMC_RAM_END);

	if (0 == result)
		ci_data->fan_table_start = tmp;

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				&tmp, SMC_RAM_END);

	if (0 == result)
		ci_data->arb_table_start = tmp;

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, Version),
				&tmp, SMC_RAM_END);

	if (0 == result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (0 != result);

	return error ? 1 : 0;
}
2442 :
2443 : static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2444 : {
2445 0 : return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2446 : }
2447 :
2448 0 : static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2449 : {
2450 0 : bool result = true;
2451 :
2452 0 : switch (in_reg) {
2453 : case mmMC_SEQ_RAS_TIMING:
2454 0 : *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2455 0 : break;
2456 :
2457 : case mmMC_SEQ_DLL_STBY:
2458 0 : *out_reg = mmMC_SEQ_DLL_STBY_LP;
2459 0 : break;
2460 :
2461 : case mmMC_SEQ_G5PDX_CMD0:
2462 0 : *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2463 0 : break;
2464 :
2465 : case mmMC_SEQ_G5PDX_CMD1:
2466 0 : *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2467 0 : break;
2468 :
2469 : case mmMC_SEQ_G5PDX_CTRL:
2470 0 : *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2471 0 : break;
2472 :
2473 : case mmMC_SEQ_CAS_TIMING:
2474 0 : *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2475 0 : break;
2476 :
2477 : case mmMC_SEQ_MISC_TIMING:
2478 0 : *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2479 0 : break;
2480 :
2481 : case mmMC_SEQ_MISC_TIMING2:
2482 0 : *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2483 0 : break;
2484 :
2485 : case mmMC_SEQ_PMG_DVS_CMD:
2486 0 : *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2487 0 : break;
2488 :
2489 : case mmMC_SEQ_PMG_DVS_CTL:
2490 0 : *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2491 0 : break;
2492 :
2493 : case mmMC_SEQ_RD_CTL_D0:
2494 0 : *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2495 0 : break;
2496 :
2497 : case mmMC_SEQ_RD_CTL_D1:
2498 0 : *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2499 0 : break;
2500 :
2501 : case mmMC_SEQ_WR_CTL_D0:
2502 0 : *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2503 0 : break;
2504 :
2505 : case mmMC_SEQ_WR_CTL_D1:
2506 0 : *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2507 0 : break;
2508 :
2509 : case mmMC_PMG_CMD_EMRS:
2510 0 : *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2511 0 : break;
2512 :
2513 : case mmMC_PMG_CMD_MRS:
2514 0 : *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2515 0 : break;
2516 :
2517 : case mmMC_PMG_CMD_MRS1:
2518 0 : *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2519 0 : break;
2520 :
2521 : case mmMC_SEQ_PMG_TIMING:
2522 0 : *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2523 0 : break;
2524 :
2525 : case mmMC_PMG_CMD_MRS2:
2526 0 : *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2527 0 : break;
2528 :
2529 : case mmMC_SEQ_WR_CTL_2:
2530 0 : *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2531 0 : break;
2532 :
2533 : default:
2534 : result = false;
2535 : break;
2536 : }
2537 :
2538 0 : return result;
2539 : }
2540 :
2541 0 : static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2542 : {
2543 : uint32_t i;
2544 : uint16_t address;
2545 :
2546 0 : for (i = 0; i < table->last; i++) {
2547 0 : table->mc_reg_address[i].s0 =
2548 0 : ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2549 : ? address : table->mc_reg_address[i].s1;
2550 : }
2551 0 : return 0;
2552 : }
2553 :
2554 0 : static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2555 : struct ci_mc_reg_table *ni_table)
2556 : {
2557 : uint8_t i, j;
2558 :
2559 0 : PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2560 : "Invalid VramInfo table.", return -EINVAL);
2561 0 : PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2562 : "Invalid VramInfo table.", return -EINVAL);
2563 :
2564 0 : for (i = 0; i < table->last; i++)
2565 0 : ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2566 :
2567 0 : ni_table->last = table->last;
2568 :
2569 0 : for (i = 0; i < table->num_entries; i++) {
2570 0 : ni_table->mc_reg_table_entry[i].mclk_max =
2571 0 : table->mc_reg_table_entry[i].mclk_max;
2572 0 : for (j = 0; j < table->last; j++) {
2573 0 : ni_table->mc_reg_table_entry[i].mc_data[j] =
2574 0 : table->mc_reg_table_entry[i].mc_data[j];
2575 : }
2576 : }
2577 :
2578 0 : ni_table->num_entries = table->num_entries;
2579 :
2580 0 : return 0;
2581 : }
2582 :
/*
 * Append derived "special" MC register entries to the table.
 *
 * Walks the existing entries (indices 0..last-1) and, for the source
 * registers below, appends new entries at index j (starting at
 * table->last) whose per-level data is synthesized from the current
 * hardware value and the source entry's data:
 *
 *  - mmMC_SEQ_MISC1: appends MC_PMG_CMD_EMRS and MC_PMG_CMD_MRS entries;
 *    for non-GDDR5 memory also appends an MC_PMG_AUTO_CMD entry.
 *  - mmMC_SEQ_RESERVE_M: appends an MC_PMG_CMD_MRS1 entry.
 *
 * Each append is bound-checked against the fixed-size register array.
 * On success table->last is advanced to cover the new entries.
 * Returns 0 on success, -EINVAL if the table would overflow.
 */
static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
			struct ci_mc_reg_table *table)
{
	uint8_t i, j, k;
	uint32_t temp_reg;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	for (i = 0, j = table->last; i < table->last; i++) {
		PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -EINVAL);

		switch (table->mc_reg_address[i].s1) {

		case mmMC_SEQ_MISC1:
			/* EMRS entry: keep the live register's high half,
			 * take the low half from MISC1's high half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;

			PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);
			/* MRS entry: live high half plus MISC1's low half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);

				/* Non-GDDR5 parts additionally set bit 8. */
				if (!data->is_memory_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;

			if (!data->is_memory_gddr5) {
				PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
					"Invalid VramInfo table.", return -EINVAL);
				/* AUTO_CMD has no LP shadow: s0 == s1. */
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
			}

			break;

		case mmMC_SEQ_RESERVE_M:
			/* MRS1 entry: live high half plus RESERVE_M's low half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			break;

		default:
			break;
		}

	}

	table->last = j;

	return 0;
}
2658 :
2659 : static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2660 : {
2661 : uint8_t i, j;
2662 :
2663 0 : for (i = 0; i < table->last; i++) {
2664 0 : for (j = 1; j < table->num_entries; j++) {
2665 0 : if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2666 0 : table->mc_reg_table_entry[j].mc_data[i]) {
2667 0 : table->validflag |= (1 << i);
2668 : break;
2669 : }
2670 : }
2671 : }
2672 :
2673 : return 0;
2674 : }
2675 :
2676 0 : static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2677 : {
2678 : int result;
2679 0 : struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2680 : pp_atomctrl_mc_reg_table *table;
2681 0 : struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2682 0 : uint8_t module_index = ci_get_memory_modile_index(hwmgr);
2683 :
2684 0 : table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2685 :
2686 0 : if (NULL == table)
2687 : return -ENOMEM;
2688 :
2689 : /* Program additional LP registers that are no longer programmed by VBIOS */
2690 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2691 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2692 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2693 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2694 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2695 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2696 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2697 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2698 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2699 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2700 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2701 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2702 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2703 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2704 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2705 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2706 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2707 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2708 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2709 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2710 :
2711 0 : result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2712 :
2713 0 : if (0 == result)
2714 0 : result = ci_copy_vbios_smc_reg_table(table, ni_table);
2715 :
2716 0 : if (0 == result) {
2717 0 : ci_set_s0_mc_reg_index(ni_table);
2718 0 : result = ci_set_mc_special_registers(hwmgr, ni_table);
2719 : }
2720 :
2721 0 : if (0 == result)
2722 : ci_set_valid_flag(ni_table);
2723 :
2724 0 : kfree(table);
2725 :
2726 0 : return result;
2727 : }
2728 :
/* DPM is reported as running exactly when the SMC is executing from RAM. */
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	return ci_is_smc_ram_running(hwmgr);
}
2733 :
2734 0 : static int ci_smu_init(struct pp_hwmgr *hwmgr)
2735 : {
2736 0 : struct ci_smumgr *ci_priv = NULL;
2737 :
2738 0 : ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2739 :
2740 0 : if (ci_priv == NULL)
2741 : return -ENOMEM;
2742 :
2743 0 : hwmgr->smu_backend = ci_priv;
2744 :
2745 0 : return 0;
2746 : }
2747 :
/* Free the CI smumgr private state and clear the backend pointer so it
 * cannot be used after free.  Always returns 0. */
static int ci_smu_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	return 0;
}
2754 :
/* No explicit start sequence is performed here; success is reported
 * unconditionally.  NOTE(review): presumably SMC firmware load/start is
 * handled outside this smumgr on CI — confirm against the caller. */
static int ci_start_smu(struct pp_hwmgr *hwmgr)
{
	return 0;
}
2759 :
/*
 * Apply a profile mode setting to the SMC's DPM tables.
 *
 * For each SCLK (and/or MCLK) level whose cached activity level or
 * up/down hysteresis differs from the requested setting, the cached copy
 * is updated and the field is patched in place in SMC RAM: the field's
 * byte offset within the table is computed via offsetof(), the containing
 * 32-bit word is read back, the field is spliced in with
 * phm_set_field_to_u32(), and the word is rewritten.  Unless the
 * corresponding DPM key is disabled, levels are frozen before and
 * unfrozen after the update.
 *
 * @profile_setting: a struct profile_mode_setting describing which clock
 *                   domains to update and the target values.
 * Returns 0 on success, -EINVAL when profile_setting is NULL.
 */
static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
				void *profile_setting)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)
			(hwmgr->smu_backend);
	struct profile_mode_setting *setting;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	/* Base SMC RAM address of the graphics (SCLK) level array. */
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);

	/* Base SMC RAM address of the memory (MCLK) level array. */
	uint32_t mclk_array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	struct SMU7_Discrete_MemoryLevel *mclk_levels =
			smu_data->smc_state_table.MemoryLevel;
	uint32_t i;
	uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;

	if (profile_setting == NULL)
		return -EINVAL;

	setting = (struct profile_mode_setting *)profile_setting;

	if (setting->bupdate_sclk) {
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
		for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
			/* Cached table stores big-endian; compare in that form. */
			if (levels[i].ActivityLevel !=
				cpu_to_be16(setting->sclk_activity)) {
				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);

				clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
						+ offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
				/* SMC RAM is accessed as aligned 32-bit words. */
				offset = clk_activity_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));

			}
			if (levels[i].UpH != setting->sclk_up_hyst ||
				levels[i].DownH != setting->sclk_down_hyst) {
				levels[i].UpH = setting->sclk_up_hyst;
				levels[i].DownH = setting->sclk_down_hyst;
				up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
						+ offsetof(SMU7_Discrete_GraphicsLevel, UpH);
				down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
						+ offsetof(SMU7_Discrete_GraphicsLevel, DownH);
				/* UpH and DownH share one aligned word; patch both then write once. */
				offset = up_hyst_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
			}
		}
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
	}

	if (setting->bupdate_mclk) {
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
		for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
			if (mclk_levels[i].ActivityLevel !=
				cpu_to_be16(setting->mclk_activity)) {
				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);

				clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
						+ offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
				offset = clk_activity_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));

			}
			if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
				mclk_levels[i].DownH != setting->mclk_down_hyst) {
				mclk_levels[i].UpH = setting->mclk_up_hyst;
				mclk_levels[i].DownH = setting->mclk_down_hyst;
				up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
						+ offsetof(SMU7_Discrete_MemoryLevel, UpH);
				down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
						+ offsetof(SMU7_Discrete_MemoryLevel, DownH);
				offset = up_hyst_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
			}
		}
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
	}
	return 0;
}
2855 :
/*
 * Refresh the SMC's UVD DPM state.
 *
 * Chooses the UVD boot level (0 when UVD DPM is supported or the
 * dependency table is empty, otherwise the highest table entry), writes
 * it to the DPM_TABLE_475 register field, then rebuilds the UVD enable
 * mask from every entry whose voltage fits under the current AC/DC limit
 * and sends it to the SMC.  Always returns 0.
 */
static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct smu7_hwmgr *data = hwmgr->backend;
	struct ci_smumgr *smu_data = hwmgr->smu_backend;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
			hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	/* Voltage ceiling depends on whether we are on AC or DC power. */
	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
	int32_t i;

	if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
		smu_data->smc_state_table.UvdBootLevel = 0;
	else
		smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
				UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);

	data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

	/* Walk levels high-to-low; stop after the first level when a profile
	 * mode is forced or UVD DPM is not supported. */
	for (i = uvd_table->count - 1; i >= 0; i--) {
		if (uvd_table->entries[i].v <= max_vddc)
			data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
			break;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
				data->dpm_level_enable_mask.uvd_dpm_enable_mask,
				NULL);

	return 0;
}
2893 :
/*
 * Refresh the SMC's VCE DPM state.
 *
 * Writes VCE boot level 0 (see the inline comment), then rebuilds the VCE
 * enable mask from every dependency-table entry whose voltage fits under
 * the current AC/DC limit and sends it to the SMC.  Always returns 0.
 */
static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct smu7_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
			hwmgr->dyn_state.vce_clock_voltage_dependency_table;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	/* Voltage ceiling depends on whether we are on AC or DC power. */
	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
	int32_t i;

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
				VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/

	data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;

	/* Walk levels high-to-low; stop after the first level when a profile
	 * mode is forced or VCE DPM is not supported. */
	for (i = vce_table->count - 1; i >= 0; i--) {
		if (vce_table->entries[i].v <= max_vddc)
			data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
			break;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
				data->dpm_level_enable_mask.vce_dpm_enable_mask,
				NULL);

	return 0;
}
2925 :
2926 0 : static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2927 : {
2928 0 : switch (type) {
2929 : case SMU_UVD_TABLE:
2930 0 : ci_update_uvd_smc_table(hwmgr);
2931 0 : break;
2932 : case SMU_VCE_TABLE:
2933 0 : ci_update_vce_smc_table(hwmgr);
2934 0 : break;
2935 : default:
2936 : break;
2937 : }
2938 0 : return 0;
2939 : }
2940 :
/* Assert the SMC reset line by writing 1 to rst_reg in
 * SMC_SYSCON_RESET_CNTL; the reset is not released here. */
static void ci_reset_smc(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				  SMC_SYSCON_RESET_CNTL,
				  rst_reg, 1);
}
2947 :
2948 :
/* Gate the SMC clock by setting ck_disable in SMC_SYSCON_CLOCK_CNTL_0. */
static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				  SMC_SYSCON_CLOCK_CNTL_0,
				  ck_disable, 1);
}
2955 :
/* Stop the SMC: hold it in reset first, then gate its clock.
 * Always returns 0. */
static int ci_stop_smc(struct pp_hwmgr *hwmgr)
{
	ci_reset_smc(hwmgr);
	ci_stop_smc_clock(hwmgr);

	return 0;
}
2963 :
/*
 * SMU manager callback table for CI (Sea Islands) parts.  Unset members
 * (firmware load hooks, pptable up/download) are intentionally NULL;
 * callers are expected to handle NULL callbacks.
 */
const struct pp_smumgr_func ci_smu_funcs = {
	.name = "ci_smu",
	.smu_init = ci_smu_init,
	.smu_fini = ci_smu_fini,
	.start_smu = ci_start_smu,
	.check_fw_load_finish = NULL,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = ci_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
	.get_argument = smu7_get_argument,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.get_offsetof = ci_get_offsetof,
	.process_firmware_header = ci_process_firmware_header,
	.init_smc_table = ci_init_smc_table,
	.update_sclk_threshold = ci_update_sclk_threshold,
	.thermal_setup_fan_table = ci_thermal_setup_fan_table,
	.populate_all_graphic_levels = ci_populate_all_graphic_levels,
	.populate_all_memory_levels = ci_populate_all_memory_levels,
	.get_mac_definition = ci_get_mac_definition,
	.initialize_mc_reg_table = ci_initialize_mc_reg_table,
	.is_dpm_running = ci_is_dpm_running,
	.update_dpm_settings = ci_update_dpm_settings,
	.update_smc_table = ci_update_smc_table,
	.stop_smc = ci_stop_smc,
};
|