Line data Source code
1 : /*
2 : * Copyright 2015 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 : #include "pp_debug.h"
24 : #include <linux/types.h>
25 : #include <linux/kernel.h>
26 : #include <linux/slab.h>
27 : #include "atom-types.h"
28 : #include "atombios.h"
29 : #include "processpptables.h"
30 : #include "cgs_common.h"
31 : #include "smu/smu_8_0_d.h"
32 : #include "smu8_fusion.h"
33 : #include "smu/smu_8_0_sh_mask.h"
34 : #include "smumgr.h"
35 : #include "hwmgr.h"
36 : #include "hardwaremanager.h"
37 : #include "cz_ppsmc.h"
38 : #include "smu8_hwmgr.h"
39 : #include "power_state.h"
40 : #include "pp_thermal.h"
41 :
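: /* Indirect SMU registers (read via cgs_read_ind_register() in smu8_read_sensor() below) that report the currently programmed NB and GFX voltage IDs in bits 31:24. */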
42 : #define ixSMUSVI_NB_CURRENTVID 0xD8230044
43 : #define CURRENT_NB_VID_MASK 0xff000000
44 : #define CURRENT_NB_VID__SHIFT 24
45 : #define ixSMUSVI_GFX_CURRENTVID 0xD8230048
46 : #define CURRENT_GFX_VID_MASK 0xff000000
47 : #define CURRENT_GFX_VID__SHIFT 24
48 :
49 : static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
50 :
51 : static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
52 : {
53 0 : if (smu8_magic != hw_ps->magic)
54 : return NULL;
55 :
56 : return (struct smu8_power_state *)hw_ps;
57 : }
58 :
59 : static const struct smu8_power_state *cast_const_smu8_power_state(
60 : const struct pp_hw_power_state *hw_ps)
61 : {
62 0 : if (smu8_magic != hw_ps->magic)
63 : return NULL;
64 :
65 : return (struct smu8_power_state *)hw_ps;
66 : }
67 :
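: /* Map a clock value to a DPM level index in the VCE (eclk) dependency table: the *Min messages pick the first level at or above the requested clock, the *Max messages the last level at or below it; the sclk and uvd helpers below follow the same pattern against their own tables. */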
68 : static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
69 : uint32_t clock, uint32_t msg)
70 : {
71 0 : int i = 0;
72 0 : struct phm_vce_clock_voltage_dependency_table *ptable =
73 : hwmgr->dyn_state.vce_clock_voltage_dependency_table;
74 :
75 : switch (msg) {
76 : case PPSMC_MSG_SetEclkSoftMin:
77 : case PPSMC_MSG_SetEclkHardMin:
78 0 : for (i = 0; i < (int)ptable->count; i++) {
79 0 : if (clock <= ptable->entries[i].ecclk)
80 : break;
81 : }
82 : break;
83 :
84 : case PPSMC_MSG_SetEclkSoftMax:
85 : case PPSMC_MSG_SetEclkHardMax:
86 : for (i = ptable->count - 1; i >= 0; i--) {
87 : if (clock >= ptable->entries[i].ecclk)
88 : break;
89 : }
90 : break;
91 :
92 : default:
93 : break;
94 : }
95 :
96 0 : return i;
97 : }
98 :
99 : static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
100 : uint32_t clock, uint32_t msg)
101 : {
102 0 : int i = 0;
103 0 : struct phm_clock_voltage_dependency_table *table =
104 : hwmgr->dyn_state.vddc_dependency_on_sclk;
105 :
106 : switch (msg) {
107 : case PPSMC_MSG_SetSclkSoftMin:
108 : case PPSMC_MSG_SetSclkHardMin:
109 0 : for (i = 0; i < (int)table->count; i++) {
110 0 : if (clock <= table->entries[i].clk)
111 : break;
112 : }
113 : break;
114 :
115 : case PPSMC_MSG_SetSclkSoftMax:
116 : case PPSMC_MSG_SetSclkHardMax:
117 0 : for (i = table->count - 1; i >= 0; i--) {
118 0 : if (clock >= table->entries[i].clk)
119 : break;
120 : }
121 : break;
122 :
123 : default:
124 : break;
125 : }
126 0 : return i;
127 : }
128 :
129 : static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
130 : uint32_t clock, uint32_t msg)
131 : {
132 0 : int i = 0;
133 0 : struct phm_uvd_clock_voltage_dependency_table *ptable =
134 : hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
135 :
136 : switch (msg) {
137 : case PPSMC_MSG_SetUvdSoftMin:
138 : case PPSMC_MSG_SetUvdHardMin:
139 0 : for (i = 0; i < (int)ptable->count; i++) {
140 0 : if (clock <= ptable->entries[i].vclk)
141 : break;
142 : }
143 : break;
144 :
145 : case PPSMC_MSG_SetUvdSoftMax:
146 : case PPSMC_MSG_SetUvdHardMax:
147 : for (i = ptable->count - 1; i >= 0; i--) {
148 : if (clock >= ptable->entries[i].vclk)
149 : break;
150 : }
151 : break;
152 :
153 : default:
154 : break;
155 : }
156 :
157 0 : return i;
158 : }
159 :
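: /* Ask the SMU for the max SCLK DPM level once and cache it; the +1 lets callers treat the cached value as a level count (they index with "level - 1"). */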
160 0 : static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
161 : {
162 0 : struct smu8_hwmgr *data = hwmgr->backend;
163 :
164 0 : if (data->max_sclk_level == 0) {
165 0 : smum_send_msg_to_smc(hwmgr,
166 : PPSMC_MSG_GetMaxSclkLevel,
167 : &data->max_sclk_level);
168 0 : data->max_sclk_level += 1;
169 : }
170 :
171 0 : return data->max_sclk_level;
172 : }
173 :
174 0 : static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
175 : {
176 0 : struct smu8_hwmgr *data = hwmgr->backend;
177 0 : struct amdgpu_device *adev = hwmgr->adev;
178 :
179 0 : data->gfx_ramp_step = 256*25/100;
180 0 : data->gfx_ramp_delay = 1; /* by default, we delay 1us */
181 :
182 0 : data->mgcg_cgtt_local0 = 0x00000000;
183 0 : data->mgcg_cgtt_local1 = 0x00000000;
184 0 : data->clock_slow_down_freq = 25000;
185 0 : data->skip_clock_slow_down = 1;
186 0 : data->enable_nb_ps_policy = 1; /* enabled (was disabled until UNB was ready) */
187 0 : data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
188 0 : data->voting_rights_clients = 0x00C00033;
189 0 : data->static_screen_threshold = 8;
190 0 : data->ddi_power_gating_disabled = 0;
191 0 : data->bapm_enabled = 1;
192 0 : data->voltage_drop_threshold = 0;
193 0 : data->gfx_power_gating_threshold = 500;
194 0 : data->vce_slow_sclk_threshold = 20000;
195 0 : data->dce_slow_sclk_threshold = 30000;
196 0 : data->disable_driver_thermal_policy = 1;
197 0 : data->disable_nb_ps3_in_battery = 0;
198 :
199 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
200 : PHM_PlatformCaps_ABM);
201 :
202 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
203 : PHM_PlatformCaps_NonABMSupportInPPLib);
204 :
205 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
206 : PHM_PlatformCaps_DynamicM3Arbiter);
207 :
208 0 : data->override_dynamic_mgpg = 1;
209 :
210 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
211 : PHM_PlatformCaps_DynamicPatchPowerState);
212 :
213 0 : data->thermal_auto_throttling_treshold = 0;
214 0 : data->tdr_clock = 0;
215 0 : data->disable_gfx_power_gating_in_uvd = 0;
216 :
217 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
218 : PHM_PlatformCaps_DynamicUVDState);
219 :
220 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221 : PHM_PlatformCaps_UVDDPM);
222 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
223 : PHM_PlatformCaps_VCEDPM);
224 :
225 0 : data->cc6_settings.cpu_cc6_disable = false;
226 0 : data->cc6_settings.cpu_pstate_disable = false;
227 0 : data->cc6_settings.nb_pstate_switch_disable = false;
228 0 : data->cc6_settings.cpu_pstate_separation_time = 0;
229 :
230 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
231 : PHM_PlatformCaps_DisableVoltageIsland);
232 :
233 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234 : PHM_PlatformCaps_UVDPowerGating);
235 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 : PHM_PlatformCaps_VCEPowerGating);
237 :
238 0 : if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
239 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
240 : PHM_PlatformCaps_UVDPowerGating);
241 0 : if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
242 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
243 : PHM_PlatformCaps_VCEPowerGating);
244 :
245 :
246 0 : return 0;
247 : }
248 :
249 : /* convert from 8bit vid to real voltage in mV*4 */
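: /* e.g. VID 0 -> 6200 (mV*4) = 1550 mV, and each VID step subtracts 25 (mV*4) = 6.25 mV */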
250 : static uint32_t smu8_convert_8Bit_index_to_voltage(
251 : struct pp_hwmgr *hwmgr, uint16_t voltage)
252 : {
253 0 : return 6200 - (voltage * 25);
254 : }
255 :
256 : static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
257 : struct phm_clock_and_voltage_limits *table)
258 : {
259 0 : struct smu8_hwmgr *data = hwmgr->backend;
260 0 : struct smu8_sys_info *sys_info = &data->sys_info;
261 0 : struct phm_clock_voltage_dependency_table *dep_table =
262 : hwmgr->dyn_state.vddc_dependency_on_sclk;
263 :
264 0 : if (dep_table->count > 0) {
265 0 : table->sclk = dep_table->entries[dep_table->count-1].clk;
266 0 : table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
267 0 : (uint16_t)dep_table->entries[dep_table->count-1].v);
268 : }
269 0 : table->mclk = sys_info->nbp_memory_clock[0];
270 : return 0;
271 : }
272 :
273 0 : static int smu8_init_dynamic_state_adjustment_rule_settings(
274 : struct pp_hwmgr *hwmgr,
275 : ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
276 : {
277 : struct phm_clock_voltage_dependency_table *table_clk_vlt;
278 :
279 0 : table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8),
280 : GFP_KERNEL);
281 :
282 0 : if (NULL == table_clk_vlt) {
283 0 : pr_err("Can not allocate memory!\n");
284 : return -ENOMEM;
285 : }
286 :
287 0 : table_clk_vlt->count = 8;
288 0 : table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
289 0 : table_clk_vlt->entries[0].v = 0;
290 0 : table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
291 0 : table_clk_vlt->entries[1].v = 1;
292 0 : table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
293 0 : table_clk_vlt->entries[2].v = 2;
294 0 : table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
295 0 : table_clk_vlt->entries[3].v = 3;
296 0 : table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
297 0 : table_clk_vlt->entries[4].v = 4;
298 0 : table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
299 0 : table_clk_vlt->entries[5].v = 5;
300 0 : table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
301 0 : table_clk_vlt->entries[6].v = 6;
302 0 : table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
303 0 : table_clk_vlt->entries[7].v = 7;
304 0 : hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
305 :
306 : return 0;
307 : }
308 :
309 0 : static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
310 : {
311 0 : struct smu8_hwmgr *data = hwmgr->backend;
312 0 : ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
313 : uint32_t i;
314 0 : int result = 0;
315 : uint8_t frev, crev;
316 : uint16_t size;
317 :
318 0 : info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
319 : GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
320 : &size, &frev, &crev);
321 :
322 0 : if (info == NULL) {
323 0 : pr_err("Could not retrieve the Integrated System Info Table!\n");
324 0 : return -EINVAL;
325 : }
326 :
327 0 : if (crev != 9) {
328 0 : pr_err("Unsupported IGP table: %d %d\n", frev, crev);
329 0 : return -EINVAL;
330 : }
331 :
332 0 : data->sys_info.bootup_uma_clock =
333 0 : le32_to_cpu(info->ulBootUpUMAClock);
334 :
335 0 : data->sys_info.bootup_engine_clock =
336 0 : le32_to_cpu(info->ulBootUpEngineClock);
337 :
338 0 : data->sys_info.dentist_vco_freq =
339 0 : le32_to_cpu(info->ulDentistVCOFreq);
340 :
341 0 : data->sys_info.system_config =
342 0 : le32_to_cpu(info->ulSystemConfig);
343 :
344 0 : data->sys_info.bootup_nb_voltage_index =
345 0 : le16_to_cpu(info->usBootUpNBVoltage);
346 :
347 0 : data->sys_info.htc_hyst_lmt =
348 0 : (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
349 :
350 0 : data->sys_info.htc_tmp_lmt =
351 0 : (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
352 :
353 0 : if (data->sys_info.htc_tmp_lmt <=
354 : data->sys_info.htc_hyst_lmt) {
355 0 : pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
356 0 : return -EINVAL;
357 : }
358 :
359 0 : data->sys_info.nb_dpm_enable =
360 0 : data->enable_nb_ps_policy &&
361 0 : (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
362 :
363 0 : for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
364 0 : if (i < SMU8_NUM_NBPMEMORYCLOCK) {
365 0 : data->sys_info.nbp_memory_clock[i] =
366 0 : le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
367 : }
368 0 : data->sys_info.nbp_n_clock[i] =
369 0 : le32_to_cpu(info->ulNbpStateNClkFreq[i]);
370 : }
371 :
372 0 : for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
373 0 : data->sys_info.display_clock[i] =
374 0 : le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
375 : }
376 :
377 : /* Here we use 4 levels; make sure we do not exceed them */
378 0 : for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
379 0 : data->sys_info.nbp_voltage_index[i] =
380 0 : le16_to_cpu(info->usNBPStateVoltage[i]);
381 : }
382 :
383 0 : if (!data->sys_info.nb_dpm_enable) {
384 0 : for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
385 0 : if (i < SMU8_NUM_NBPMEMORYCLOCK) {
386 0 : data->sys_info.nbp_memory_clock[i] =
387 0 : data->sys_info.nbp_memory_clock[0];
388 : }
389 0 : data->sys_info.nbp_n_clock[i] =
390 0 : data->sys_info.nbp_n_clock[0];
391 0 : data->sys_info.nbp_voltage_index[i] =
392 0 : data->sys_info.nbp_voltage_index[0];
393 : }
394 : }
395 :
396 0 : if (le32_to_cpu(info->ulGPUCapInfo) &
397 : SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
398 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
399 : PHM_PlatformCaps_EnableDFSBypass);
400 : }
401 :
402 0 : data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
403 :
404 0 : smu8_construct_max_power_limits_table(hwmgr,
405 : &hwmgr->dyn_state.max_clock_voltage_on_ac);
406 :
407 0 : smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
408 : &info->sDISPCLK_Voltage[0]);
409 :
410 0 : return result;
411 : }
412 :
413 : static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
414 : {
415 0 : struct smu8_hwmgr *data = hwmgr->backend;
416 :
417 0 : data->boot_power_level.engineClock =
418 0 : data->sys_info.bootup_engine_clock;
419 :
420 0 : data->boot_power_level.vddcIndex =
421 0 : (uint8_t)data->sys_info.bootup_nb_voltage_index;
422 :
423 0 : data->boot_power_level.dsDividerIndex = 0;
424 0 : data->boot_power_level.ssDividerIndex = 0;
425 0 : data->boot_power_level.allowGnbSlow = 1;
426 0 : data->boot_power_level.forceNBPstate = 0;
427 0 : data->boot_power_level.hysteresis_up = 0;
428 0 : data->boot_power_level.numSIMDToPowerDown = 0;
429 0 : data->boot_power_level.display_wm = 0;
430 0 : data->boot_power_level.vce_wm = 0;
431 :
432 : return 0;
433 : }
434 :
435 0 : static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
436 : {
437 : struct SMU8_Fusion_ClkTable *clock_table;
438 : int ret;
439 : uint32_t i;
440 0 : void *table = NULL;
441 : pp_atomctrl_clock_dividers_kong dividers;
442 :
443 0 : struct phm_clock_voltage_dependency_table *vddc_table =
444 : hwmgr->dyn_state.vddc_dependency_on_sclk;
445 0 : struct phm_clock_voltage_dependency_table *vdd_gfx_table =
446 : hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
447 0 : struct phm_acp_clock_voltage_dependency_table *acp_table =
448 : hwmgr->dyn_state.acp_clock_voltage_dependency_table;
449 0 : struct phm_uvd_clock_voltage_dependency_table *uvd_table =
450 : hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
451 0 : struct phm_vce_clock_voltage_dependency_table *vce_table =
452 : hwmgr->dyn_state.vce_clock_voltage_dependency_table;
453 :
454 0 : if (!hwmgr->need_pp_table_upload)
455 : return 0;
456 :
457 0 : ret = smum_download_powerplay_table(hwmgr, &table);
458 :
459 0 : PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
460 : "Fail to get clock table from SMU!", return -EINVAL;);
461 :
462 0 : clock_table = (struct SMU8_Fusion_ClkTable *)table;
463 :
464 : /* patch clock table */
465 0 : PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
466 : "Dependency table entry exceeds max limit!", return -EINVAL;);
467 0 : PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
468 : "Dependency table entry exceeds max limit!", return -EINVAL;);
469 0 : PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
470 : "Dependency table entry exceeds max limit!", return -EINVAL;);
471 0 : PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
472 : "Dependency table entry exceeds max limit!", return -EINVAL;);
473 0 : PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
474 : "Dependency table entry exceeds max limit!", return -EINVAL;);
475 :
476 0 : for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
477 :
478 : /* vddc_sclk */
479 0 : clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
480 0 : (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
481 0 : clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
482 0 : (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
483 :
484 0 : atomctrl_get_engine_pll_dividers_kong(hwmgr,
485 : clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
486 : &dividers);
487 :
488 0 : clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
489 0 : (uint8_t)dividers.pll_post_divider;
490 :
491 : /* vddgfx_sclk */
492 0 : clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
493 0 : (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
494 :
495 : /* acp breakdown */
496 0 : clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
497 0 : (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
498 0 : clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
499 0 : (i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
500 :
501 0 : atomctrl_get_engine_pll_dividers_kong(hwmgr,
502 : clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
503 : &dividers);
504 :
505 0 : clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
506 0 : (uint8_t)dividers.pll_post_divider;
507 :
508 :
509 : /* uvd breakdown */
510 0 : clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
511 0 : (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
512 0 : clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
513 0 : (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
514 :
515 0 : atomctrl_get_engine_pll_dividers_kong(hwmgr,
516 : clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
517 : &dividers);
518 :
519 0 : clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
520 0 : (uint8_t)dividers.pll_post_divider;
521 :
522 0 : clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
523 0 : (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
524 0 : clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
525 0 : (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
526 :
527 0 : atomctrl_get_engine_pll_dividers_kong(hwmgr,
528 : clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
529 : &dividers);
530 :
531 0 : clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
532 0 : (uint8_t)dividers.pll_post_divider;
533 :
534 : /* vce breakdown */
535 0 : clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
536 0 : (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
537 0 : clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
538 0 : (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
539 :
540 :
541 0 : atomctrl_get_engine_pll_dividers_kong(hwmgr,
542 : clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
543 : &dividers);
544 :
545 0 : clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
546 0 : (uint8_t)dividers.pll_post_divider;
547 :
548 : }
549 0 : ret = smum_upload_powerplay_table(hwmgr);
550 :
551 0 : return ret;
552 : }
553 :
554 0 : static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
555 : {
556 0 : struct smu8_hwmgr *data = hwmgr->backend;
557 0 : struct phm_clock_voltage_dependency_table *table =
558 : hwmgr->dyn_state.vddc_dependency_on_sclk;
559 0 : unsigned long clock = 0, level;
560 :
561 0 : if (NULL == table || table->count <= 0)
562 : return -EINVAL;
563 :
564 0 : data->sclk_dpm.soft_min_clk = table->entries[0].clk;
565 0 : data->sclk_dpm.hard_min_clk = table->entries[0].clk;
566 :
567 0 : level = smu8_get_max_sclk_level(hwmgr) - 1;
568 :
569 0 : if (level < table->count)
570 0 : clock = table->entries[level].clk;
571 : else
572 0 : clock = table->entries[table->count - 1].clk;
573 :
574 0 : data->sclk_dpm.soft_max_clk = clock;
575 0 : data->sclk_dpm.hard_max_clk = clock;
576 :
577 0 : return 0;
578 : }
579 :
580 0 : static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
581 : {
582 0 : struct smu8_hwmgr *data = hwmgr->backend;
583 0 : struct phm_uvd_clock_voltage_dependency_table *table =
584 : hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
585 0 : unsigned long clock = 0;
586 : uint32_t level;
587 :
588 0 : if (NULL == table || table->count <= 0)
589 : return -EINVAL;
590 :
591 0 : data->uvd_dpm.soft_min_clk = 0;
592 0 : data->uvd_dpm.hard_min_clk = 0;
593 :
594 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
595 :
596 0 : if (level < table->count)
597 0 : clock = table->entries[level].vclk;
598 : else
599 0 : clock = table->entries[table->count - 1].vclk;
600 :
601 0 : data->uvd_dpm.soft_max_clk = clock;
602 0 : data->uvd_dpm.hard_max_clk = clock;
603 :
604 0 : return 0;
605 : }
606 :
607 0 : static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
608 : {
609 0 : struct smu8_hwmgr *data = hwmgr->backend;
610 0 : struct phm_vce_clock_voltage_dependency_table *table =
611 : hwmgr->dyn_state.vce_clock_voltage_dependency_table;
612 0 : unsigned long clock = 0;
613 : uint32_t level;
614 :
615 0 : if (NULL == table || table->count <= 0)
616 : return -EINVAL;
617 :
618 0 : data->vce_dpm.soft_min_clk = 0;
619 0 : data->vce_dpm.hard_min_clk = 0;
620 :
621 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
622 :
623 0 : if (level < table->count)
624 0 : clock = table->entries[level].ecclk;
625 : else
626 0 : clock = table->entries[table->count - 1].ecclk;
627 :
628 0 : data->vce_dpm.soft_max_clk = clock;
629 0 : data->vce_dpm.hard_max_clk = clock;
630 :
631 0 : return 0;
632 : }
633 :
634 0 : static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
635 : {
636 0 : struct smu8_hwmgr *data = hwmgr->backend;
637 0 : struct phm_acp_clock_voltage_dependency_table *table =
638 : hwmgr->dyn_state.acp_clock_voltage_dependency_table;
639 0 : unsigned long clock = 0;
640 : uint32_t level;
641 :
642 0 : if (NULL == table || table->count <= 0)
643 : return -EINVAL;
644 :
645 0 : data->acp_dpm.soft_min_clk = 0;
646 0 : data->acp_dpm.hard_min_clk = 0;
647 :
648 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
649 :
650 0 : if (level < table->count)
651 0 : clock = table->entries[level].acpclk;
652 : else
653 0 : clock = table->entries[table->count - 1].acpclk;
654 :
655 0 : data->acp_dpm.soft_max_clk = clock;
656 0 : data->acp_dpm.hard_max_clk = clock;
657 0 : return 0;
658 : }
659 :
660 : static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
661 : {
662 0 : struct smu8_hwmgr *data = hwmgr->backend;
663 :
664 0 : data->uvd_power_gated = false;
665 0 : data->vce_power_gated = false;
666 0 : data->samu_power_gated = false;
667 : #ifdef CONFIG_DRM_AMD_ACP
668 : data->acp_power_gated = false;
669 : #else
670 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
671 0 : data->acp_power_gated = true;
672 : #endif
673 :
674 : }
675 :
676 : static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
677 : {
678 0 : struct smu8_hwmgr *data = hwmgr->backend;
679 :
680 0 : data->low_sclk_interrupt_threshold = 0;
681 : }
682 :
683 0 : static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
684 : {
685 0 : struct smu8_hwmgr *data = hwmgr->backend;
686 0 : struct phm_clock_voltage_dependency_table *table =
687 : hwmgr->dyn_state.vddc_dependency_on_sclk;
688 :
689 0 : unsigned long clock = 0;
690 : unsigned long level;
691 : unsigned long stable_pstate_sclk;
692 : unsigned long percentage;
693 :
694 0 : data->sclk_dpm.soft_min_clk = table->entries[0].clk;
695 0 : level = smu8_get_max_sclk_level(hwmgr) - 1;
696 :
697 0 : if (level < table->count)
698 0 : data->sclk_dpm.soft_max_clk = table->entries[level].clk;
699 : else
700 0 : data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
701 :
702 0 : clock = hwmgr->display_config->min_core_set_clock;
703 : if (clock == 0)
704 : pr_debug("min_core_set_clock not set\n");
705 :
706 0 : if (data->sclk_dpm.hard_min_clk != clock) {
707 0 : data->sclk_dpm.hard_min_clk = clock;
708 :
709 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
710 : PPSMC_MSG_SetSclkHardMin,
711 : smu8_get_sclk_level(hwmgr,
712 : data->sclk_dpm.hard_min_clk,
713 : PPSMC_MSG_SetSclkHardMin),
714 : NULL);
715 : }
716 :
717 0 : clock = data->sclk_dpm.soft_min_clk;
718 :
719 : /* update minimum clocks for Stable P-State feature */
720 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
721 : PHM_PlatformCaps_StablePState)) {
722 0 : percentage = 75;
723 : /* Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
724 0 : stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
725 : percentage) / 100;
726 :
727 0 : if (clock < stable_pstate_sclk)
728 0 : clock = stable_pstate_sclk;
729 : }
730 :
731 0 : if (data->sclk_dpm.soft_min_clk != clock) {
732 0 : data->sclk_dpm.soft_min_clk = clock;
733 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
734 : PPSMC_MSG_SetSclkSoftMin,
735 : smu8_get_sclk_level(hwmgr,
736 : data->sclk_dpm.soft_min_clk,
737 : PPSMC_MSG_SetSclkSoftMin),
738 : NULL);
739 : }
740 :
741 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
742 0 : PHM_PlatformCaps_StablePState) &&
743 0 : data->sclk_dpm.soft_max_clk != clock) {
744 0 : data->sclk_dpm.soft_max_clk = clock;
745 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
746 : PPSMC_MSG_SetSclkSoftMax,
747 : smu8_get_sclk_level(hwmgr,
748 : data->sclk_dpm.soft_max_clk,
749 : PPSMC_MSG_SetSclkSoftMax),
750 : NULL);
751 : }
752 :
753 0 : return 0;
754 : }
755 :
756 0 : static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
757 : {
758 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
759 : PHM_PlatformCaps_SclkDeepSleep)) {
760 0 : uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
761 0 : if (clks == 0)
762 0 : clks = SMU8_MIN_DEEP_SLEEP_SCLK;
763 :
764 : PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
765 :
766 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
767 : PPSMC_MSG_SetMinDeepSleepSclk,
768 : clks,
769 : NULL);
770 : }
771 :
772 0 : return 0;
773 : }
774 :
775 : static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
776 : {
777 0 : struct smu8_hwmgr *data =
778 : hwmgr->backend;
779 :
780 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
781 : PPSMC_MSG_SetWatermarkFrequency,
782 : data->sclk_dpm.soft_max_clk,
783 : NULL);
784 :
785 : return 0;
786 : }
787 :
788 0 : static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
789 : {
790 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
791 :
792 0 : if (hw_data->is_nb_dpm_enabled) {
793 0 : if (enable) {
794 : PP_DBG_LOG("enable Low Memory PState.\n");
795 :
796 0 : return smum_send_msg_to_smc_with_parameter(hwmgr,
797 : PPSMC_MSG_EnableLowMemoryPstate,
798 : (lock ? 1 : 0),
799 : NULL);
800 : } else {
801 : PP_DBG_LOG("disable Low Memory PState.\n");
802 :
803 0 : return smum_send_msg_to_smc_with_parameter(hwmgr,
804 : PPSMC_MSG_DisableLowMemoryPstate,
805 : (lock ? 1 : 0),
806 : NULL);
807 : }
808 : }
809 :
810 : return 0;
811 : }
812 :
813 0 : static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
814 : {
815 0 : int ret = 0;
816 :
817 0 : struct smu8_hwmgr *data = hwmgr->backend;
818 0 : unsigned long dpm_features = 0;
819 :
820 0 : if (data->is_nb_dpm_enabled) {
821 0 : smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
822 0 : dpm_features |= NB_DPM_MASK;
823 0 : ret = smum_send_msg_to_smc_with_parameter(
824 : hwmgr,
825 : PPSMC_MSG_DisableAllSmuFeatures,
826 : dpm_features,
827 : NULL);
828 0 : if (ret == 0)
829 0 : data->is_nb_dpm_enabled = false;
830 : }
831 :
832 0 : return ret;
833 : }
834 :
835 0 : static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
836 : {
837 0 : int ret = 0;
838 :
839 0 : struct smu8_hwmgr *data = hwmgr->backend;
840 0 : unsigned long dpm_features = 0;
841 :
842 0 : if (!data->is_nb_dpm_enabled) {
843 : PP_DBG_LOG("enabling ALL SMU features.\n");
844 0 : dpm_features |= NB_DPM_MASK;
845 0 : ret = smum_send_msg_to_smc_with_parameter(
846 : hwmgr,
847 : PPSMC_MSG_EnableAllSmuFeatures,
848 : dpm_features,
849 : NULL);
850 0 : if (ret == 0)
851 0 : data->is_nb_dpm_enabled = true;
852 : }
853 :
854 0 : return ret;
855 : }
856 :
857 0 : static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
858 : {
859 : bool disable_switch;
860 : bool enable_low_mem_state;
861 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
862 0 : const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
863 0 : const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
864 :
865 0 : if (hw_data->sys_info.nb_dpm_enable) {
866 0 : disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
867 0 : enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
868 :
869 0 : if (pnew_state->action == FORCE_HIGH)
870 0 : smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
871 0 : else if (pnew_state->action == CANCEL_FORCE_HIGH)
872 0 : smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
873 : else
874 0 : smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
875 : }
876 0 : return 0;
877 : }
878 :
879 0 : static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
880 : {
881 0 : int ret = 0;
882 :
883 0 : smu8_update_sclk_limit(hwmgr);
884 0 : smu8_set_deep_sleep_sclk_threshold(hwmgr);
885 0 : smu8_set_watermark_threshold(hwmgr);
886 0 : ret = smu8_enable_nb_dpm(hwmgr);
887 0 : if (ret)
888 : return ret;
889 0 : smu8_update_low_mem_pstate(hwmgr, input);
890 :
891 0 : return 0;
892 : }
893 :
894 :
895 0 : static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
896 : {
897 : int ret;
898 :
899 0 : ret = smu8_upload_pptable_to_smu(hwmgr);
900 0 : if (ret)
901 : return ret;
902 0 : ret = smu8_init_sclk_limit(hwmgr);
903 0 : if (ret)
904 : return ret;
905 0 : ret = smu8_init_uvd_limit(hwmgr);
906 0 : if (ret)
907 : return ret;
908 0 : ret = smu8_init_vce_limit(hwmgr);
909 0 : if (ret)
910 : return ret;
911 0 : ret = smu8_init_acp_limit(hwmgr);
912 0 : if (ret)
913 : return ret;
914 :
915 0 : smu8_init_power_gate_state(hwmgr);
916 0 : smu8_init_sclk_threshold(hwmgr);
917 :
918 0 : return 0;
919 : }
920 :
921 : static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
922 : {
923 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
924 :
925 0 : hw_data->disp_clk_bypass_pending = false;
926 0 : hw_data->disp_clk_bypass = false;
927 : }
928 :
929 : static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
930 : {
931 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
932 :
933 0 : hw_data->is_nb_dpm_enabled = false;
934 : }
935 :
936 : static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
937 : {
938 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
939 :
940 0 : hw_data->cc6_settings.cc6_setting_changed = false;
941 0 : hw_data->cc6_settings.cpu_pstate_separation_time = 0;
942 0 : hw_data->cc6_settings.cpu_cc6_disable = false;
943 0 : hw_data->cc6_settings.cpu_pstate_disable = false;
944 : }
945 :
946 : static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
947 : {
948 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
949 : ixCG_FREQ_TRAN_VOTING_0,
950 : SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
951 : }
952 :
953 : static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
954 : {
955 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
956 : ixCG_FREQ_TRAN_VOTING_0, 0);
957 : }
958 :
959 : static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
960 : {
961 0 : struct smu8_hwmgr *data = hwmgr->backend;
962 :
963 0 : data->dpm_flags |= DPMFlags_SCLK_Enabled;
964 :
965 0 : return smum_send_msg_to_smc_with_parameter(hwmgr,
966 : PPSMC_MSG_EnableAllSmuFeatures,
967 : SCLK_DPM_MASK,
968 : NULL);
969 : }
970 :
971 0 : static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
972 : {
973 0 : int ret = 0;
974 0 : struct smu8_hwmgr *data = hwmgr->backend;
975 0 : unsigned long dpm_features = 0;
976 :
977 0 : if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
978 0 : dpm_features |= SCLK_DPM_MASK;
979 0 : data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
980 0 : ret = smum_send_msg_to_smc_with_parameter(hwmgr,
981 : PPSMC_MSG_DisableAllSmuFeatures,
982 : dpm_features,
983 : NULL);
984 : }
985 0 : return ret;
986 : }
987 :
988 0 : static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
989 : {
990 0 : struct smu8_hwmgr *data = hwmgr->backend;
991 :
992 0 : data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
993 0 : data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
994 :
995 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
996 : PPSMC_MSG_SetSclkSoftMin,
997 : smu8_get_sclk_level(hwmgr,
998 : data->sclk_dpm.soft_min_clk,
999 : PPSMC_MSG_SetSclkSoftMin),
1000 : NULL);
1001 :
1002 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1003 : PPSMC_MSG_SetSclkSoftMax,
1004 : smu8_get_sclk_level(hwmgr,
1005 : data->sclk_dpm.soft_max_clk,
1006 : PPSMC_MSG_SetSclkSoftMax),
1007 : NULL);
1008 :
1009 0 : return 0;
1010 : }
1011 :
1012 : static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1013 : {
1014 0 : struct smu8_hwmgr *data = hwmgr->backend;
1015 :
1016 0 : data->acp_boot_level = 0xff;
1017 : }
1018 :
1019 0 : static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1020 : {
1021 0 : smu8_program_voting_clients(hwmgr);
1022 0 : if (smu8_start_dpm(hwmgr))
1023 : return -EINVAL;
1024 0 : smu8_program_bootup_state(hwmgr);
1025 0 : smu8_reset_acp_boot_level(hwmgr);
1026 :
1027 0 : return 0;
1028 : }
1029 :
1030 0 : static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1031 : {
1032 0 : smu8_disable_nb_dpm(hwmgr);
1033 :
1034 0 : smu8_clear_voting_clients(hwmgr);
1035 0 : if (smu8_stop_dpm(hwmgr))
1036 : return -EINVAL;
1037 :
1038 0 : return 0;
1039 : }
1040 :
1041 0 : static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
1042 : {
1043 0 : smu8_disable_dpm_tasks(hwmgr);
1044 0 : smu8_power_up_display_clock_sys_pll(hwmgr);
1045 0 : smu8_clear_nb_dpm_flag(hwmgr);
1046 0 : smu8_reset_cc6_data(hwmgr);
1047 0 : return 0;
1048 : }
1049 :
1050 0 : static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1051 : struct pp_power_state *prequest_ps,
1052 : const struct pp_power_state *pcurrent_ps)
1053 : {
1054 0 : struct smu8_power_state *smu8_ps =
1055 0 : cast_smu8_power_state(&prequest_ps->hardware);
1056 :
1057 0 : const struct smu8_power_state *smu8_current_ps =
1058 0 : cast_const_smu8_power_state(&pcurrent_ps->hardware);
1059 :
1060 0 : struct smu8_hwmgr *data = hwmgr->backend;
1061 0 : struct PP_Clocks clocks = {0, 0, 0, 0};
1062 : bool force_high;
1063 :
1064 0 : smu8_ps->need_dfs_bypass = true;
1065 :
1066 0 : data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1067 :
1068 0 : clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
1069 0 : hwmgr->display_config->min_mem_set_clock :
1070 : data->sys_info.nbp_memory_clock[1];
1071 :
1072 :
1073 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1074 0 : clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1075 :
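: /* Force the high NB P-state when the requested memory clock exceeds the last NBP memory-clock entry, or when three or more displays are active. */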
1076 0 : force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
1077 0 : || (hwmgr->display_config->num_display >= 3);
1078 :
1079 0 : smu8_ps->action = smu8_current_ps->action;
1080 :
1081 0 : if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1082 0 : smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
1083 0 : else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1084 0 : smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1085 0 : else if (!force_high && (smu8_ps->action == FORCE_HIGH))
1086 0 : smu8_ps->action = CANCEL_FORCE_HIGH;
1087 0 : else if (force_high && (smu8_ps->action != FORCE_HIGH))
1088 0 : smu8_ps->action = FORCE_HIGH;
1089 : else
1090 0 : smu8_ps->action = DO_NOTHING;
1091 :
1092 0 : return 0;
1093 : }
1094 :
1095 0 : static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1096 : {
1097 0 : int result = 0;
1098 : struct smu8_hwmgr *data;
1099 :
1100 0 : data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
1101 0 : if (data == NULL)
1102 : return -ENOMEM;
1103 :
1104 0 : hwmgr->backend = data;
1105 :
1106 0 : result = smu8_initialize_dpm_defaults(hwmgr);
1107 0 : if (result != 0) {
1108 0 : pr_err("smu8_initialize_dpm_defaults failed\n");
1109 0 : return result;
1110 : }
1111 :
1112 0 : result = smu8_get_system_info_data(hwmgr);
1113 0 : if (result != 0) {
1114 0 : pr_err("smu8_get_system_info_data failed\n");
1115 0 : return result;
1116 : }
1117 :
1118 0 : smu8_construct_boot_state(hwmgr);
1119 :
1120 0 : hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;
1121 :
1122 0 : return result;
1123 : }
1124 :
1125 0 : static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1126 : {
1127 0 : if (hwmgr != NULL) {
1128 0 : kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
1129 0 : hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1130 :
1131 0 : kfree(hwmgr->backend);
1132 0 : hwmgr->backend = NULL;
1133 : }
1134 0 : return 0;
1135 : }
1136 :
1137 0 : static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1138 : {
1139 0 : struct smu8_hwmgr *data = hwmgr->backend;
1140 :
1141 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1142 : PPSMC_MSG_SetSclkSoftMin,
1143 : smu8_get_sclk_level(hwmgr,
1144 : data->sclk_dpm.soft_max_clk,
1145 : PPSMC_MSG_SetSclkSoftMin),
1146 : NULL);
1147 :
1148 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1149 : PPSMC_MSG_SetSclkSoftMax,
1150 : smu8_get_sclk_level(hwmgr,
1151 : data->sclk_dpm.soft_max_clk,
1152 : PPSMC_MSG_SetSclkSoftMax),
1153 : NULL);
1154 :
1155 0 : return 0;
1156 : }
1157 :
1158 0 : static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1159 : {
1160 0 : struct smu8_hwmgr *data = hwmgr->backend;
1161 0 : struct phm_clock_voltage_dependency_table *table =
1162 : hwmgr->dyn_state.vddc_dependency_on_sclk;
1163 0 : unsigned long clock = 0, level;
1164 :
1165 0 : if (NULL == table || table->count <= 0)
1166 : return -EINVAL;
1167 :
1168 0 : data->sclk_dpm.soft_min_clk = table->entries[0].clk;
1169 0 : data->sclk_dpm.hard_min_clk = table->entries[0].clk;
1170 0 : hwmgr->pstate_sclk = table->entries[0].clk;
1171 0 : hwmgr->pstate_mclk = 0;
1172 :
1173 0 : level = smu8_get_max_sclk_level(hwmgr) - 1;
1174 :
1175 0 : if (level < table->count)
1176 0 : clock = table->entries[level].clk;
1177 : else
1178 0 : clock = table->entries[table->count - 1].clk;
1179 :
1180 0 : data->sclk_dpm.soft_max_clk = clock;
1181 0 : data->sclk_dpm.hard_max_clk = clock;
1182 :
1183 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1184 : PPSMC_MSG_SetSclkSoftMin,
1185 : smu8_get_sclk_level(hwmgr,
1186 : data->sclk_dpm.soft_min_clk,
1187 : PPSMC_MSG_SetSclkSoftMin),
1188 : NULL);
1189 :
1190 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1191 : PPSMC_MSG_SetSclkSoftMax,
1192 : smu8_get_sclk_level(hwmgr,
1193 : data->sclk_dpm.soft_max_clk,
1194 : PPSMC_MSG_SetSclkSoftMax),
1195 : NULL);
1196 :
1197 0 : return 0;
1198 : }
1199 :
1200 0 : static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1201 : {
1202 0 : struct smu8_hwmgr *data = hwmgr->backend;
1203 :
1204 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1205 : PPSMC_MSG_SetSclkSoftMax,
1206 : smu8_get_sclk_level(hwmgr,
1207 : data->sclk_dpm.soft_min_clk,
1208 : PPSMC_MSG_SetSclkSoftMax),
1209 : NULL);
1210 :
1211 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1212 : PPSMC_MSG_SetSclkSoftMin,
1213 : smu8_get_sclk_level(hwmgr,
1214 : data->sclk_dpm.soft_min_clk,
1215 : PPSMC_MSG_SetSclkSoftMin),
1216 : NULL);
1217 :
1218 0 : return 0;
1219 : }
1220 :
1221 0 : static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1222 : enum amd_dpm_forced_level level)
1223 : {
1224 0 : int ret = 0;
1225 :
1226 0 : switch (level) {
1227 : case AMD_DPM_FORCED_LEVEL_HIGH:
1228 : case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1229 0 : ret = smu8_phm_force_dpm_highest(hwmgr);
1230 0 : break;
1231 : case AMD_DPM_FORCED_LEVEL_LOW:
1232 : case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1233 : case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1234 0 : ret = smu8_phm_force_dpm_lowest(hwmgr);
1235 0 : break;
1236 : case AMD_DPM_FORCED_LEVEL_AUTO:
1237 0 : ret = smu8_phm_unforce_dpm_levels(hwmgr);
1238 0 : break;
1239 : case AMD_DPM_FORCED_LEVEL_MANUAL:
1240 : case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1241 : default:
1242 : break;
1243 : }
1244 :
1245 0 : return ret;
1246 : }
1247 :
1248 0 : static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1249 : {
1250 0 : if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1251 0 : return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
1252 : return 0;
1253 : }
1254 :
1255 0 : static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1256 : {
1257 0 : if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1258 0 : return smum_send_msg_to_smc_with_parameter(
1259 : hwmgr,
1260 : PPSMC_MSG_UVDPowerON,
1261 0 : PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
1262 : NULL);
1263 : }
1264 :
1265 : return 0;
1266 : }
1267 :
1268 0 : static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1269 : {
1270 0 : struct smu8_hwmgr *data = hwmgr->backend;
1271 0 : struct phm_vce_clock_voltage_dependency_table *ptable =
1272 : hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1273 :
1274 : /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1275 0 : if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1276 0 : hwmgr->en_umd_pstate) {
1277 0 : data->vce_dpm.hard_min_clk =
1278 0 : ptable->entries[ptable->count - 1].ecclk;
1279 :
1280 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1281 : PPSMC_MSG_SetEclkHardMin,
1282 : smu8_get_eclk_level(hwmgr,
1283 : data->vce_dpm.hard_min_clk,
1284 : PPSMC_MSG_SetEclkHardMin),
1285 : NULL);
1286 : } else {
1287 :
1288 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1289 : PPSMC_MSG_SetEclkHardMin,
1290 : 0,
1291 : NULL);
1292 : /* disable ECLK DPM 0. Otherwise VCE could hang if
1293 : * switching SCLK from DPM 0 to 6/7 */
1294 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1295 : PPSMC_MSG_SetEclkSoftMin,
1296 : 1,
1297 : NULL);
1298 : }
1299 0 : return 0;
1300 : }
1301 :
1302 0 : static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1303 : {
1304 0 : if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1305 0 : return smum_send_msg_to_smc(hwmgr,
1306 : PPSMC_MSG_VCEPowerOFF,
1307 : NULL);
1308 : return 0;
1309 : }
1310 :
1311 0 : static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1312 : {
1313 0 : if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1314 0 : return smum_send_msg_to_smc(hwmgr,
1315 : PPSMC_MSG_VCEPowerON,
1316 : NULL);
1317 : return 0;
1318 : }
1319 :
1320 0 : static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1321 : {
1322 0 : struct smu8_hwmgr *data = hwmgr->backend;
1323 :
1324 0 : return data->sys_info.bootup_uma_clock;
1325 : }
1326 :
1327 0 : static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1328 : {
1329 : struct pp_power_state *ps;
1330 : struct smu8_power_state *smu8_ps;
1331 :
1332 0 : if (hwmgr == NULL)
1333 : return -EINVAL;
1334 :
1335 0 : ps = hwmgr->request_ps;
1336 :
1337 0 : if (ps == NULL)
1338 : return -EINVAL;
1339 :
1340 0 : smu8_ps = cast_smu8_power_state(&ps->hardware);
1341 :
1342 0 : if (low)
1343 0 : return smu8_ps->levels[0].engineClock;
1344 : else
1345 0 : return smu8_ps->levels[smu8_ps->level-1].engineClock;
1346 : }
1347 :
1348 0 : static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1349 : struct pp_hw_power_state *hw_ps)
1350 : {
1351 0 : struct smu8_hwmgr *data = hwmgr->backend;
1352 0 : struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1353 :
1354 0 : smu8_ps->level = 1;
1355 0 : smu8_ps->nbps_flags = 0;
1356 0 : smu8_ps->bapm_flags = 0;
1357 0 : smu8_ps->levels[0] = data->boot_power_level;
1358 :
1359 0 : return 0;
1360 : }
1361 :
1362 0 : static int smu8_dpm_get_pp_table_entry_callback(
1363 : struct pp_hwmgr *hwmgr,
1364 : struct pp_hw_power_state *hw_ps,
1365 : unsigned int index,
1366 : const void *clock_info)
1367 : {
1368 0 : struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1369 :
1370 0 : const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
1371 :
1372 0 : struct phm_clock_voltage_dependency_table *table =
1373 : hwmgr->dyn_state.vddc_dependency_on_sclk;
1374 0 : uint8_t clock_info_index = smu8_clock_info->index;
1375 :
1376 0 : if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1377 0 : clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1378 :
1379 0 : smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1380 0 : smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1381 :
1382 0 : smu8_ps->level = index + 1;
1383 :
1384 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1385 0 : smu8_ps->levels[index].dsDividerIndex = 5;
1386 0 : smu8_ps->levels[index].ssDividerIndex = 5;
1387 : }
1388 :
1389 0 : return 0;
1390 : }
1391 :
1392 0 : static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1393 : {
1394 : int result;
1395 0 : unsigned long ret = 0;
1396 :
1397 0 : result = pp_tables_get_num_of_entries(hwmgr, &ret);
1398 :
1399 0 : return result ? 0 : ret;
1400 : }
1401 :
1402 0 : static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1403 : unsigned long entry, struct pp_power_state *ps)
1404 : {
1405 : int result;
1406 : struct smu8_power_state *smu8_ps;
1407 :
1408 0 : ps->hardware.magic = smu8_magic;
1409 :
1410 0 : smu8_ps = cast_smu8_power_state(&(ps->hardware));
1411 :
1412 0 : result = pp_tables_get_entry(hwmgr, entry, ps,
1413 : smu8_dpm_get_pp_table_entry_callback);
1414 :
1415 0 : smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1416 0 : smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1417 :
1418 0 : return result;
1419 : }
1420 :
1421 0 : static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1422 : {
1423 0 : return sizeof(struct smu8_power_state);
1424 : }
1425 :
1426 : static void smu8_hw_print_display_cfg(
1427 : const struct cc6_settings *cc6_settings)
1428 : {
1429 : PP_DBG_LOG("New Display Configuration:\n");
1430 :
1431 : PP_DBG_LOG(" cpu_cc6_disable: %d\n",
1432 : cc6_settings->cpu_cc6_disable);
1433 : PP_DBG_LOG(" cpu_pstate_disable: %d\n",
1434 : cc6_settings->cpu_pstate_disable);
1435 : PP_DBG_LOG(" nb_pstate_switch_disable: %d\n",
1436 : cc6_settings->nb_pstate_switch_disable);
1437 : PP_DBG_LOG(" cpu_pstate_separation_time: %d\n\n",
1438 : cc6_settings->cpu_pstate_separation_time);
1439 : }
1440 :
1441 0 : static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1442 : {
1443 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
1444 0 : uint32_t data = 0;
1445 :
1446 0 : if (hw_data->cc6_settings.cc6_setting_changed) {
1447 :
1448 0 : hw_data->cc6_settings.cc6_setting_changed = false;
1449 :
1450 0 : smu8_hw_print_display_cfg(&hw_data->cc6_settings);
1451 :
1452 0 : data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1453 : & PWRMGT_SEPARATION_TIME_MASK)
1454 0 : << PWRMGT_SEPARATION_TIME_SHIFT;
1455 :
1456 0 : data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
1457 0 : << PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
1458 :
1459 0 : data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
1460 0 : << PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
1461 :
1462 : PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
1463 : data);
1464 :
1465 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1466 : PPSMC_MSG_SetDisplaySizePowerParams,
1467 : data,
1468 : NULL);
1469 : }
1470 :
1471 0 : return 0;
1472 : }
1473 :
1474 :
1475 0 : static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1476 : bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1477 : {
1478 0 : struct smu8_hwmgr *hw_data = hwmgr->backend;
1479 :
1480 0 : if (separation_time !=
1481 0 : hw_data->cc6_settings.cpu_pstate_separation_time ||
1482 0 : cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
1483 0 : pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
1484 0 : pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
1485 :
1486 0 : hw_data->cc6_settings.cc6_setting_changed = true;
1487 :
1488 0 : hw_data->cc6_settings.cpu_pstate_separation_time =
1489 : separation_time;
1490 0 : hw_data->cc6_settings.cpu_cc6_disable =
1491 : cc6_disable;
1492 0 : hw_data->cc6_settings.cpu_pstate_disable =
1493 : pstate_disable;
1494 0 : hw_data->cc6_settings.nb_pstate_switch_disable =
1495 : pstate_switch_disable;
1496 :
1497 : }
1498 :
1499 0 : return 0;
1500 : }
1501 :
1502 0 : static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1503 : struct amd_pp_simple_clock_info *info)
1504 : {
1505 : uint32_t i;
1506 0 : const struct phm_clock_voltage_dependency_table *table =
1507 : hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1508 0 : const struct phm_clock_and_voltage_limits *limits =
1509 : &hwmgr->dyn_state.max_clock_voltage_on_ac;
1510 :
1511 0 : info->engine_max_clock = limits->sclk;
1512 0 : info->memory_max_clock = limits->mclk;
1513 :
1514 0 : for (i = table->count - 1; i > 0; i--) {
1515 0 : if (limits->vddc >= table->entries[i].v) {
1516 0 : info->level = table->entries[i].clk;
1517 0 : return 0;
1518 : }
1519 : }
1520 : return -EINVAL;
1521 : }
1522 :
1523 0 : static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
1524 : enum pp_clock_type type, uint32_t mask)
1525 : {
1526 0 : switch (type) {
1527 : case PP_SCLK:
1528 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1529 : PPSMC_MSG_SetSclkSoftMin,
1530 : mask,
1531 : NULL);
1532 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1533 : PPSMC_MSG_SetSclkSoftMax,
1534 : mask,
1535 : NULL);
1536 0 : break;
1537 : default:
1538 : break;
1539 : }
1540 :
1541 0 : return 0;
1542 : }
1543 :
1544 0 : static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
1545 : enum pp_clock_type type, char *buf)
1546 : {
1547 0 : struct smu8_hwmgr *data = hwmgr->backend;
1548 0 : struct phm_clock_voltage_dependency_table *sclk_table =
1549 : hwmgr->dyn_state.vddc_dependency_on_sclk;
1550 : uint32_t i, now;
1551 0 : int size = 0;
1552 :
1553 0 : switch (type) {
1554 : case PP_SCLK:
1555 0 : now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1556 : CGS_IND_REG__SMC,
1557 : ixTARGET_AND_CURRENT_PROFILE_INDEX),
1558 : TARGET_AND_CURRENT_PROFILE_INDEX,
1559 : CURR_SCLK_INDEX);
1560 :
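: /* the table clocks appear to be in 10 kHz units, hence the /100 to print MHz */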
1561 0 : for (i = 0; i < sclk_table->count; i++)
1562 0 : size += sprintf(buf + size, "%d: %uMhz %s\n",
1563 0 : i, sclk_table->entries[i].clk / 100,
1564 : (i == now) ? "*" : "");
1565 : break;
1566 : case PP_MCLK:
1567 0 : now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1568 : CGS_IND_REG__SMC,
1569 : ixTARGET_AND_CURRENT_PROFILE_INDEX),
1570 : TARGET_AND_CURRENT_PROFILE_INDEX,
1571 : CURR_MCLK_INDEX);
1572 :
1573 0 : for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
1574 0 : size += sprintf(buf + size, "%d: %uMhz %s\n",
1575 0 : SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1576 0 : (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1577 : break;
1578 : default:
1579 : break;
1580 : }
1581 0 : return size;
1582 : }
1583 :
1584 0 : static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1585 : PHM_PerformanceLevelDesignation designation, uint32_t index,
1586 : PHM_PerformanceLevel *level)
1587 : {
1588 : const struct smu8_power_state *ps;
1589 : struct smu8_hwmgr *data;
1590 : uint32_t level_index;
1591 : uint32_t i;
1592 :
1593 0 : if (level == NULL || hwmgr == NULL || state == NULL)
1594 : return -EINVAL;
1595 :
1596 0 : data = hwmgr->backend;
1597 0 : ps = cast_const_smu8_power_state(state);
1598 :
1599 0 : level_index = index > ps->level - 1 ? ps->level - 1 : index;
1600 0 : level->coreClock = ps->levels[level_index].engineClock;
1601 :
1602 0 : if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
1603 0 : for (i = 1; i < ps->level; i++) {
1604 0 : if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
1605 0 : level->coreClock = ps->levels[i].engineClock;
1606 0 : break;
1607 : }
1608 : }
1609 : }
1610 :
1611 0 : if (level_index == 0)
1612 0 : level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
1613 : else
1614 0 : level->memory_clock = data->sys_info.nbp_memory_clock[0];
1615 :
1616 0 : level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1617 0 : level->nonLocalMemoryFreq = 0;
1618 0 : level->nonLocalMemoryWidth = 0;
1619 :
1620 0 : return 0;
1621 : }
1622 :
1623 0 : static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1624 : const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1625 : {
1626 0 : const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1627 :
1628 0 : clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1629 0 : clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
1630 :
1631 0 : return 0;
1632 : }
1633 :
1634 0 : static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1635 : struct amd_pp_clocks *clocks)
1636 : {
1637 0 : struct smu8_hwmgr *data = hwmgr->backend;
1638 : int i;
1639 : struct phm_clock_voltage_dependency_table *table;
1640 :
1641 0 : clocks->count = smu8_get_max_sclk_level(hwmgr);
1642 0 : switch (type) {
1643 : case amd_pp_disp_clock:
1644 0 : for (i = 0; i < clocks->count; i++)
1645 0 : clocks->clock[i] = data->sys_info.display_clock[i] * 10;
1646 : break;
1647 : case amd_pp_sys_clock:
1648 0 : table = hwmgr->dyn_state.vddc_dependency_on_sclk;
1649 0 : for (i = 0; i < clocks->count; i++)
1650 0 : clocks->clock[i] = table->entries[i].clk * 10;
1651 : break;
1652 : case amd_pp_mem_clock:
1653 0 : clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1654 0 : for (i = 0; i < clocks->count; i++)
1655 0 : clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
1656 : break;
1657 : default:
1658 : return -1;
1659 : }
1660 :
1661 : return 0;
1662 : }
1663 :
1664 0 : static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1665 : {
1666 0 : struct phm_clock_voltage_dependency_table *table =
1667 : hwmgr->dyn_state.vddc_dependency_on_sclk;
1668 : unsigned long level;
1669 0 : const struct phm_clock_and_voltage_limits *limits =
1670 : &hwmgr->dyn_state.max_clock_voltage_on_ac;
1671 :
1672 0 : if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
1673 : return -EINVAL;
1674 :
1675 0 : level = smu8_get_max_sclk_level(hwmgr) - 1;
1676 :
1677 0 : if (level < table->count)
1678 0 : clocks->engine_max_clock = table->entries[level].clk;
1679 : else
1680 0 : clocks->engine_max_clock = table->entries[table->count - 1].clk;
1681 :
1682 0 : clocks->memory_max_clock = limits->mclk;
1683 :
1684 0 : return 0;
1685 : }
1686 :
1687 0 : static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1688 : {
1689 0 : int actual_temp = 0;
1690 0 : uint32_t val = cgs_read_ind_register(hwmgr->device,
1691 : CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
1692 0 : uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
1693 :
1694 0 : if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
1695 0 : actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1696 : else
1697 0 : actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1698 :
1699 0 : return actual_temp;
1700 : }
1701 :
1702 0 : static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1703 : void *value, int *size)
1704 : {
1705 0 : struct smu8_hwmgr *data = hwmgr->backend;
1706 :
1707 0 : struct phm_clock_voltage_dependency_table *table =
1708 : hwmgr->dyn_state.vddc_dependency_on_sclk;
1709 :
1710 0 : struct phm_vce_clock_voltage_dependency_table *vce_table =
1711 : hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1712 :
1713 0 : struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1714 : hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1715 :
1716 0 : uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1717 : TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1718 0 : uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1719 : TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1720 0 : uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1721 : TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1722 :
1723 : uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1724 : uint16_t vddnb, vddgfx;
1725 : int result;
1726 :
1727 : /* size must be at least 4 bytes for all sensors */
1728 0 : if (*size < 4)
1729 : return -EINVAL;
1730 0 : *size = 4;
1731 :
1732 0 : switch (idx) {
1733 : case AMDGPU_PP_SENSOR_GFX_SCLK:
1734 0 : if (sclk_index < NUM_SCLK_LEVELS) {
1735 0 : sclk = table->entries[sclk_index].clk;
1736 0 : *((uint32_t *)value) = sclk;
1737 0 : return 0;
1738 : }
1739 : return -EINVAL;
1740 : case AMDGPU_PP_SENSOR_VDDNB:
1741 0 : tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1742 0 : CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1743 0 : vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
1744 0 : *((uint32_t *)value) = vddnb;
1745 0 : return 0;
1746 : case AMDGPU_PP_SENSOR_VDDGFX:
1747 0 : tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1748 0 : CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1749 0 : vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
1750 0 : *((uint32_t *)value) = vddgfx;
1751 0 : return 0;
1752 : case AMDGPU_PP_SENSOR_UVD_VCLK:
1753 0 : if (!data->uvd_power_gated) {
1754 0 : if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1755 : return -EINVAL;
1756 : } else {
1757 0 : vclk = uvd_table->entries[uvd_index].vclk;
1758 0 : *((uint32_t *)value) = vclk;
1759 0 : return 0;
1760 : }
1761 : }
1762 0 : *((uint32_t *)value) = 0;
1763 0 : return 0;
1764 : case AMDGPU_PP_SENSOR_UVD_DCLK:
1765 0 : if (!data->uvd_power_gated) {
1766 0 : if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1767 : return -EINVAL;
1768 : } else {
1769 0 : dclk = uvd_table->entries[uvd_index].dclk;
1770 0 : *((uint32_t *)value) = dclk;
1771 0 : return 0;
1772 : }
1773 : }
1774 0 : *((uint32_t *)value) = 0;
1775 0 : return 0;
1776 : case AMDGPU_PP_SENSOR_VCE_ECCLK:
1777 0 : if (!data->vce_power_gated) {
1778 0 : if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1779 : return -EINVAL;
1780 : } else {
1781 0 : ecclk = vce_table->entries[vce_index].ecclk;
1782 0 : *((uint32_t *)value) = ecclk;
1783 0 : return 0;
1784 : }
1785 : }
1786 0 : *((uint32_t *)value) = 0;
1787 0 : return 0;
1788 : case AMDGPU_PP_SENSOR_GPU_LOAD:
1789 0 : result = smum_send_msg_to_smc(hwmgr,
1790 : PPSMC_MSG_GetAverageGraphicsActivity,
1791 : &activity_percent);
1792 0 : 		if (!result)
1793 0 : activity_percent = activity_percent > 100 ? 100 : activity_percent;
1794 : else
1795 : return -EIO;
1796 0 : *((uint32_t *)value) = activity_percent;
1797 0 : return 0;
1798 : case AMDGPU_PP_SENSOR_UVD_POWER:
1799 0 : *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1800 0 : return 0;
1801 : case AMDGPU_PP_SENSOR_VCE_POWER:
1802 0 : *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1803 0 : return 0;
1804 : case AMDGPU_PP_SENSOR_GPU_TEMP:
1805 0 : *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
1806 0 : return 0;
1807 : default:
1808 : return -EOPNOTSUPP;
1809 : }
1810 : }
1811 :
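 : /*
 :  * Hand the CAC logging buffer to the SMU: the hi/lo halves of its MC and
 :  * CPU virtual addresses, followed by the buffer size.
 :  */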
1812 0 : static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1813 : uint32_t virtual_addr_low,
1814 : uint32_t virtual_addr_hi,
1815 : uint32_t mc_addr_low,
1816 : uint32_t mc_addr_hi,
1817 : uint32_t size)
1818 : {
1819 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1820 : PPSMC_MSG_DramAddrHiVirtual,
1821 : mc_addr_hi,
1822 : NULL);
1823 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1824 : PPSMC_MSG_DramAddrLoVirtual,
1825 : mc_addr_low,
1826 : NULL);
1827 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1828 : PPSMC_MSG_DramAddrHiPhysical,
1829 : virtual_addr_hi,
1830 : NULL);
1831 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1832 : PPSMC_MSG_DramAddrLoPhysical,
1833 : virtual_addr_low,
1834 : NULL);
1835 :
1836 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1837 : PPSMC_MSG_DramBufferSize,
1838 : size,
1839 : NULL);
1840 0 : return 0;
1841 : }
1842 :
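 : /*
 :  * Report the thermal range: start from the SMU7ThermalPolicy defaults and
 :  * replace the maximum with the auto-throttling threshold plus the HTC
 :  * hysteresis limit from the integrated system info.
 :  */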
1843 0 : static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1844 : struct PP_TemperatureRange *thermal_data)
1845 : {
1846 0 : struct smu8_hwmgr *data = hwmgr->backend;
1847 :
1848 0 : memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1849 :
1850 0 : thermal_data->max = (data->thermal_auto_throttling_treshold +
1851 0 : data->sys_info.htc_hyst_lmt) *
1852 : PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1853 :
1854 0 : return 0;
1855 : }
1856 :
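 : /*
 :  * Toggle the UVD DPM feature in the SMU.  Enabling requires the UVDDPM
 :  * platform cap; otherwise the feature mask is sent with the disable
 :  * message and the driver-side flag is cleared.
 :  */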
1857 0 : static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
1858 : {
1859 0 : struct smu8_hwmgr *data = hwmgr->backend;
1860 0 : uint32_t dpm_features = 0;
1861 :
1862 0 : if (enable &&
1863 0 : phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1864 : PHM_PlatformCaps_UVDDPM)) {
1865 0 : data->dpm_flags |= DPMFlags_UVD_Enabled;
1866 0 : dpm_features |= UVD_DPM_MASK;
1867 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1868 : PPSMC_MSG_EnableAllSmuFeatures,
1869 : dpm_features,
1870 : NULL);
1871 : } else {
1872 0 : dpm_features |= UVD_DPM_MASK;
1873 0 : data->dpm_flags &= ~DPMFlags_UVD_Enabled;
1874 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1875 : PPSMC_MSG_DisableAllSmuFeatures,
1876 : dpm_features,
1877 : NULL);
1878 : }
1879 0 : return 0;
1880 : }
1881 :
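 : /*
 :  * Refresh UVD DPM when the block is gated or ungated.  On ungate with a
 :  * stable pstate or UMD pstate requested, the hard minimum VCLK is pinned
 :  * to the top entry of the UVD dependency table before UVD DPM is
 :  * re-enabled; on gate, UVD DPM is simply disabled.
 :  */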
1882 0 : static int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1883 : {
1884 0 : struct smu8_hwmgr *data = hwmgr->backend;
1885 0 : struct phm_uvd_clock_voltage_dependency_table *ptable =
1886 : hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1887 :
1888 0 : if (!bgate) {
1889 : 		/* If Stable Pstate or a UMD pstate is requested, pin UVD DPM to its highest level */
1890 0 : if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1891 0 : hwmgr->en_umd_pstate) {
1892 0 : data->uvd_dpm.hard_min_clk =
1893 0 : ptable->entries[ptable->count - 1].vclk;
1894 :
1895 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1896 : PPSMC_MSG_SetUvdHardMin,
1897 : smu8_get_uvd_level(hwmgr,
1898 : data->uvd_dpm.hard_min_clk,
1899 : PPSMC_MSG_SetUvdHardMin),
1900 : NULL);
1901 :
1902 0 : smu8_enable_disable_uvd_dpm(hwmgr, true);
1903 : } else {
1904 0 : smu8_enable_disable_uvd_dpm(hwmgr, true);
1905 : }
1906 : } else {
1907 0 : smu8_enable_disable_uvd_dpm(hwmgr, false);
1908 : }
1909 :
1910 0 : return 0;
1911 : }
1912 :
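 : /* VCE counterpart of smu8_enable_disable_uvd_dpm(), keyed on the VCEDPM cap. */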
1913 0 : static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1914 : {
1915 0 : struct smu8_hwmgr *data = hwmgr->backend;
1916 0 : uint32_t dpm_features = 0;
1917 :
1918 0 : if (enable && phm_cap_enabled(
1919 0 : hwmgr->platform_descriptor.platformCaps,
1920 : PHM_PlatformCaps_VCEDPM)) {
1921 0 : data->dpm_flags |= DPMFlags_VCE_Enabled;
1922 0 : dpm_features |= VCE_DPM_MASK;
1923 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1924 : PPSMC_MSG_EnableAllSmuFeatures,
1925 : dpm_features,
1926 : NULL);
1927 : } else {
1928 0 : dpm_features |= VCE_DPM_MASK;
1929 0 : data->dpm_flags &= ~DPMFlags_VCE_Enabled;
1930 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
1931 : PPSMC_MSG_DisableAllSmuFeatures,
1932 : dpm_features,
1933 : NULL);
1934 : }
1935 :
1936 0 : return 0;
1937 : }
1938 :
1939 :
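 : /*
 :  * Power the ACP audio block off or on through the SMU, skipping the
 :  * request when the cached state already matches.
 :  */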
1940 0 : static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
1941 : {
1942 0 : struct smu8_hwmgr *data = hwmgr->backend;
1943 :
1944 0 : if (data->acp_power_gated == bgate)
1945 : return;
1946 :
1947 0 : if (bgate)
1948 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
1949 : else
1950 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
1951 : }
1952 :
1953 : #define WIDTH_4K 3840
1954 :
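 : /*
 :  * Gate or ungate UVD: flip the IP block's power/clock gating state, update
 :  * UVD DPM accordingly, and on Stoney toggle the low-memory NB pstate when
 :  * decoding 4K-wide content.
 :  */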
1955 0 : static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
1956 : {
1957 0 : struct smu8_hwmgr *data = hwmgr->backend;
1958 0 : struct amdgpu_device *adev = hwmgr->adev;
1959 :
1960 0 : data->uvd_power_gated = bgate;
1961 :
1962 0 : if (bgate) {
1963 0 : amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1964 : AMD_IP_BLOCK_TYPE_UVD,
1965 : AMD_PG_STATE_GATE);
1966 0 : amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1967 : AMD_IP_BLOCK_TYPE_UVD,
1968 : AMD_CG_STATE_GATE);
1969 0 : smu8_dpm_update_uvd_dpm(hwmgr, true);
1970 0 : smu8_dpm_powerdown_uvd(hwmgr);
1971 : } else {
1972 0 : smu8_dpm_powerup_uvd(hwmgr);
1973 0 : amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1974 : AMD_IP_BLOCK_TYPE_UVD,
1975 : AMD_CG_STATE_UNGATE);
1976 0 : amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1977 : AMD_IP_BLOCK_TYPE_UVD,
1978 : AMD_PG_STATE_UNGATE);
1979 0 : smu8_dpm_update_uvd_dpm(hwmgr, false);
1980 : }
1981 :
1982 : /* enable/disable Low Memory PState for UVD (4k videos) */
1983 0 : if (adev->asic_type == CHIP_STONEY &&
1984 0 : adev->uvd.decode_image_width >= WIDTH_4K)
1985 0 : smu8_nbdpm_pstate_enable_disable(hwmgr,
1986 : bgate,
1987 : true);
1988 0 : }
1989 :
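 : /*
 :  * Gate or ungate VCE: set the IP block's power/clock gating, power the
 :  * engine down or up, and refresh VCE DPM around the transition.
 :  */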
1990 0 : static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1991 : {
1992 0 : struct smu8_hwmgr *data = hwmgr->backend;
1993 :
1994 0 : if (bgate) {
1995 0 : amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1996 : AMD_IP_BLOCK_TYPE_VCE,
1997 : AMD_PG_STATE_GATE);
1998 0 : amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1999 : AMD_IP_BLOCK_TYPE_VCE,
2000 : AMD_CG_STATE_GATE);
2001 0 : smu8_enable_disable_vce_dpm(hwmgr, false);
2002 0 : smu8_dpm_powerdown_vce(hwmgr);
2003 0 : data->vce_power_gated = true;
2004 : } else {
2005 0 : smu8_dpm_powerup_vce(hwmgr);
2006 0 : data->vce_power_gated = false;
2007 0 : amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
2008 : AMD_IP_BLOCK_TYPE_VCE,
2009 : AMD_CG_STATE_UNGATE);
2010 0 : amdgpu_device_ip_set_powergating_state(hwmgr->adev,
2011 : AMD_IP_BLOCK_TYPE_VCE,
2012 : AMD_PG_STATE_UNGATE);
2013 0 : smu8_dpm_update_vce_dpm(hwmgr);
2014 0 : smu8_enable_disable_vce_dpm(hwmgr, true);
2015 : }
2016 0 : }
2017 :
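 : /*
 :  * Dispatch table wiring the smu8 (Carrizo/Stoney class APU) handlers into
 :  * the generic powerplay hwmgr framework.
 :  */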
2018 : static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
2019 : .backend_init = smu8_hwmgr_backend_init,
2020 : .backend_fini = smu8_hwmgr_backend_fini,
2021 : .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
2022 : .force_dpm_level = smu8_dpm_force_dpm_level,
2023 : .get_power_state_size = smu8_get_power_state_size,
2024 : .powerdown_uvd = smu8_dpm_powerdown_uvd,
2025 : .powergate_uvd = smu8_dpm_powergate_uvd,
2026 : .powergate_vce = smu8_dpm_powergate_vce,
2027 : .powergate_acp = smu8_dpm_powergate_acp,
2028 : .get_mclk = smu8_dpm_get_mclk,
2029 : .get_sclk = smu8_dpm_get_sclk,
2030 : .patch_boot_state = smu8_dpm_patch_boot_state,
2031 : .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
2032 : .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
2033 : .set_cpu_power_state = smu8_set_cpu_power_state,
2034 : .store_cc6_data = smu8_store_cc6_data,
2035 : .force_clock_level = smu8_force_clock_level,
2036 : .print_clock_levels = smu8_print_clock_levels,
2037 : .get_dal_power_level = smu8_get_dal_power_level,
2038 : .get_performance_level = smu8_get_performance_level,
2039 : .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
2040 : .get_clock_by_type = smu8_get_clock_by_type,
2041 : .get_max_high_clocks = smu8_get_max_high_clocks,
2042 : .read_sensor = smu8_read_sensor,
2043 : .power_off_asic = smu8_power_off_asic,
2044 : .asic_setup = smu8_setup_asic_task,
2045 : .dynamic_state_management_enable = smu8_enable_dpm_tasks,
2046 : .power_state_set = smu8_set_power_state_tasks,
2047 : .dynamic_state_management_disable = smu8_disable_dpm_tasks,
2048 : .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
2049 : .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
2050 : };
2051 :
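 : /*
 :  * Install the smu8 hwmgr callbacks and the legacy pptable processing
 :  * functions for this ASIC family.
 :  */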
2052 0 : int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
2053 : {
2054 0 : hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
2055 0 : hwmgr->pptable_func = &pptable_funcs;
2056 0 : return 0;
2057 : }