Line data Source code
1 : /*
2 : * Copyright 2015 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 : #include "pp_debug.h"
24 : #include <linux/delay.h>
25 : #include <linux/fb.h>
26 : #include <linux/module.h>
27 : #include <linux/pci.h>
28 : #include <linux/slab.h>
29 : #include <asm/div64.h>
30 : #if IS_ENABLED(CONFIG_X86_64)
31 : #include <asm/intel-family.h>
32 : #endif
33 : #include <drm/amdgpu_drm.h>
34 : #include "ppatomctrl.h"
35 : #include "atombios.h"
36 : #include "pptable_v1_0.h"
37 : #include "pppcielanes.h"
38 : #include "amd_pcie_helpers.h"
39 : #include "hardwaremanager.h"
40 : #include "process_pptables_v1_0.h"
41 : #include "cgs_common.h"
42 :
43 : #include "smu7_common.h"
44 :
45 : #include "hwmgr.h"
46 : #include "smu7_hwmgr.h"
47 : #include "smu_ucode_xfer_vi.h"
48 : #include "smu7_powertune.h"
49 : #include "smu7_dyn_defaults.h"
50 : #include "smu7_thermal.h"
51 : #include "smu7_clockpowergating.h"
52 : #include "processpptables.h"
53 : #include "pp_thermal.h"
54 : #include "smu7_baco.h"
55 : #include "smu7_smumgr.h"
56 : #include "polaris10_smumgr.h"
57 :
58 : #include "ivsrcid/ivsrcid_vislands30.h"
59 :
60 : #define MC_CG_ARB_FREQ_F0 0x0a
61 : #define MC_CG_ARB_FREQ_F1 0x0b
62 : #define MC_CG_ARB_FREQ_F2 0x0c
63 : #define MC_CG_ARB_FREQ_F3 0x0d
64 :
65 : #define MC_CG_SEQ_DRAMCONF_S0 0x05
66 : #define MC_CG_SEQ_DRAMCONF_S1 0x06
67 : #define MC_CG_SEQ_YCLK_SUSPEND 0x04
68 : #define MC_CG_SEQ_YCLK_RESUME 0x0a
69 :
70 : #define SMC_CG_IND_START 0xc0030000
71 : #define SMC_CG_IND_END 0xc0040000
72 :
73 : #define MEM_FREQ_LOW_LATENCY 25000
74 : #define MEM_FREQ_HIGH_LATENCY 80000
75 :
76 : #define MEM_LATENCY_HIGH 45
77 : #define MEM_LATENCY_LOW 35
78 : #define MEM_LATENCY_ERR 0xFFFF
79 :
80 : #define MC_SEQ_MISC0_GDDR5_SHIFT 28
81 : #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
82 : #define MC_SEQ_MISC0_GDDR5_VALUE 5
83 :
84 : #define PCIE_BUS_CLK 10000
85 : #define TCLK (PCIE_BUS_CLK / 10)
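/* Annotation: TCLK evaluates to 1000 (10000 / 10); the constants are presumed
 * to be in 10 kHz units, making PCIE_BUS_CLK a 100 MHz reference. */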
86 :
87 : static struct profile_mode_setting smu7_profiling[7] =
88 : {{0, 0, 0, 0, 0, 0, 0, 0},
89 : {1, 0, 100, 30, 1, 0, 100, 10},
90 : {1, 10, 0, 30, 0, 0, 0, 0},
91 : {0, 0, 0, 0, 1, 10, 16, 31},
92 : {1, 0, 11, 50, 1, 0, 100, 10},
93 : {1, 0, 5, 30, 0, 0, 0, 0},
94 : {0, 0, 0, 0, 0, 0, 0, 0},
95 : };
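/*
 * Annotation: each row above is assumed to pack a struct profile_mode_setting
 * in declaration order:
 * { bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 *   bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity },
 * with the row index presumed to follow the PP_SMC_POWER_PROFILE_* mode
 * numbering (e.g. index 1 would be the 3D full-screen profile).
 */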
96 :
97 : #define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
98 :
99 : #define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
100 : #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
101 : #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
102 : #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
103 : #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006
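/*
 * Illustrative sketch (an assumption about the macro mechanics, not the
 * driver's actual expansion): the PHM_WRITE_*_FIELD macros consume a
 * MASK/SHIFT pair such as the ones above as a read-modify-write on the
 * indirectly addressed register, conceptually:
 *
 *   reg  = read_ind(ixPWR_SVI2_PLANE1_LOAD);          // hypothetical helper
 *   reg &= ~PWR_SVI2_PLANE1_LOAD__PSI1_MASK;
 *   reg |= (val << PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT) &
 *          PWR_SVI2_PLANE1_LOAD__PSI1_MASK;
 *   write_ind(ixPWR_SVI2_PLANE1_LOAD, reg);           // hypothetical helper
 */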
104 :
105 : #define STRAP_EVV_REVISION_MSB 2211
106 : #define STRAP_EVV_REVISION_LSB 2208
107 :
108 : /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
109 : enum DPM_EVENT_SRC {
110 : DPM_EVENT_SRC_ANALOG = 0,
111 : DPM_EVENT_SRC_EXTERNAL = 1,
112 : DPM_EVENT_SRC_DIGITAL = 2,
113 : DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
114 : DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
115 : };
116 :
117 : #define ixDIDT_SQ_EDC_CTRL 0x0013
118 : #define ixDIDT_SQ_EDC_THRESHOLD 0x0014
119 : #define ixDIDT_SQ_EDC_STALL_PATTERN_1_2 0x0015
120 : #define ixDIDT_SQ_EDC_STALL_PATTERN_3_4 0x0016
121 : #define ixDIDT_SQ_EDC_STALL_PATTERN_5_6 0x0017
122 : #define ixDIDT_SQ_EDC_STALL_PATTERN_7 0x0018
123 :
124 : #define ixDIDT_TD_EDC_CTRL 0x0053
125 : #define ixDIDT_TD_EDC_THRESHOLD 0x0054
126 : #define ixDIDT_TD_EDC_STALL_PATTERN_1_2 0x0055
127 : #define ixDIDT_TD_EDC_STALL_PATTERN_3_4 0x0056
128 : #define ixDIDT_TD_EDC_STALL_PATTERN_5_6 0x0057
129 : #define ixDIDT_TD_EDC_STALL_PATTERN_7 0x0058
130 :
131 : #define ixDIDT_TCP_EDC_CTRL 0x0073
132 : #define ixDIDT_TCP_EDC_THRESHOLD 0x0074
133 : #define ixDIDT_TCP_EDC_STALL_PATTERN_1_2 0x0075
134 : #define ixDIDT_TCP_EDC_STALL_PATTERN_3_4 0x0076
135 : #define ixDIDT_TCP_EDC_STALL_PATTERN_5_6 0x0077
136 : #define ixDIDT_TCP_EDC_STALL_PATTERN_7 0x0078
137 :
138 : #define ixDIDT_DB_EDC_CTRL 0x0033
139 : #define ixDIDT_DB_EDC_THRESHOLD 0x0034
140 : #define ixDIDT_DB_EDC_STALL_PATTERN_1_2 0x0035
141 : #define ixDIDT_DB_EDC_STALL_PATTERN_3_4 0x0036
142 : #define ixDIDT_DB_EDC_STALL_PATTERN_5_6 0x0037
143 : #define ixDIDT_DB_EDC_STALL_PATTERN_7 0x0038
144 :
145 : uint32_t DIDTEDCConfig_P12[] = {
146 : ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
147 : ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
148 : ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
149 : ixDIDT_SQ_EDC_STALL_PATTERN_7,
150 : ixDIDT_SQ_EDC_THRESHOLD,
151 : ixDIDT_SQ_EDC_CTRL,
152 : ixDIDT_TD_EDC_STALL_PATTERN_1_2,
153 : ixDIDT_TD_EDC_STALL_PATTERN_3_4,
154 : ixDIDT_TD_EDC_STALL_PATTERN_5_6,
155 : ixDIDT_TD_EDC_STALL_PATTERN_7,
156 : ixDIDT_TD_EDC_THRESHOLD,
157 : ixDIDT_TD_EDC_CTRL,
158 : ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
159 : ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
160 : ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
161 : ixDIDT_TCP_EDC_STALL_PATTERN_7,
162 : ixDIDT_TCP_EDC_THRESHOLD,
163 : ixDIDT_TCP_EDC_CTRL,
164 : ixDIDT_DB_EDC_STALL_PATTERN_1_2,
165 : ixDIDT_DB_EDC_STALL_PATTERN_3_4,
166 : ixDIDT_DB_EDC_STALL_PATTERN_5_6,
167 : ixDIDT_DB_EDC_STALL_PATTERN_7,
168 : ixDIDT_DB_EDC_THRESHOLD,
169 : ixDIDT_DB_EDC_CTRL,
170 : 0xFFFFFFFF // End of list
171 : };
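/*
 * Annotation: the list is terminated by the 0xFFFFFFFF sentinel rather than
 * a length field. smu7_program_edc_didt_registers() further down walks it
 * roughly like this sketch:
 *
 *   for (i = 0; cac_config_regs[i] != 0xFFFFFFFF; i++)
 *       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT,
 *                              cac_config_regs[i],
 *                              edc_leakage_table->DIDT_REG[i]);
 */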
172 :
173 : static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
174 : static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
175 : enum pp_clock_type type, uint32_t mask);
176 : static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);
177 :
178 0 : static struct smu7_power_state *cast_phw_smu7_power_state(
179 : struct pp_hw_power_state *hw_ps)
180 : {
181 0 : PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
182 : "Invalid Powerstate Type!",
183 : return NULL);
184 :
185 : return (struct smu7_power_state *)hw_ps;
186 : }
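/*
 * Annotation: PP_ASSERT_WITH_CODE(cond, msg, code) is a soft runtime check
 * from pp_debug.h; when `cond` is false it logs `msg` and then executes
 * `code` (here: return NULL). That is why error-handling statements appear
 * as the macro's third argument throughout this file.
 */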
187 :
188 0 : static const struct smu7_power_state *cast_const_phw_smu7_power_state(
189 : const struct pp_hw_power_state *hw_ps)
190 : {
191 0 : PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
192 : "Invalid Powerstate Type!",
193 : return NULL);
194 :
195 : return (const struct smu7_power_state *)hw_ps;
196 : }
197 :
198 : /**
199 : * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
200 : *
201 : * @hwmgr: the address of the powerplay hardware manager.
202 : * Return: always 0
203 : */
204 0 : static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
205 : {
206 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
207 :
208 0 : hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
209 :
210 0 : return 0;
211 : }
212 :
213 : static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
214 : {
215 0 : uint32_t speedCntl = 0;
216 :
217 : /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
218 0 : speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
219 : ixPCIE_LC_SPEED_CNTL);
220 0 : return((uint16_t)PHM_GET_FIELD(speedCntl,
221 : PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
222 : }
223 :
224 0 : static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
225 : {
226 : uint32_t link_width;
227 :
228 : /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
229 0 : link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
230 : PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
231 :
232 0 : PP_ASSERT_WITH_CODE((7 >= link_width),
233 : "Invalid PCIe lane width!", return 0);
234 :
235 0 : return decode_pcie_lane_width(link_width);
236 : }
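/*
 * Annotation: LC_LINK_WIDTH_RD holds an encoded width, not a lane count;
 * decode_pcie_lane_width() expands it, assuming the usual VI encoding in
 * which field values 0..6 map to x0/x1/x2/x4/x8/x12/x16 lanes (hence the
 * `link_width <= 7` sanity check above).
 */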
237 :
238 : /**
239 : * smu7_enable_smc_voltage_controller - Enable voltage control
240 : *
241 : * @hwmgr: the address of the powerplay hardware manager.
242 : * Return: always 0
243 : */
244 0 : static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
245 : {
246 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
247 : hwmgr->chip_id <= CHIP_VEGAM) {
248 0 : PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
249 : CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
250 0 : PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
251 : CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
252 : }
253 :
254 0 : if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
255 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
256 :
257 0 : return 0;
258 : }
259 :
260 : /**
261 : * smu7_voltage_control - Checks if we want to support voltage control
262 : *
263 : * @hwmgr: the address of the powerplay hardware manager.
264 : */
265 : static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
266 : {
267 0 : const struct smu7_hwmgr *data =
268 : (const struct smu7_hwmgr *)(hwmgr->backend);
269 :
270 0 : return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
271 : }
272 :
273 : /**
274 : * smu7_enable_voltage_control - Enable voltage control
275 : *
276 : * @hwmgr: the address of the powerplay hardware manager.
277 : * Return: always 0
278 : */
279 0 : static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
280 : {
281 : /* enable voltage control */
282 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
283 : GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
284 :
285 0 : return 0;
286 : }
287 :
288 0 : static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
289 : struct phm_clock_voltage_dependency_table *voltage_dependency_table
290 : )
291 : {
292 : uint32_t i;
293 :
294 0 : PP_ASSERT_WITH_CODE((NULL != voltage_table),
295 : "Voltage Dependency Table empty.", return -EINVAL;);
296 :
297 0 : voltage_table->mask_low = 0;
298 0 : voltage_table->phase_delay = 0;
299 0 : voltage_table->count = voltage_dependency_table->count;
300 :
301 0 : for (i = 0; i < voltage_dependency_table->count; i++) {
302 0 : voltage_table->entries[i].value =
303 0 : voltage_dependency_table->entries[i].v;
304 0 : voltage_table->entries[i].smio_low = 0;
305 : }
306 :
307 : return 0;
308 : }
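/*
 * Annotation: this helper flattens a v0 clock/voltage dependency table into
 * a plain voltage table. Only the raw voltage values are carried over; the
 * SMIO mask and phase delay are zeroed, presumably because SVI2 parts do not
 * select voltage through GPIO/SMIO pins.
 */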
309 :
310 :
311 : /**
312 : * smu7_construct_voltage_tables - Create Voltage Tables.
313 : *
314 : * @hwmgr: the address of the powerplay hardware manager.
315 : * Return: 0 on success; an error code otherwise.
316 : */
317 0 : static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
318 : {
319 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
320 0 : struct phm_ppt_v1_information *table_info =
321 : (struct phm_ppt_v1_information *)hwmgr->pptable;
322 0 : int result = 0;
323 : uint32_t tmp;
324 :
325 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
326 0 : result = atomctrl_get_voltage_table_v3(hwmgr,
327 : VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
328 0 : &(data->mvdd_voltage_table));
329 0 : PP_ASSERT_WITH_CODE((0 == result),
330 : "Failed to retrieve MVDD table.",
331 : return result);
332 0 : } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
333 0 : if (hwmgr->pp_table_version == PP_TABLE_V1)
334 0 : result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
335 0 : table_info->vdd_dep_on_mclk);
336 0 : else if (hwmgr->pp_table_version == PP_TABLE_V0)
337 0 : result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
338 : hwmgr->dyn_state.mvdd_dependency_on_mclk);
339 :
340 0 : PP_ASSERT_WITH_CODE((0 == result),
341 : "Failed to retrieve SVI2 MVDD table from dependency table.",
342 : return result;);
343 : }
344 :
345 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
346 0 : result = atomctrl_get_voltage_table_v3(hwmgr,
347 : VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
348 0 : &(data->vddci_voltage_table));
349 0 : PP_ASSERT_WITH_CODE((0 == result),
350 : "Failed to retrieve VDDCI table.",
351 : return result);
352 0 : } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
353 0 : if (hwmgr->pp_table_version == PP_TABLE_V1)
354 0 : result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
355 0 : table_info->vdd_dep_on_mclk);
356 0 : else if (hwmgr->pp_table_version == PP_TABLE_V0)
357 0 : result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
358 : hwmgr->dyn_state.vddci_dependency_on_mclk);
359 0 : PP_ASSERT_WITH_CODE((0 == result),
360 : "Failed to retrieve SVI2 VDDCI table from dependency table.",
361 : return result);
362 : }
363 :
364 0 : if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
365 : /* VDDGFX has only SVI2 voltage control */
366 0 : result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
367 0 : table_info->vddgfx_lookup_table);
368 0 : PP_ASSERT_WITH_CODE((0 == result),
369 : "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
370 : }
371 :
372 :
373 0 : if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
374 0 : result = atomctrl_get_voltage_table_v3(hwmgr,
375 : VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
376 0 : &data->vddc_voltage_table);
377 0 : PP_ASSERT_WITH_CODE((0 == result),
378 : "Failed to retrieve VDDC table.", return result;);
379 0 : } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
380 :
381 0 : if (hwmgr->pp_table_version == PP_TABLE_V0)
382 0 : result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
383 : hwmgr->dyn_state.vddc_dependency_on_mclk);
384 0 : else if (hwmgr->pp_table_version == PP_TABLE_V1)
385 0 : result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
386 0 : table_info->vddc_lookup_table);
387 :
388 0 : PP_ASSERT_WITH_CODE((0 == result),
389 : "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
390 : }
391 :
392 0 : tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
393 0 : PP_ASSERT_WITH_CODE(
394 : (data->vddc_voltage_table.count <= tmp),
395 : "Too many voltage values for VDDC. Trimming to fit state table.",
396 : phm_trim_voltage_table_to_fit_state_table(tmp,
397 : &(data->vddc_voltage_table)));
398 :
399 0 : tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
400 0 : PP_ASSERT_WITH_CODE(
401 : (data->vddgfx_voltage_table.count <= tmp),
402 : "Too many voltage values for VDDC. Trimming to fit state table.",
403 : phm_trim_voltage_table_to_fit_state_table(tmp,
404 : &(data->vddgfx_voltage_table)));
405 :
406 0 : tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
407 0 : PP_ASSERT_WITH_CODE(
408 : (data->vddci_voltage_table.count <= tmp),
409 : "Too many voltage values for VDDCI. Trimming to fit state table.",
410 : phm_trim_voltage_table_to_fit_state_table(tmp,
411 : &(data->vddci_voltage_table)));
412 :
413 0 : tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
414 0 : PP_ASSERT_WITH_CODE(
415 : (data->mvdd_voltage_table.count <= tmp),
416 : "Too many voltage values for MVDD. Trimming to fit state table.",
417 : phm_trim_voltage_table_to_fit_state_table(tmp,
418 : &(data->mvdd_voltage_table)));
419 :
420 : return 0;
421 : }
422 :
423 : /**
424 : * smu7_program_static_screen_threshold_parameters - Programs static screen detection parameters
425 : *
426 : * @hwmgr: the address of the powerplay hardware manager.
427 : * Return: always 0
428 : */
429 0 : static int smu7_program_static_screen_threshold_parameters(
430 : struct pp_hwmgr *hwmgr)
431 : {
432 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
433 :
434 : /* Set static screen threshold unit */
435 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
436 : CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
437 : data->static_screen_threshold_unit);
438 : /* Set static screen threshold */
439 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
440 : CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
441 : data->static_screen_threshold);
442 :
443 0 : return 0;
444 : }
445 :
446 : /**
447 : * smu7_enable_display_gap - Set up the display gap for glitch-free memory clock switching.
448 : *
449 : * @hwmgr: the address of the powerplay hardware manager.
450 : * Return: always 0
451 : */
452 0 : static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
453 : {
454 0 : uint32_t display_gap =
455 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
456 : ixCG_DISPLAY_GAP_CNTL);
457 :
458 0 : display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
459 : DISP_GAP, DISPLAY_GAP_IGNORE);
460 :
461 0 : display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
462 : DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
463 :
464 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
465 : ixCG_DISPLAY_GAP_CNTL, display_gap);
466 :
467 0 : return 0;
468 : }
469 :
470 : /**
471 : * smu7_program_voting_clients - Programs activity state transition voting clients
472 : *
473 : * @hwmgr: the address of the powerplay hardware manager.
474 : * Return: always 0
475 : */
476 0 : static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
477 : {
478 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
479 : int i;
480 :
481 : /* Clear reset for voting clients before enabling DPM */
482 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
483 : SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
484 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
485 : SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
486 :
487 0 : for (i = 0; i < 8; i++)
488 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
489 : ixCG_FREQ_TRAN_VOTING_0 + i * 4,
490 : data->voting_rights_clients[i]);
491 0 : return 0;
492 : }
493 :
494 0 : static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
495 : {
496 : int i;
497 :
498 : /* Reset voting clients before disabling DPM */
499 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
500 : SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
501 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
502 : SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
503 :
504 0 : for (i = 0; i < 8; i++)
505 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
506 : ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
507 :
508 0 : return 0;
509 : }
510 :
511 : /* Copy one arb setting to another and then switch the active set.
512 : * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
513 : */
514 0 : static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
515 : uint32_t arb_src, uint32_t arb_dest)
516 : {
517 : uint32_t mc_arb_dram_timing;
518 : uint32_t mc_arb_dram_timing2;
519 : uint32_t burst_time;
520 : uint32_t mc_cg_config;
521 :
522 0 : switch (arb_src) {
523 : case MC_CG_ARB_FREQ_F0:
524 0 : mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
525 0 : mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
526 0 : burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
527 : break;
528 : case MC_CG_ARB_FREQ_F1:
529 0 : mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
530 0 : mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
531 0 : burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
532 : break;
533 : default:
534 : return -EINVAL;
535 : }
536 :
537 0 : switch (arb_dest) {
538 : case MC_CG_ARB_FREQ_F0:
539 0 : cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
540 0 : cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
541 0 : PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
542 : break;
543 : case MC_CG_ARB_FREQ_F1:
544 0 : cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
545 0 : cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
546 0 : PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
547 : break;
548 : default:
549 : return -EINVAL;
550 : }
551 :
552 0 : mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
553 0 : mc_cg_config |= 0x0000000F;
554 0 : cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
555 0 : PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
556 :
557 : return 0;
558 : }
559 :
560 : static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
561 : {
562 0 : return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
563 : }
564 :
565 : /**
566 : * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
567 : *
568 : * @hwmgr: the address of the powerplay hardware manager.
569 : * Return: 0 on success; otherwise the error code from the arb-set switch.
570 : * This function is to be called from the SetPowerState table.
571 : */
572 : static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
573 : {
574 0 : return smu7_copy_and_switch_arb_sets(hwmgr,
575 : MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
576 : }
577 :
578 0 : static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
579 : {
580 : uint32_t tmp;
581 :
582 0 : tmp = (cgs_read_ind_register(hwmgr->device,
583 : CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
584 0 : 0x0000ff00) >> 8;
585 :
586 0 : if (tmp == MC_CG_ARB_FREQ_F0)
587 : return 0;
588 :
589 0 : return smu7_copy_and_switch_arb_sets(hwmgr,
590 : tmp, MC_CG_ARB_FREQ_F0);
591 : }
592 :
593 : static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
594 : {
595 0 : struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
596 0 : uint16_t pcie_gen = 0;
597 :
598 0 : if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
599 : adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
600 : pcie_gen = 3;
601 0 : else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
602 : adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
603 : pcie_gen = 2;
604 0 : else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
605 : adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
606 : pcie_gen = 1;
607 : else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
608 : adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
609 : pcie_gen = 0;
610 :
611 : return pcie_gen;
612 : }
613 :
614 0 : static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
615 : {
616 0 : struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
617 0 : uint16_t pcie_width = 0;
618 :
619 0 : if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
620 : pcie_width = 16;
621 0 : else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
622 : pcie_width = 12;
623 0 : else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
624 : pcie_width = 8;
625 0 : else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
626 : pcie_width = 4;
627 0 : else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
628 : pcie_width = 2;
629 0 : else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
630 0 : pcie_width = 1;
631 :
632 0 : return pcie_width;
633 : }
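/*
 * Annotation: smu7_override_pcie_speed() picks the highest generation that
 * both the platform and the ASIC masks advertise, encoded zero-based
 * (0 = Gen1 ... 3 = Gen4) to match the gen_speed values passed to
 * phm_setup_pcie_table_entry(); smu7_override_pcie_width() returns a raw
 * lane count from the link-width mask.
 */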
634 :
635 0 : static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
636 : {
637 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
638 :
639 0 : struct phm_ppt_v1_information *table_info =
640 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
641 0 : struct phm_ppt_v1_pcie_table *pcie_table = NULL;
642 :
643 : uint32_t i, max_entry;
644 : uint32_t tmp;
645 :
646 0 : PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
647 : data->use_pcie_power_saving_levels), "No pcie performance levels!",
648 : return -EINVAL);
649 :
650 0 : if (table_info != NULL)
651 0 : pcie_table = table_info->pcie_table;
652 :
653 0 : if (data->use_pcie_performance_levels &&
654 0 : !data->use_pcie_power_saving_levels) {
655 0 : data->pcie_gen_power_saving = data->pcie_gen_performance;
656 0 : data->pcie_lane_power_saving = data->pcie_lane_performance;
657 0 : } else if (!data->use_pcie_performance_levels &&
658 0 : data->use_pcie_power_saving_levels) {
659 0 : data->pcie_gen_performance = data->pcie_gen_power_saving;
660 0 : data->pcie_lane_performance = data->pcie_lane_power_saving;
661 : }
662 0 : tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
663 0 : phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
664 : tmp,
665 : MAX_REGULAR_DPM_NUMBER);
666 :
667 0 : if (pcie_table != NULL) {
668 : /* max_entry is used to make sure we reserve one PCIE level
669 : * for boot level (fix for A+A PSPP issue).
670 : * If the PCIE table from the PPTable has a ULV entry plus 8 entries,
671 : * then ignore the last entry. */
672 0 : max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
673 0 : for (i = 1; i < max_entry; i++) {
674 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
675 0 : get_pcie_gen_support(data->pcie_gen_cap,
676 0 : pcie_table->entries[i].gen_speed),
677 0 : get_pcie_lane_support(data->pcie_lane_cap,
678 0 : pcie_table->entries[i].lane_width));
679 : }
680 0 : data->dpm_table.pcie_speed_table.count = max_entry - 1;
681 0 : smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
682 : } else {
683 : /* Hardcode the PCIe table */
684 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
685 0 : get_pcie_gen_support(data->pcie_gen_cap,
686 : PP_Min_PCIEGen),
687 0 : get_pcie_lane_support(data->pcie_lane_cap,
688 : PP_Max_PCIELane));
689 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
690 0 : get_pcie_gen_support(data->pcie_gen_cap,
691 : PP_Min_PCIEGen),
692 0 : get_pcie_lane_support(data->pcie_lane_cap,
693 : PP_Max_PCIELane));
694 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
695 0 : get_pcie_gen_support(data->pcie_gen_cap,
696 : PP_Max_PCIEGen),
697 0 : get_pcie_lane_support(data->pcie_lane_cap,
698 : PP_Max_PCIELane));
699 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
700 0 : get_pcie_gen_support(data->pcie_gen_cap,
701 : PP_Max_PCIEGen),
702 0 : get_pcie_lane_support(data->pcie_lane_cap,
703 : PP_Max_PCIELane));
704 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
705 0 : get_pcie_gen_support(data->pcie_gen_cap,
706 : PP_Max_PCIEGen),
707 0 : get_pcie_lane_support(data->pcie_lane_cap,
708 : PP_Max_PCIELane));
709 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
710 0 : get_pcie_gen_support(data->pcie_gen_cap,
711 : PP_Max_PCIEGen),
712 0 : get_pcie_lane_support(data->pcie_lane_cap,
713 : PP_Max_PCIELane));
714 :
715 0 : data->dpm_table.pcie_speed_table.count = 6;
716 : }
717 : /* Populate last level for boot PCIE level, but do not increment count. */
718 0 : if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
719 0 : for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
720 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
721 0 : get_pcie_gen_support(data->pcie_gen_cap,
722 : PP_Max_PCIEGen),
723 0 : data->vbios_boot_state.pcie_lane_bootup_value);
724 : } else {
725 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
726 : data->dpm_table.pcie_speed_table.count,
727 0 : get_pcie_gen_support(data->pcie_gen_cap,
728 : PP_Min_PCIEGen),
729 0 : get_pcie_lane_support(data->pcie_lane_cap,
730 : PP_Max_PCIELane));
731 :
732 0 : if (data->pcie_dpm_key_disabled)
733 0 : phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
734 : data->dpm_table.pcie_speed_table.count,
735 0 : smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
736 : }
737 : return 0;
738 : }
739 :
740 0 : static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
741 : {
742 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
743 :
744 0 : memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
745 :
746 0 : phm_reset_single_dpm_table(
747 0 : &data->dpm_table.sclk_table,
748 : smum_get_mac_definition(hwmgr,
749 : SMU_MAX_LEVELS_GRAPHICS),
750 : MAX_REGULAR_DPM_NUMBER);
751 0 : phm_reset_single_dpm_table(
752 0 : &data->dpm_table.mclk_table,
753 : smum_get_mac_definition(hwmgr,
754 : SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
755 :
756 0 : phm_reset_single_dpm_table(
757 0 : &data->dpm_table.vddc_table,
758 : smum_get_mac_definition(hwmgr,
759 : SMU_MAX_LEVELS_VDDC),
760 : MAX_REGULAR_DPM_NUMBER);
761 0 : phm_reset_single_dpm_table(
762 0 : &data->dpm_table.vddci_table,
763 : smum_get_mac_definition(hwmgr,
764 : SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
765 :
766 0 : phm_reset_single_dpm_table(
767 0 : &data->dpm_table.mvdd_table,
768 : smum_get_mac_definition(hwmgr,
769 : SMU_MAX_LEVELS_MVDD),
770 : MAX_REGULAR_DPM_NUMBER);
771 0 : return 0;
772 : }
773 : /*
774 : * This function initializes all DPM state tables
775 : * for SMU7 based on the dependency tables.
776 : * The dynamic-state patching functions will then trim these
777 : * state tables to the allowed range based
778 : * on the power policy or external client requests,
779 : * such as a UVD request, etc.
780 : */
781 :
782 0 : static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
783 : {
784 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
785 0 : struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
786 : hwmgr->dyn_state.vddc_dependency_on_sclk;
787 0 : struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
788 : hwmgr->dyn_state.vddc_dependency_on_mclk;
789 0 : struct phm_cac_leakage_table *std_voltage_table =
790 : hwmgr->dyn_state.cac_leakage_table;
791 : uint32_t i;
792 :
793 0 : PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
794 : "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
795 0 : PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
796 : "SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
797 :
798 0 : PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
799 : "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
800 0 : PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
801 : "VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
802 :
803 :
804 : /* Initialize Sclk DPM table based on allowed Sclk values */
805 0 : data->dpm_table.sclk_table.count = 0;
806 :
807 0 : for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
808 0 : if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
809 0 : allowed_vdd_sclk_table->entries[i].clk) {
810 0 : data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
811 0 : allowed_vdd_sclk_table->entries[i].clk;
812 0 : data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
813 0 : data->dpm_table.sclk_table.count++;
814 : }
815 : }
816 :
817 : PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
818 : "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
819 : /* Initialize Mclk DPM table based on allowed Mclk values */
820 0 : data->dpm_table.mclk_table.count = 0;
821 0 : for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
822 0 : if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
823 0 : allowed_vdd_mclk_table->entries[i].clk) {
824 0 : data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
825 0 : allowed_vdd_mclk_table->entries[i].clk;
826 0 : data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
827 0 : data->dpm_table.mclk_table.count++;
828 : }
829 : }
830 :
831 : /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */
832 0 : for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
833 0 : data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
834 0 : data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
835 : /* param1 is for corresponding std voltage */
836 0 : data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
837 : }
838 :
839 0 : data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
840 0 : allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
841 :
842 0 : if (NULL != allowed_vdd_mclk_table) {
843 : /* Initialize Vddci DPM table based on allowed Mclk values */
844 0 : for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
845 0 : data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
846 0 : data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
847 : }
848 0 : data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
849 : }
850 :
851 0 : allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
852 :
853 0 : if (NULL != allowed_vdd_mclk_table) {
854 : /*
855 : * Initialize MVDD DPM table based on allowed Mclk
856 : * values
857 : */
858 0 : for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
859 0 : data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
860 0 : data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
861 : }
862 0 : data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
863 : }
864 :
865 : return 0;
866 : }
867 :
868 0 : static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
869 : {
870 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
871 0 : struct phm_ppt_v1_information *table_info =
872 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
873 : uint32_t i;
874 :
875 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
876 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
877 :
878 0 : if (table_info == NULL)
879 : return -EINVAL;
880 :
881 0 : dep_sclk_table = table_info->vdd_dep_on_sclk;
882 0 : dep_mclk_table = table_info->vdd_dep_on_mclk;
883 :
884 0 : PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
885 : "SCLK dependency table is missing.",
886 : return -EINVAL);
887 0 : PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
888 : "SCLK dependency table count is 0.",
889 : return -EINVAL);
890 :
891 0 : PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
892 : "MCLK dependency table is missing.",
893 : return -EINVAL);
894 0 : PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
895 : "MCLK dependency table count is 0",
896 : return -EINVAL);
897 :
898 : /* Initialize Sclk DPM table based on allowed Sclk values */
899 0 : data->dpm_table.sclk_table.count = 0;
900 0 : for (i = 0; i < dep_sclk_table->count; i++) {
901 0 : if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
902 0 : dep_sclk_table->entries[i].clk) {
903 :
904 0 : data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
905 0 : dep_sclk_table->entries[i].clk;
906 :
907 0 : data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
908 0 : (i == 0) ? true : false;
909 0 : data->dpm_table.sclk_table.count++;
910 : }
911 : }
912 0 : if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
913 0 : hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
914 : /* Initialize Mclk DPM table based on allowed Mclk values */
915 0 : data->dpm_table.mclk_table.count = 0;
916 0 : for (i = 0; i < dep_mclk_table->count; i++) {
917 0 : if (i == 0 || data->dpm_table.mclk_table.dpm_levels
918 0 : [data->dpm_table.mclk_table.count - 1].value !=
919 0 : dep_mclk_table->entries[i].clk) {
920 0 : data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
921 0 : dep_mclk_table->entries[i].clk;
922 0 : data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
923 0 : (i == 0) ? true : false;
924 0 : data->dpm_table.mclk_table.count++;
925 : }
926 : }
927 :
928 0 : if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
929 0 : hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
930 : return 0;
931 : }
932 :
933 0 : static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
934 : {
935 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
936 0 : struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
937 0 : struct phm_ppt_v1_information *table_info =
938 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
939 : uint32_t i;
940 :
941 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
942 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
943 : struct phm_odn_performance_level *entries;
944 :
945 0 : if (table_info == NULL)
946 : return -EINVAL;
947 :
948 0 : dep_sclk_table = table_info->vdd_dep_on_sclk;
949 0 : dep_mclk_table = table_info->vdd_dep_on_mclk;
950 :
951 0 : odn_table->odn_core_clock_dpm_levels.num_of_pl =
952 0 : data->golden_dpm_table.sclk_table.count;
953 0 : entries = odn_table->odn_core_clock_dpm_levels.entries;
954 0 : for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
955 0 : entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
956 0 : entries[i].enabled = true;
957 0 : entries[i].vddc = dep_sclk_table->entries[i].vddc;
958 : }
959 :
960 0 : smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
961 0 : (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
962 :
963 0 : odn_table->odn_memory_clock_dpm_levels.num_of_pl =
964 0 : data->golden_dpm_table.mclk_table.count;
965 0 : entries = odn_table->odn_memory_clock_dpm_levels.entries;
966 0 : for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
967 0 : entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
968 0 : entries[i].enabled = true;
969 0 : entries[i].vddc = dep_mclk_table->entries[i].vddc;
970 : }
971 :
972 0 : smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
973 0 : (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
974 :
975 : return 0;
976 : }
977 :
978 0 : static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
979 : {
980 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
981 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
982 0 : struct phm_ppt_v1_information *table_info =
983 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
984 0 : uint32_t min_vddc = 0;
985 0 : uint32_t max_vddc = 0;
986 :
987 0 : if (!table_info)
988 0 : return;
989 :
990 0 : dep_sclk_table = table_info->vdd_dep_on_sclk;
991 :
992 0 : atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
993 :
994 0 : if (min_vddc == 0 || min_vddc > 2000
995 0 : || min_vddc > dep_sclk_table->entries[0].vddc)
996 0 : min_vddc = dep_sclk_table->entries[0].vddc;
997 :
998 0 : if (max_vddc == 0 || max_vddc > 2000
999 0 : || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
1000 0 : max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
1001 :
1002 0 : data->odn_dpm_table.min_vddc = min_vddc;
1003 0 : data->odn_dpm_table.max_vddc = max_vddc;
1004 : }
1005 :
1006 0 : static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
1007 : {
1008 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1009 0 : struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
1010 0 : struct phm_ppt_v1_information *table_info =
1011 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
1012 : uint32_t i;
1013 :
1014 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1015 : struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
1016 :
1017 0 : if (table_info == NULL)
1018 : return;
1019 :
1020 0 : for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1021 0 : if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
1022 0 : data->dpm_table.sclk_table.dpm_levels[i].value) {
1023 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
1024 : break;
1025 : }
1026 : }
1027 :
1028 0 : for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1029 0 : if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
1030 0 : data->dpm_table.mclk_table.dpm_levels[i].value) {
1031 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
1032 : break;
1033 : }
1034 : }
1035 :
1036 0 : dep_table = table_info->vdd_dep_on_mclk;
1037 0 : odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
1038 :
1039 0 : for (i = 0; i < dep_table->count; i++) {
1040 0 : if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
1041 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
1042 : return;
1043 : }
1044 : }
1045 :
1046 0 : dep_table = table_info->vdd_dep_on_sclk;
1047 0 : odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
1048 0 : for (i = 0; i < dep_table->count; i++) {
1049 0 : if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
1050 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
1051 : return;
1052 : }
1053 : }
1054 0 : if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1055 0 : data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
1056 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
1057 : }
1058 : }
1059 :
1060 0 : static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1061 : {
1062 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1063 :
1064 0 : smu7_reset_dpm_tables(hwmgr);
1065 :
1066 0 : if (hwmgr->pp_table_version == PP_TABLE_V1)
1067 0 : smu7_setup_dpm_tables_v1(hwmgr);
1068 0 : else if (hwmgr->pp_table_version == PP_TABLE_V0)
1069 0 : smu7_setup_dpm_tables_v0(hwmgr);
1070 :
1071 0 : smu7_setup_default_pcie_table(hwmgr);
1072 :
1073 : /* save a copy of the default DPM table */
1074 0 : memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1075 : sizeof(struct smu7_dpm_table));
1076 :
1077 : /* initialize ODN table */
1078 0 : if (hwmgr->od_enabled) {
1079 0 : if (data->odn_dpm_table.max_vddc) {
1080 0 : smu7_check_dpm_table_updated(hwmgr);
1081 : } else {
1082 0 : smu7_setup_voltage_range_from_vbios(hwmgr);
1083 0 : smu7_odn_initial_default_setting(hwmgr);
1084 : }
1085 : }
1086 0 : return 0;
1087 : }
1088 :
1089 : static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
1090 : {
1091 :
1092 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1093 : PHM_PlatformCaps_RegulatorHot))
1094 0 : return smum_send_msg_to_smc(hwmgr,
1095 : PPSMC_MSG_EnableVRHotGPIOInterrupt,
1096 : NULL);
1097 :
1098 : return 0;
1099 : }
1100 :
1101 0 : static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
1102 : {
1103 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1104 : SCLK_PWRMGT_OFF, 0);
1105 0 : return 0;
1106 : }
1107 :
1108 : static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
1109 : {
1110 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1111 :
1112 0 : if (data->ulv_supported)
1113 0 : return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
1114 :
1115 : return 0;
1116 : }
1117 :
1118 : static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1119 : {
1120 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1121 :
1122 0 : if (data->ulv_supported)
1123 0 : return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
1124 :
1125 : return 0;
1126 : }
1127 :
1128 0 : static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1129 : {
1130 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1131 : PHM_PlatformCaps_SclkDeepSleep)) {
1132 0 : if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
1133 0 : PP_ASSERT_WITH_CODE(false,
1134 : "Attempt to enable Master Deep Sleep switch failed!",
1135 : return -EINVAL);
1136 : } else {
1137 0 : if (smum_send_msg_to_smc(hwmgr,
1138 : PPSMC_MSG_MASTER_DeepSleep_OFF,
1139 : NULL)) {
1140 0 : PP_ASSERT_WITH_CODE(false,
1141 : "Attempt to disable Master Deep Sleep switch failed!",
1142 : return -EINVAL);
1143 : }
1144 : }
1145 :
1146 : return 0;
1147 : }
1148 :
1149 0 : static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1150 : {
1151 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1152 : PHM_PlatformCaps_SclkDeepSleep)) {
1153 0 : if (smum_send_msg_to_smc(hwmgr,
1154 : PPSMC_MSG_MASTER_DeepSleep_OFF,
1155 : NULL)) {
1156 0 : PP_ASSERT_WITH_CODE(false,
1157 : "Attempt to disable Master Deep Sleep switch failed!",
1158 : return -EINVAL);
1159 : }
1160 : }
1161 :
1162 : return 0;
1163 : }
1164 :
1165 0 : static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1166 : {
1167 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1168 0 : uint32_t soft_register_value = 0;
1169 0 : uint32_t handshake_disables_offset = data->soft_regs_start
1170 0 : + smum_get_offsetof(hwmgr,
1171 : SMU_SoftRegisters, HandshakeDisables);
1172 :
1173 0 : soft_register_value = cgs_read_ind_register(hwmgr->device,
1174 : CGS_IND_REG__SMC, handshake_disables_offset);
1175 0 : soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1176 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1177 : handshake_disables_offset, soft_register_value);
1178 0 : return 0;
1179 : }
1180 :
1181 0 : static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1182 : {
1183 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1184 0 : uint32_t soft_register_value = 0;
1185 0 : uint32_t handshake_disables_offset = data->soft_regs_start
1186 0 : + smum_get_offsetof(hwmgr,
1187 : SMU_SoftRegisters, HandshakeDisables);
1188 :
1189 0 : soft_register_value = cgs_read_ind_register(hwmgr->device,
1190 : CGS_IND_REG__SMC, handshake_disables_offset);
1191 0 : soft_register_value |= smum_get_mac_definition(hwmgr,
1192 : SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1193 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1194 : handshake_disables_offset, soft_register_value);
1195 0 : return 0;
1196 : }
1197 :
1198 0 : static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1199 : {
1200 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1201 :
1202 : /* enable SCLK dpm */
1203 0 : if (!data->sclk_dpm_key_disabled) {
1204 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1205 : hwmgr->chip_id <= CHIP_VEGAM)
1206 0 : smu7_disable_sclk_vce_handshake(hwmgr);
1207 :
1208 0 : PP_ASSERT_WITH_CODE(
1209 : (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
1210 : "Failed to enable SCLK DPM during DPM Start Function!",
1211 : return -EINVAL);
1212 : }
1213 :
1214 : /* enable MCLK dpm */
1215 0 : if (0 == data->mclk_dpm_key_disabled) {
1216 0 : if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1217 0 : smu7_disable_handshake_uvd(hwmgr);
1218 :
1219 0 : PP_ASSERT_WITH_CODE(
1220 : (0 == smum_send_msg_to_smc(hwmgr,
1221 : PPSMC_MSG_MCLKDPM_Enable,
1222 : NULL)),
1223 : "Failed to enable MCLK DPM during DPM Start Function!",
1224 : return -EINVAL);
1225 :
1226 0 : if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
1227 0 : (hwmgr->chip_id == CHIP_POLARIS10) ||
1228 0 : (hwmgr->chip_id == CHIP_POLARIS11) ||
1229 0 : (hwmgr->chip_id == CHIP_POLARIS12) ||
1230 0 : (hwmgr->chip_id == CHIP_TONGA) ||
1231 : (hwmgr->chip_id == CHIP_TOPAZ))
1232 0 : PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1233 :
1234 :
1235 0 : if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1236 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1237 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1238 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1239 0 : udelay(10);
1240 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1241 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1242 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1243 : } else {
1244 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1245 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1246 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1247 0 : udelay(10);
1248 0 : if (hwmgr->chip_id == CHIP_VEGAM) {
1249 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1250 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1251 : } else {
1252 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1253 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1254 : }
1255 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1256 : }
1257 : }
1258 :
1259 : return 0;
1260 : }
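/*
 * Annotation: the raw 0xc0400dxx writes in the CI branch above appear to
 * mirror the symbolic ixLCAC_MC0_CNTL/ixLCAC_MC1_CNTL/ixLCAC_CPL_CNTL
 * programming used in the else branch; both paths follow the same enable
 * sequence (0x5/0x100005, a 10 us settle delay, then 0x*00005 values).
 */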
1261 :
1262 0 : static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1263 : {
1264 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1265 :
1266 : /* enable general power management */
1267 :
1268 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1269 : GLOBAL_PWRMGT_EN, 1);
1270 :
1271 : /* enable sclk deep sleep */
1272 :
1273 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1274 : DYNAMIC_PM_EN, 1);
1275 :
1276 : /* prepare for PCIE DPM */
1277 :
1278 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1279 : data->soft_regs_start +
1280 : smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1281 : VoltageChangeTimeout), 0x1000);
1282 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1283 : SWRST_COMMAND_1, RESETLC, 0x0);
1284 :
1285 0 : if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1286 0 : cgs_write_register(hwmgr->device, 0x1488,
1287 : (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1288 :
1289 0 : if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1290 0 : pr_err("Failed to enable Sclk DPM and Mclk DPM!");
1291 0 : return -EINVAL;
1292 : }
1293 :
1294 : /* enable PCIE dpm */
1295 0 : if (0 == data->pcie_dpm_key_disabled) {
1296 0 : PP_ASSERT_WITH_CODE(
1297 : (0 == smum_send_msg_to_smc(hwmgr,
1298 : PPSMC_MSG_PCIeDPM_Enable,
1299 : NULL)),
1300 : "Failed to enable pcie DPM during DPM Start Function!",
1301 : return -EINVAL);
1302 : } else {
1303 0 : PP_ASSERT_WITH_CODE(
1304 : (0 == smum_send_msg_to_smc(hwmgr,
1305 : PPSMC_MSG_PCIeDPM_Disable,
1306 : NULL)),
1307 : "Failed to disable pcie DPM during DPM Start Function!",
1308 : return -EINVAL);
1309 : }
1310 :
1311 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1312 : PHM_PlatformCaps_Falcon_QuickTransition)) {
1313 0 : PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1314 : PPSMC_MSG_EnableACDCGPIOInterrupt,
1315 : NULL)),
1316 : "Failed to enable AC DC GPIO Interrupt!",
1317 : );
1318 : }
1319 :
1320 : return 0;
1321 : }
1322 :
1323 0 : static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1324 : {
1325 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1326 :
1327 : /* disable SCLK dpm */
1328 0 : if (!data->sclk_dpm_key_disabled) {
1329 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1330 : "Trying to disable SCLK DPM when DPM is disabled",
1331 : return 0);
1332 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
1333 : }
1334 :
1335 : /* disable MCLK dpm */
1336 0 : if (!data->mclk_dpm_key_disabled) {
1337 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1338 : "Trying to disable MCLK DPM when DPM is disabled",
1339 : return 0);
1340 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
1341 : }
1342 :
1343 : return 0;
1344 : }
1345 :
1346 0 : static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1347 : {
1348 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1349 :
1350 : /* disable general power management */
1351 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1352 : GLOBAL_PWRMGT_EN, 0);
1353 : /* disable sclk deep sleep */
1354 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1355 : DYNAMIC_PM_EN, 0);
1356 :
1357 : /* disable PCIE dpm */
1358 0 : if (!data->pcie_dpm_key_disabled) {
1359 0 : PP_ASSERT_WITH_CODE(
1360 : (smum_send_msg_to_smc(hwmgr,
1361 : PPSMC_MSG_PCIeDPM_Disable,
1362 : NULL) == 0),
1363 : "Failed to disable pcie DPM during DPM Stop Function!",
1364 : return -EINVAL);
1365 : }
1366 :
1367 0 : smu7_disable_sclk_mclk_dpm(hwmgr);
1368 :
1369 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1370 : "Trying to disable voltage DPM when DPM is disabled",
1371 : return 0);
1372 :
1373 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
1374 :
1375 0 : return 0;
1376 : }
1377 :
1378 0 : static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1379 : {
1380 : bool protection;
1381 : enum DPM_EVENT_SRC src;
1382 :
1383 0 : switch (sources) {
1384 : default:
1385 0 : pr_err("Unknown throttling event sources.");
1386 : fallthrough;
1387 : case 0:
1388 : protection = false;
1389 : /* src is unused */
1390 : break;
1391 : case (1 << PHM_AutoThrottleSource_Thermal):
1392 : protection = true;
1393 : src = DPM_EVENT_SRC_DIGITAL;
1394 : break;
1395 : case (1 << PHM_AutoThrottleSource_External):
1396 0 : protection = true;
1397 0 : src = DPM_EVENT_SRC_EXTERNAL;
1398 0 : break;
1399 : case (1 << PHM_AutoThrottleSource_External) |
1400 : (1 << PHM_AutoThrottleSource_Thermal):
1401 0 : protection = true;
1402 0 : src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1403 0 : break;
1404 : }
1405 : /* Order matters - don't enable thermal protection for the wrong source. */
1406 0 : if (protection) {
1407 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1408 : DPM_EVENT_SRC, src);
1409 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1410 : THERMAL_PROTECTION_DIS,
1411 : !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1412 : PHM_PlatformCaps_ThermalController));
1413 : } else
1414 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1415 : THERMAL_PROTECTION_DIS, 1);
1416 0 : }
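/*
 * Annotation: `sources` is a bitmask of (1 << PHM_AutoThrottleSource_*) bits
 * maintained by the enable/disable helpers below. Only the combinations
 * listed in the switch select a DPM event source; any other value logs an
 * error and falls through to case 0, which disables thermal protection.
 */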
1417 :
1418 : static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1419 : PHM_AutoThrottleSource source)
1420 : {
1421 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1422 :
1423 0 : if (!(data->active_auto_throttle_sources & (1 << source))) {
1424 0 : data->active_auto_throttle_sources |= 1 << source;
1425 0 : smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1426 : }
1427 : return 0;
1428 : }
1429 :
1430 : static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1431 : {
1432 0 : return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1433 : }
1434 :
1435 : static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1436 : PHM_AutoThrottleSource source)
1437 : {
1438 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1439 :
1440 0 : if (data->active_auto_throttle_sources & (1 << source)) {
1441 0 : data->active_auto_throttle_sources &= ~(1 << source);
1442 0 : smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1443 : }
1444 : return 0;
1445 : }
1446 :
1447 : static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1448 : {
1449 0 : return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1450 : }
1451 :
1452 : static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1453 : {
1454 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1455 0 : data->pcie_performance_request = true;
1456 :
1457 : return 0;
1458 : }
1459 :
1460 : static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
1461 : uint32_t *cac_config_regs,
1462 : AtomCtrl_EDCLeakgeTable *edc_leakage_table)
1463 : {
1464 : uint32_t data, i = 0;
1465 :
1466 0 : while (cac_config_regs[i] != 0xFFFFFFFF) {
1467 0 : data = edc_leakage_table->DIDT_REG[i];
1468 0 : cgs_write_ind_register(hwmgr->device,
1469 : CGS_IND_REG__DIDT,
1470 : cac_config_regs[i],
1471 : data);
1472 0 : i++;
1473 : }
1474 :
1475 : return 0;
1476 : }
1477 :
1478 0 : static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
1479 : {
1480 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1481 0 : int ret = 0;
1482 :
1483 0 : if (!data->disable_edc_leakage_controller &&
1484 0 : data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
1485 0 : data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
1486 0 : ret = smu7_program_edc_didt_registers(hwmgr,
1487 : DIDTEDCConfig_P12,
1488 : &data->edc_leakage_table);
1489 : if (ret)
1490 : return ret;
1491 :
1492 0 : ret = smum_send_msg_to_smc(hwmgr,
1493 : (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
1494 : NULL);
1495 : } else {
1496 0 : ret = smum_send_msg_to_smc(hwmgr,
1497 : (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
1498 : NULL);
1499 : }
1500 :
1501 : return ret;
1502 : }
1503 :
1504 0 : static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1505 : {
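     : 	/*
     : 	 * DPM bring-up sequence: voltage control and voltage tables first,
     : 	 * then static SMC setup (ARB, default DPM tables, SMC table), then
     : 	 * the individual power features, and finally DPM itself; later
     : 	 * steps assume the earlier ones have completed.
     : 	 */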
1506 0 : int tmp_result = 0;
1507 0 : int result = 0;
1508 :
1509 0 : if (smu7_voltage_control(hwmgr)) {
1510 0 : tmp_result = smu7_enable_voltage_control(hwmgr);
1511 0 : PP_ASSERT_WITH_CODE(tmp_result == 0,
1512 : "Failed to enable voltage control!",
1513 : result = tmp_result);
1514 :
1515 0 : tmp_result = smu7_construct_voltage_tables(hwmgr);
1516 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1517 : "Failed to construct voltage tables!",
1518 : result = tmp_result);
1519 : }
1520 0 : smum_initialize_mc_reg_table(hwmgr);
1521 :
1522 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1523 : PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1524 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1525 : GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1526 :
1527 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1528 : PHM_PlatformCaps_ThermalController))
1529 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1530 : GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1531 :
1532 0 : tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1533 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1534 : "Failed to program static screen threshold parameters!",
1535 : result = tmp_result);
1536 :
1537 0 : tmp_result = smu7_enable_display_gap(hwmgr);
1538 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1539 : "Failed to enable display gap!", result = tmp_result);
1540 :
1541 0 : tmp_result = smu7_program_voting_clients(hwmgr);
1542 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1543 : "Failed to program voting clients!", result = tmp_result);
1544 :
1545 0 : tmp_result = smum_process_firmware_header(hwmgr);
1546 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1547 : "Failed to process firmware header!", result = tmp_result);
1548 :
1549 0 : if (hwmgr->chip_id != CHIP_VEGAM) {
1550 0 : tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1551 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1552 : "Failed to initialize switch from ArbF0 to F1!",
1553 : result = tmp_result);
1554 : }
1555 :
1556 0 : result = smu7_setup_default_dpm_tables(hwmgr);
1557 0 : PP_ASSERT_WITH_CODE(0 == result,
1558 : "Failed to setup default DPM tables!", return result);
1559 :
1560 0 : tmp_result = smum_init_smc_table(hwmgr);
1561 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1562 : "Failed to initialize SMC table!", result = tmp_result);
1563 :
1564 0 : tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1565 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1566 : "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1567 :
1568 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1569 : hwmgr->chip_id <= CHIP_VEGAM) {
1570 0 : tmp_result = smu7_notify_has_display(hwmgr);
1571 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1572 : "Failed to enable display setting!", result = tmp_result);
1573 : } else {
1574 0 : smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1575 : }
1576 :
1577 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1578 : hwmgr->chip_id <= CHIP_VEGAM) {
1579 0 : tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
1580 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1581    : 				"Failed to populate EDC leakage registers!", result = tmp_result);
1582 : }
1583 :
1584 0 : tmp_result = smu7_enable_sclk_control(hwmgr);
1585 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1586 : "Failed to enable SCLK control!", result = tmp_result);
1587 :
1588 0 : tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1589 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1590 : "Failed to enable voltage control!", result = tmp_result);
1591 :
1592 0 : tmp_result = smu7_enable_ulv(hwmgr);
1593 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1594 : "Failed to enable ULV!", result = tmp_result);
1595 :
1596 0 : tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1597 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1598 : "Failed to enable deep sleep master switch!", result = tmp_result);
1599 :
1600 0 : tmp_result = smu7_enable_didt_config(hwmgr);
1601 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1602    : 			"Failed to enable DIDT config!", result = tmp_result);
1603 :
1604 0 : tmp_result = smu7_start_dpm(hwmgr);
1605 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1606 : "Failed to start DPM!", result = tmp_result);
1607 :
1608 0 : tmp_result = smu7_enable_smc_cac(hwmgr);
1609 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1610 : "Failed to enable SMC CAC!", result = tmp_result);
1611 :
1612 0 : tmp_result = smu7_enable_power_containment(hwmgr);
1613 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1614 : "Failed to enable power containment!", result = tmp_result);
1615 :
1616 0 : tmp_result = smu7_power_control_set_level(hwmgr);
1617 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1618 : "Failed to power control set level!", result = tmp_result);
1619 :
1620 0 : tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1621 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1622 : "Failed to enable thermal auto throttle!", result = tmp_result);
1623 :
1624 0 : tmp_result = smu7_pcie_performance_request(hwmgr);
1625 : PP_ASSERT_WITH_CODE((0 == tmp_result),
1626 : "pcie performance request failed!", result = tmp_result);
1627 :
1628    0 : 	return result;
1629 : }
1630 :
1631 0 : static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1632 : {
1633 0 : if (!hwmgr->avfs_supported)
1634 : return 0;
1635 :
1636 0 : if (enable) {
1637 0 : if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1638 : CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1639 0 : PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1640 : hwmgr, PPSMC_MSG_EnableAvfs, NULL),
1641 : "Failed to enable AVFS!",
1642 : return -EINVAL);
1643 : }
1644 0 : } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1645 : CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1646 0 : PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1647 : hwmgr, PPSMC_MSG_DisableAvfs, NULL),
1648 : "Failed to disable AVFS!",
1649 : return -EINVAL);
1650 : }
1651 :
1652 : return 0;
1653 : }
1654 :
1655 0 : static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1656 : {
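     : 	/*
     : 	 * A manual VDDC override keeps AVFS off (presumably because the two
     : 	 * would conflict); after an SCLK change, AVFS is cycled so it can
     : 	 * re-evaluate; otherwise it is simply kept enabled.
     : 	 */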
1657 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1658 :
1659 0 : if (!hwmgr->avfs_supported)
1660 : return 0;
1661 :
1662 0 : if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1663 0 : smu7_avfs_control(hwmgr, false);
1664 0 : } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1665 0 : smu7_avfs_control(hwmgr, false);
1666 0 : smu7_avfs_control(hwmgr, true);
1667 : } else {
1668 0 : smu7_avfs_control(hwmgr, true);
1669 : }
1670 :
1671 : return 0;
1672 : }
1673 :
1674 0 : static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1675 : {
1676 0 : int tmp_result, result = 0;
1677 :
1678 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1679 : PHM_PlatformCaps_ThermalController))
1680 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1681 : GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1682 :
1683 0 : tmp_result = smu7_disable_power_containment(hwmgr);
1684 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1685 : "Failed to disable power containment!", result = tmp_result);
1686 :
1687 0 : tmp_result = smu7_disable_smc_cac(hwmgr);
1688 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1689 : "Failed to disable SMC CAC!", result = tmp_result);
1690 :
1691 0 : tmp_result = smu7_disable_didt_config(hwmgr);
1692 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1693 : "Failed to disable DIDT!", result = tmp_result);
1694 :
1695 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1696 : CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1697 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1698 : GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1699 :
1700 0 : tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1701 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1702 : "Failed to disable thermal auto throttle!", result = tmp_result);
1703 :
1704 0 : tmp_result = smu7_avfs_control(hwmgr, false);
1705 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1706 : "Failed to disable AVFS!", result = tmp_result);
1707 :
1708 0 : tmp_result = smu7_stop_dpm(hwmgr);
1709 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1710 : "Failed to stop DPM!", result = tmp_result);
1711 :
1712 0 : tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1713 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1714 : "Failed to disable deep sleep master switch!", result = tmp_result);
1715 :
1716 0 : tmp_result = smu7_disable_ulv(hwmgr);
1717 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1718 : "Failed to disable ULV!", result = tmp_result);
1719 :
1720 0 : tmp_result = smu7_clear_voting_clients(hwmgr);
1721 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1722 : "Failed to clear voting clients!", result = tmp_result);
1723 :
1724 0 : tmp_result = smu7_reset_to_default(hwmgr);
1725 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1726 : "Failed to reset to default!", result = tmp_result);
1727 :
1728 0 : tmp_result = smum_stop_smc(hwmgr);
1729 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1730 : "Failed to stop smc!", result = tmp_result);
1731 :
1732 0 : tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1733 0 : PP_ASSERT_WITH_CODE((tmp_result == 0),
1734 : "Failed to force to switch arbf0!", result = tmp_result);
1735 :
1736 0 : return result;
1737 : }
1738 :
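     : /*
     :  * PCIe DPM is force-disabled in smu7_init_dpm_defaults() when the host
     :  * CPU is an Intel Rocket Lake part; PCIe link speed switching has
     :  * reportedly been unreliable on those platforms.
     :  */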
1739 : static bool intel_core_rkl_chk(void)
1740 : {
1741    : #if IS_ENABLED(CONFIG_X86_64)
1742 : struct cpuinfo_x86 *c = &cpu_data(0);
1743 :
1744 : return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
1745 : #else
1746 : return false;
1747 : #endif
1748 : }
1749 :
1750 0 : static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1751 : {
1752 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1753 0 : struct phm_ppt_v1_information *table_info =
1754 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
1755 0 : struct amdgpu_device *adev = hwmgr->adev;
1756 : uint8_t tmp1, tmp2;
1757 0 : uint16_t tmp3 = 0;
1758 :
1759 0 : data->dll_default_on = false;
1760 0 : data->mclk_dpm0_activity_target = 0xa;
1761 0 : data->vddc_vddgfx_delta = 300;
1762 0 : data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1763 0 : data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1764 0 : data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1765    0 : 	data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1766    0 : 	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1767    0 : 	data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1768    0 : 	data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1769    0 : 	data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1770    0 : 	data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1771    0 : 	data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1772 :
1773    0 : 	data->mclk_dpm_key_disabled = !(hwmgr->feature_mask & PP_MCLK_DPM_MASK);
1774    0 : 	data->sclk_dpm_key_disabled = !(hwmgr->feature_mask & PP_SCLK_DPM_MASK);
1775 0 : data->pcie_dpm_key_disabled =
1776 0 : intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
1777 : /* need to set voltage control types before EVV patching */
1778 0 : data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1779 0 : data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1780 0 : data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1781 0 : data->enable_tdc_limit_feature = true;
1782 0 : data->enable_pkg_pwr_tracking_feature = true;
1783 0 : data->force_pcie_gen = PP_PCIEGenInvalid;
1784    0 : 	data->ulv_supported = !!(hwmgr->feature_mask & PP_ULV_MASK);
1785 0 : data->current_profile_setting.bupdate_sclk = 1;
1786 0 : data->current_profile_setting.sclk_up_hyst = 0;
1787 0 : data->current_profile_setting.sclk_down_hyst = 100;
1788 0 : data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1789 0 : data->current_profile_setting.bupdate_mclk = 1;
1790 0 : if (hwmgr->chip_id >= CHIP_POLARIS10) {
1791 0 : if (adev->gmc.vram_width == 256) {
1792 0 : data->current_profile_setting.mclk_up_hyst = 10;
1793 0 : data->current_profile_setting.mclk_down_hyst = 60;
1794 0 : data->current_profile_setting.mclk_activity = 25;
1795 0 : } else if (adev->gmc.vram_width == 128) {
1796 0 : data->current_profile_setting.mclk_up_hyst = 5;
1797 0 : data->current_profile_setting.mclk_down_hyst = 16;
1798 0 : data->current_profile_setting.mclk_activity = 20;
1799 0 : } else if (adev->gmc.vram_width == 64) {
1800 0 : data->current_profile_setting.mclk_up_hyst = 3;
1801 0 : data->current_profile_setting.mclk_down_hyst = 16;
1802 0 : data->current_profile_setting.mclk_activity = 20;
1803 : }
1804 : } else {
1805 0 : data->current_profile_setting.mclk_up_hyst = 0;
1806 0 : data->current_profile_setting.mclk_down_hyst = 100;
1807 0 : data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1808 : }
1809 0 : hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1810 0 : hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1811 0 : hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1812 :
1813 0 : if (hwmgr->chip_id == CHIP_HAWAII) {
1814 0 : data->thermal_temp_setting.temperature_low = 94500;
1815 0 : data->thermal_temp_setting.temperature_high = 95000;
1816 0 : data->thermal_temp_setting.temperature_shutdown = 104000;
1817 : } else {
1818 0 : data->thermal_temp_setting.temperature_low = 99500;
1819 0 : data->thermal_temp_setting.temperature_high = 100000;
1820 0 : data->thermal_temp_setting.temperature_shutdown = 104000;
1821 : }
1822 :
1823 0 : data->fast_watermark_threshold = 100;
1824 0 : if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1825 : VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1826 0 : data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1827 0 : else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1828 : VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1829 0 : data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1830 :
1831 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1832 : PHM_PlatformCaps_ControlVDDGFX)) {
1833 0 : if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1834 : VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1835 0 : data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1836 : }
1837 : }
1838 :
1839 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1840 : PHM_PlatformCaps_EnableMVDDControl)) {
1841 0 : if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1842 : VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1843 0 : data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1844 0 : else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1845 : VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1846 0 : data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1847 : }
1848 :
1849 0 : if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1850 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1851 : PHM_PlatformCaps_ControlVDDGFX);
1852 :
1853 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1854 : PHM_PlatformCaps_ControlVDDCI)) {
1855 0 : if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1856 : VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1857 0 : data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1858 0 : else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1859 : VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1860 0 : data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1861 : }
1862 :
1863 0 : if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1864 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1865 : PHM_PlatformCaps_EnableMVDDControl);
1866 :
1867 0 : if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1868 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1869 : PHM_PlatformCaps_ControlVDDCI);
1870 :
1871 0 : data->vddc_phase_shed_control = 1;
1872 0 : if ((hwmgr->chip_id == CHIP_POLARIS12) ||
1873 0 : ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1874 0 : ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1875 0 : ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) ||
1876 0 : ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1877 0 : if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1878 0 : atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1879 : &tmp3);
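     : 			/*
     : 			 * Bits [6:5] of the SVI2 info word hold the phase
     : 			 * shedding setting; the two bits are swapped,
     : 			 * apparently to match the encoding the SMC expects.
     : 			 */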
1880 0 : tmp3 = (tmp3 >> 5) & 0x3;
1881 0 : data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1882 : }
1883 : } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1884 : data->vddc_phase_shed_control = 1;
1885 : }
1886 :
1887 0 : if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1888 0 : && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1889 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1890 : PHM_PlatformCaps_ClockStretcher);
1891 :
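     : 	/*
     : 	 * The PCIe gen/lane min/max trackers are seeded with inverted
     : 	 * extremes (max at the lowest value, min at the highest),
     : 	 * apparently so that later accumulation only has to widen them.
     : 	 */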
1892 0 : data->pcie_gen_performance.max = PP_PCIEGen1;
1893 0 : data->pcie_gen_performance.min = PP_PCIEGen3;
1894 0 : data->pcie_gen_power_saving.max = PP_PCIEGen1;
1895 0 : data->pcie_gen_power_saving.min = PP_PCIEGen3;
1896 0 : data->pcie_lane_performance.max = 0;
1897 0 : data->pcie_lane_performance.min = 16;
1898 0 : data->pcie_lane_power_saving.max = 0;
1899 0 : data->pcie_lane_power_saving.min = 16;
1900    :
1902 0 : if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1903 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1904 : PHM_PlatformCaps_UVDPowerGating);
1905 0 : if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1906 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1907 : PHM_PlatformCaps_VCEPowerGating);
1908 :
1909 0 : data->disable_edc_leakage_controller = true;
1910 0 : if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
1911 0 : ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
1912 0 : (adev->asic_type == CHIP_POLARIS12) ||
1913 : (adev->asic_type == CHIP_VEGAM))
1914 0 : data->disable_edc_leakage_controller = false;
1915 :
1916 0 : if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) {
1917 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1918 : PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1919 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1920 : PHM_PlatformCaps_EngineSpreadSpectrumSupport);
1921 : }
1922 :
1923 0 : if ((adev->pdev->device == 0x699F) &&
1924 0 : (adev->pdev->revision == 0xCF)) {
1925 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1926 : PHM_PlatformCaps_PowerContainment);
1927 0 : data->enable_tdc_limit_feature = false;
1928 0 : data->enable_pkg_pwr_tracking_feature = false;
1929 0 : data->disable_edc_leakage_controller = true;
1930 0 : phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1931 : PHM_PlatformCaps_ClockStretcher);
1932 : }
1933 0 : }
1934 :
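     : /*
     :  * Derive what appears to be the expected ring-oscillator (RO) fuse
     :  * range for this ASIC: fixed per-SKU bounds, except on Polaris10 where
     :  * the EVV revision and ASIC revision fuses refine them.
     :  */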
1935 0 : static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
1936 : {
1937 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1938 0 : struct amdgpu_device *adev = hwmgr->adev;
1939 0 : uint32_t asicrev1, evv_revision, max = 0, min = 0;
1940 :
1941 0 : atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
1942 : &evv_revision);
1943 :
1944 0 : atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
1945 :
1946 0 : if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1947 0 : ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
1948 : min = 1200;
1949 : max = 2500;
1950 0 : } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1951 0 : ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1952 : min = 900;
1953    : 		max = 2100;
1954 0 : } else if (hwmgr->chip_id == CHIP_POLARIS10) {
1955 0 : if (adev->pdev->subsystem_vendor == 0x106B) {
1956 : min = 1000;
1957 : max = 2300;
1958 : } else {
1959 0 : if (evv_revision == 0) {
1960 : min = 1000;
1961 : max = 2300;
1962 0 : } else if (evv_revision == 1) {
1963 0 : if (asicrev1 == 326) {
1964 : min = 1200;
1965 : max = 2500;
1966 : /* TODO: PATCH RO in VBIOS */
1967 : } else {
1968 0 : min = 1200;
1969 0 : max = 2000;
1970 : }
1971 0 : } else if (evv_revision == 2) {
1972 0 : min = 1200;
1973 0 : max = 2500;
1974 : }
1975 : }
1976 : } else {
1977 : min = 1100;
1978 : max = 2100;
1979 : }
1980 :
1981 0 : data->ro_range_minimum = min;
1982 0 : data->ro_range_maximum = max;
1983 :
1984 : /* TODO: PATCH RO in VBIOS here */
1985 :
1986 0 : return 0;
1987 : }
1988 :
1989 : /**
1990 : * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID.
1991 : *
1992 : * @hwmgr: the address of the powerplay hardware manager.
1993 : * Return: always 0
1994    :  * Return: 0 on success; -EINVAL if an invalid EVV voltage is read.
1995 0 : static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1996 : {
1997 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1998 : uint16_t vv_id;
1999 0 : uint16_t vddc = 0;
2000 0 : uint16_t vddgfx = 0;
2001 : uint16_t i, j;
2002 0 : uint32_t sclk = 0;
2003 0 : struct phm_ppt_v1_information *table_info =
2004 : (struct phm_ppt_v1_information *)hwmgr->pptable;
2005 0 : struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
2006 :
2007 0 : if (hwmgr->chip_id == CHIP_POLARIS10 ||
2008 0 : hwmgr->chip_id == CHIP_POLARIS11 ||
2009 : hwmgr->chip_id == CHIP_POLARIS12)
2010 0 : smu7_calculate_ro_range(hwmgr);
2011 :
2012 0 : for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2013 0 : vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2014 :
2015 0 : if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2016 0 : if ((hwmgr->pp_table_version == PP_TABLE_V1)
2017 0 : && !phm_get_sclk_for_voltage_evv(hwmgr,
2018 0 : table_info->vddgfx_lookup_table, vv_id, &sclk)) {
2019 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2020 : PHM_PlatformCaps_ClockStretcher)) {
2021 0 : sclk_table = table_info->vdd_dep_on_sclk;
2022 :
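     : 				/*
     : 				 * If clock stretching is disabled at this
     : 				 * level, evaluate EVV 50 MHz higher (clocks
     : 				 * are in 10 kHz units), presumably to leave
     : 				 * extra voltage margin.
     : 				 */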
2023 0 : for (j = 1; j < sclk_table->count; j++) {
2024 0 : if (sclk_table->entries[j].clk == sclk &&
2025 0 : sclk_table->entries[j].cks_enable == 0) {
2026 0 : sclk += 5000;
2027 0 : break;
2028 : }
2029 : }
2030 : }
2031 0 : if (0 == atomctrl_get_voltage_evv_on_sclk
2032 : (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
2033 : vv_id, &vddgfx)) {
2034    : 					/* need to make sure vddgfx is less than 2V or else it could burn the ASIC */
2035 0 : PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
2036 :
2037 : /* the voltage should not be zero nor equal to leakage ID */
2038 0 : if (vddgfx != 0 && vddgfx != vv_id) {
2039 0 : data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
2040 0 : data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
2041 0 : data->vddcgfx_leakage.count++;
2042 : }
2043 : } else {
2044 0 : pr_info("Error retrieving EVV voltage value!\n");
2045 : }
2046 : }
2047 : } else {
2048 0 : if ((hwmgr->pp_table_version == PP_TABLE_V0)
2049 0 : || !phm_get_sclk_for_voltage_evv(hwmgr,
2050 0 : table_info->vddc_lookup_table, vv_id, &sclk)) {
2051 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2052 : PHM_PlatformCaps_ClockStretcher)) {
2053 0 : if (table_info == NULL)
2054 : return -EINVAL;
2055 0 : sclk_table = table_info->vdd_dep_on_sclk;
2056 :
2057 0 : for (j = 1; j < sclk_table->count; j++) {
2058 0 : if (sclk_table->entries[j].clk == sclk &&
2059 0 : sclk_table->entries[j].cks_enable == 0) {
2060 0 : sclk += 5000;
2061 0 : break;
2062 : }
2063 : }
2064 : }
2065 :
2066 0 : if (phm_get_voltage_evv_on_sclk(hwmgr,
2067 : VOLTAGE_TYPE_VDDC,
2068 : sclk, vv_id, &vddc) == 0) {
2069 0 : if (vddc >= 2000 || vddc == 0)
2070 : return -EINVAL;
2071 : } else {
2072    : 					pr_debug("failed to retrieve EVV voltage!\n");
2073 0 : continue;
2074 : }
2075 :
2076 : /* the voltage should not be zero nor equal to leakage ID */
2077 0 : if (vddc != 0 && vddc != vv_id) {
2078 0 : data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
2079 0 : data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2080 0 : data->vddc_leakage.count++;
2081 : }
2082 : }
2083 : }
2084 : }
2085 :
2086 : return 0;
2087 : }
2088 :
2089 : /**
2090 : * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value.
2091 : *
2092 : * @hwmgr: the address of the powerplay hardware manager.
2093    :  * @voltage: pointer to the voltage value to be patched in place
2094 : * @leakage_table: pointer to leakage table
2095 : */
2096 0 : static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2097 : uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
2098 : {
2099 : uint32_t index;
2100 :
2101 : /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2102 0 : for (index = 0; index < leakage_table->count; index++) {
2103 : /* if this voltage matches a leakage voltage ID */
2104 : /* patch with actual leakage voltage */
2105 0 : if (leakage_table->leakage_id[index] == *voltage) {
2106 0 : *voltage = leakage_table->actual_voltage[index];
2107 : break;
2108 : }
2109 : }
2110 :
2111 0 : if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2112 0 : pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
2113 0 : }
2114 :
2115 : /**
2116 : * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
2117 : *
2118 : * @hwmgr: the address of the powerplay hardware manager.
2119 : * @lookup_table: pointer to voltage lookup table
2120 : * @leakage_table: pointer to leakage table
2121 : * Return: always 0
2122 : */
2123 : static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2124 : phm_ppt_v1_voltage_lookup_table *lookup_table,
2125 : struct smu7_leakage_voltage *leakage_table)
2126 : {
2127 : uint32_t i;
2128 :
2129 0 : for (i = 0; i < lookup_table->count; i++)
2130 0 : smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2131 : &lookup_table->entries[i].us_vdd, leakage_table);
2132 :
2133 : return 0;
2134 : }
2135 :
2136 : static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
2137 : struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
2138 : uint16_t *vddc)
2139 : {
2140 0 : struct phm_ppt_v1_information *table_info =
2141 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2142 0 : smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2143 0 : hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2144 0 : table_info->max_clock_voltage_on_dc.vddc;
2145 : return 0;
2146 : }
2147 :
2148 0 : static int smu7_patch_voltage_dependency_tables_with_lookup_table(
2149 : struct pp_hwmgr *hwmgr)
2150 : {
2151 : uint8_t entry_id;
2152 : uint8_t voltage_id;
2153 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2154 0 : struct phm_ppt_v1_information *table_info =
2155 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2156 :
2157 0 : struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2158 : table_info->vdd_dep_on_sclk;
2159 0 : struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2160 : table_info->vdd_dep_on_mclk;
2161 0 : struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2162 : table_info->mm_dep_table;
2163 :
2164 0 : if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2165 0 : for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2166 0 : voltage_id = sclk_table->entries[entry_id].vddInd;
2167 0 : sclk_table->entries[entry_id].vddgfx =
2168 0 : table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
2169 : }
2170 : } else {
2171 0 : for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2172 0 : voltage_id = sclk_table->entries[entry_id].vddInd;
2173 0 : sclk_table->entries[entry_id].vddc =
2174 0 : table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2175 : }
2176 : }
2177 :
2178 0 : for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2179 0 : voltage_id = mclk_table->entries[entry_id].vddInd;
2180 0 : mclk_table->entries[entry_id].vddc =
2181 0 : table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2182 : }
2183 :
2184 0 : for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
2185 0 : voltage_id = mm_table->entries[entry_id].vddcInd;
2186 0 : mm_table->entries[entry_id].vddc =
2187 0 : table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2188 : }
2189 :
2190 0 : return 0;
2192 : }
2193 :
2194 0 : static int phm_add_voltage(struct pp_hwmgr *hwmgr,
2195 : phm_ppt_v1_voltage_lookup_table *look_up_table,
2196 : phm_ppt_v1_voltage_lookup_record *record)
2197 : {
2198 : uint32_t i;
2199 :
2200 0 : PP_ASSERT_WITH_CODE((NULL != look_up_table),
2201    : 			"Lookup Table is missing.", return -EINVAL);
2202 0 : PP_ASSERT_WITH_CODE((0 != look_up_table->count),
2203 : "Lookup Table empty.", return -EINVAL);
2204 :
2205 0 : i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
2206 0 : PP_ASSERT_WITH_CODE((i >= look_up_table->count),
2207 : "Lookup Table is full.", return -EINVAL);
2208 :
2209 : /* This is to avoid entering duplicate calculated records. */
2210 0 : for (i = 0; i < look_up_table->count; i++) {
2211 0 : if (look_up_table->entries[i].us_vdd == record->us_vdd) {
2212 0 : if (look_up_table->entries[i].us_calculated == 1)
2213 : return 0;
2214 : break;
2215 : }
2216 : }
2217 :
2218 0 : look_up_table->entries[i].us_calculated = 1;
2219 0 : look_up_table->entries[i].us_vdd = record->us_vdd;
2220 0 : look_up_table->entries[i].us_cac_low = record->us_cac_low;
2221 0 : look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
2222 0 : look_up_table->entries[i].us_cac_high = record->us_cac_high;
2223 : /* Only increment the count when we're appending, not replacing duplicate entry. */
2224 0 : if (i == look_up_table->count)
2225 0 : look_up_table->count++;
2226 :
2227 : return 0;
2228 : }
2229 :
2230 :
2231 0 : static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2232 : {
2233 : uint8_t entry_id;
2234 : struct phm_ppt_v1_voltage_lookup_record v_record;
2235 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2236 0 : struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2237 :
2238 0 : phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
2239 0 : phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
2240 :
2241 0 : if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2242 0 : for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
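     : 			/*
     : 			 * vdd_offset is 16 bits with bit 15 as the sign:
     : 			 * negative offsets appear to be stored biased by
     : 			 * 0xFFFF, hence the subtraction below.
     : 			 */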
2243 0 : if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
2244 0 : v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2245 0 : sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2246 : else
2247 0 : v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2248 : sclk_table->entries[entry_id].vdd_offset;
2249 :
2250 0 : sclk_table->entries[entry_id].vddc =
2251 0 : v_record.us_cac_low = v_record.us_cac_mid =
2252 0 : v_record.us_cac_high = v_record.us_vdd;
2253 :
2254 0 : phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
2255 : }
2256 :
2257 0 : for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2258 0 : if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
2259 0 : v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2260 0 : mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2261 : else
2262 0 : v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2263 : mclk_table->entries[entry_id].vdd_offset;
2264 :
2265 0 : mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2266 0 : v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2267 0 : phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2268 : }
2269 : }
2270 0 : return 0;
2271 : }
2272 :
2273 0 : static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2274 : {
2275 : uint8_t entry_id;
2276 : struct phm_ppt_v1_voltage_lookup_record v_record;
2277 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2278 0 : struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2279 0 : phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
2280 :
2281 0 : if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2282 0 : for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
2283 0 : if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
2284 0 : v_record.us_vdd = mm_table->entries[entry_id].vddc +
2285 0 : mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
2286 : else
2287 0 : v_record.us_vdd = mm_table->entries[entry_id].vddc +
2288 : mm_table->entries[entry_id].vddgfx_offset;
2289 :
2290 : /* Add the calculated VDDGFX to the VDDGFX lookup table */
2291 0 : mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2292 0 : v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2293 0 : phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2294 : }
2295 : }
2296 0 : return 0;
2297 : }
2298 :
2299 0 : static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2300 : struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2301 : {
2302 : uint32_t table_size, i, j;
2303 0 : table_size = lookup_table->count;
2304 :
2305 0 : PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2306 : "Lookup table is empty", return -EINVAL);
2307 :
2308    : 	/* Insertion-sort the entries by ascending us_vdd */
2309 0 : for (i = 0; i < table_size - 1; i++) {
2310 0 : for (j = i + 1; j > 0; j--) {
2311 0 : if (lookup_table->entries[j].us_vdd <
2312 0 : lookup_table->entries[j - 1].us_vdd) {
2313 0 : swap(lookup_table->entries[j - 1],
2314 : lookup_table->entries[j]);
2315 : }
2316 : }
2317 : }
2318 :
2319 : return 0;
2320 : }
2321 :
2322 0 : static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2323 : {
2324 0 : int result = 0;
2325 : int tmp_result;
2326 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2327 0 : struct phm_ppt_v1_information *table_info =
2328 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2329 :
2330 0 : if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2331 0 : tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2332 0 : table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2333 : if (tmp_result != 0)
2334 : result = tmp_result;
2335 :
2336 0 : smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2337 : &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2338 : } else {
2340 0 : tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2341 0 : table_info->vddc_lookup_table, &(data->vddc_leakage));
2342 : if (tmp_result)
2343 : result = tmp_result;
2344 :
2345 0 : tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2346 : &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2347 : if (tmp_result)
2348 : result = tmp_result;
2349 : }
2350 :
2351 0 : tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2352 0 : if (tmp_result)
2353 0 : result = tmp_result;
2354 :
2355 0 : tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2356 0 : if (tmp_result)
2357 0 : result = tmp_result;
2358 :
2359 0 : tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2360 0 : if (tmp_result)
2361 0 : result = tmp_result;
2362 :
2363 0 : tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2364 0 : if (tmp_result)
2365 0 : result = tmp_result;
2366 :
2367 0 : tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2368 0 : if (tmp_result)
2369 0 : result = tmp_result;
2370 :
2371 0 : return result;
2372 : }
2373 :
2374 : static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr)
2375 : {
2376 0 : struct phm_ppt_v1_information *table_info =
2377 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2378 0 : struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2379 : table_info->vdd_dep_on_sclk;
2380 0 : struct phm_ppt_v1_voltage_lookup_table *lookup_table =
2381 : table_info->vddc_lookup_table;
2382 : uint16_t highest_voltage;
2383 : uint32_t i;
2384 :
2385 0 : highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2386 :
2387 0 : for (i = 0; i < lookup_table->count; i++) {
2388 0 : if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 &&
2389 : lookup_table->entries[i].us_vdd > highest_voltage)
2390 0 : highest_voltage = lookup_table->entries[i].us_vdd;
2391 : }
2392 :
2393 : return highest_voltage;
2394 : }
2395 :
2396 0 : static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2397 : {
2398 0 : struct phm_ppt_v1_information *table_info =
2399 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2400 :
2401 0 : struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2402 : table_info->vdd_dep_on_sclk;
2403 0 : struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2404 : table_info->vdd_dep_on_mclk;
2405 :
2406 0 : PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2407 : "VDD dependency on SCLK table is missing.",
2408 : return -EINVAL);
2409 0 : PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2410    : 			"VDD dependency on SCLK table must have at least one entry.",
2411 : return -EINVAL);
2412 :
2413 0 : PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2414    : 			"VDD dependency on MCLK table is missing.",
2415 : return -EINVAL);
2416 0 : PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2417    : 			"VDD dependency on MCLK table must have at least one entry.",
2418 : return -EINVAL);
2419 :
2420 0 : table_info->max_clock_voltage_on_ac.sclk =
2421 0 : allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2422 0 : table_info->max_clock_voltage_on_ac.mclk =
2423 0 : allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2424 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
2425 0 : table_info->max_clock_voltage_on_ac.vddc =
2426 0 : smu7_find_highest_vddc(hwmgr);
2427 : else
2428 0 : table_info->max_clock_voltage_on_ac.vddc =
2429 0 : allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2430 0 : table_info->max_clock_voltage_on_ac.vddci =
2431 0 : allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2432 :
2433 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2434 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2435 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2436 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2437 :
2438 0 : return 0;
2439 : }
2440 :
2441 0 : static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2442 : {
2443 0 : struct phm_ppt_v1_information *table_info =
2444 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2445 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2446 : struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2447 : uint32_t i;
2448 : uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2449 0 : struct amdgpu_device *adev = hwmgr->adev;
2450 :
2451 0 : if (table_info != NULL) {
2452 0 : dep_mclk_table = table_info->vdd_dep_on_mclk;
2453 0 : lookup_table = table_info->vddc_lookup_table;
2454 : } else
2455 : return 0;
2456 :
2457 0 : hw_revision = adev->pdev->revision;
2458 0 : sub_sys_id = adev->pdev->subsystem_device;
2459 0 : sub_vendor_id = adev->pdev->subsystem_vendor;
2460 :
2461 0 : if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
2462 0 : ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2463 0 : (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2464 0 : (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
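     : 		/*
     : 		 * Specific Polaris10 (device 0x67DF, rev 0xC7) partner
     : 		 * boards: pin the clock-stretch amount and make sure the
     : 		 * top MCLK state references a VDDC of at least 1V.
     : 		 */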
2466 0 : PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
2467 : CGS_IND_REG__SMC,
2468 : PWR_CKS_CNTL,
2469 : CKS_STRETCH_AMOUNT,
2470 : 0x3);
2471 :
2472 0 : if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2473 : return 0;
2474 :
2475 0 : for (i = 0; i < lookup_table->count; i++) {
2476 0 : if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2477 0 : dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2478 0 : return 0;
2479 : }
2480 : }
2481 : }
2482 : return 0;
2483 : }
2484 :
2485 0 : static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2486 : {
2487 : struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2488 : uint32_t temp_reg;
2489 0 : struct phm_ppt_v1_information *table_info =
2490 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
2491    :
2493 0 : if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2494 0 : temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
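     : 		/*
     : 		 * Route the VDDC PCC (power-throttle) GPIO to the matching
     : 		 * CNB_PWRMGT_CNTL response field based on its pin bit shift.
     : 		 */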
2495 0 : switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2496 : case 0:
2497 0 : temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2498 0 : break;
2499 : case 1:
2500 0 : temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2501 0 : break;
2502 : case 2:
2503 0 : temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2504 0 : break;
2505 : case 3:
2506 0 : temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2507 0 : break;
2508 : case 4:
2509 0 : temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2510 0 : break;
2511 : default:
2512 : break;
2513 : }
2514 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2515 : }
2516 :
2517 0 : if (table_info == NULL)
2518 : return 0;
2519 :
2520 0 : if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2521 0 : hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2522 0 : hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2523 0 : (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2524 :
2525 0 : hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2526 0 : (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2527 :
2528 0 : hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2529 :
2530 0 : hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2531 :
2532 0 : hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2533 : (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2534 :
2535 0 : hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2536 :
2537 0 : table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2538 : (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2539 :
2540 0 : table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2541 0 : table_info->cac_dtp_table->usOperatingTempStep = 1;
2542 0 : table_info->cac_dtp_table->usOperatingTempHyst = 1;
2543 :
2544 0 : hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2545 0 : hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2546 :
2547 0 : hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2548 0 : hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2549 :
2550 0 : hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2551 0 : table_info->cac_dtp_table->usOperatingTempMinLimit;
2552 :
2553 0 : hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2554 0 : table_info->cac_dtp_table->usOperatingTempMaxLimit;
2555 :
2556 0 : hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2557 0 : table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2558 :
2559 0 : hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2560 0 : table_info->cac_dtp_table->usOperatingTempStep;
2561 :
2562 0 : hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2563 0 : table_info->cac_dtp_table->usTargetOperatingTemp;
2564 0 : if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2565 0 : phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2566 : PHM_PlatformCaps_ODFuzzyFanControlSupport);
2567 : }
2568 :
2569 : return 0;
2570 : }
2571 :
2572 : /**
2573 : * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value.
2574 : *
2575 : * @hwmgr: the address of the powerplay hardware manager.
2576    :  * @voltage: pointer to the voltage value to be patched in place
2577 : * @leakage_table: pointer to leakage table
2578 : */
2579 0 : static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2580 : uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2581 : {
2582 : uint32_t index;
2583 :
2584 : /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2585 0 : for (index = 0; index < leakage_table->count; index++) {
2586 : /* if this voltage matches a leakage voltage ID */
2587 : /* patch with actual leakage voltage */
2588 0 : if (leakage_table->leakage_id[index] == *voltage) {
2589 0 : *voltage = leakage_table->actual_voltage[index];
2590 : break;
2591 : }
2592 : }
2593 :
2594 0 : if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2595 0 : pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
2596 0 : }
2597    :
2599 : static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2600 : struct phm_clock_voltage_dependency_table *tab)
2601 : {
2602 : uint16_t i;
2603 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2604 :
2605 0 : if (tab)
2606 0 : for (i = 0; i < tab->count; i++)
2607 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2608 : &data->vddc_leakage);
2609 :
2610 : return 0;
2611 : }
2612 :
2613 : static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2614 : struct phm_clock_voltage_dependency_table *tab)
2615 : {
2616 : uint16_t i;
2617 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2618 :
2619 0 : if (tab)
2620 0 : for (i = 0; i < tab->count; i++)
2621 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2622 : &data->vddci_leakage);
2623 :
2624 : return 0;
2625 : }
2626 :
2627 : static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2628 : struct phm_vce_clock_voltage_dependency_table *tab)
2629 : {
2630 : uint16_t i;
2631 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2632 :
2633 0 : if (tab)
2634 0 : for (i = 0; i < tab->count; i++)
2635 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2636 : &data->vddc_leakage);
2637 :
2638 : return 0;
2639 : }
2640    :
2642 : static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2643 : struct phm_uvd_clock_voltage_dependency_table *tab)
2644 : {
2645 : uint16_t i;
2646 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2647 :
2648 0 : if (tab)
2649 0 : for (i = 0; i < tab->count; i++)
2650 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2651 : &data->vddc_leakage);
2652 :
2653 : return 0;
2654 : }
2655 :
2656 : static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2657 : struct phm_phase_shedding_limits_table *tab)
2658 : {
2659 : uint16_t i;
2660 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2661 :
2662 0 : if (tab)
2663 0 : for (i = 0; i < tab->count; i++)
2664 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2665 : &data->vddc_leakage);
2666 :
2667 : return 0;
2668 : }
2669 :
2670 : static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2671 : struct phm_samu_clock_voltage_dependency_table *tab)
2672 : {
2673 : uint16_t i;
2674 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2675 :
2676 0 : if (tab)
2677 0 : for (i = 0; i < tab->count; i++)
2678 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2679 : &data->vddc_leakage);
2680 :
2681 : return 0;
2682 : }
2683 :
2684 : static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2685 : struct phm_acp_clock_voltage_dependency_table *tab)
2686 : {
2687 : uint16_t i;
2688 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2689 :
2690 0 : if (tab)
2691 0 : for (i = 0; i < tab->count; i++)
2692 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2693 : &data->vddc_leakage);
2694 :
2695 : return 0;
2696 : }
2697 :
2698 0 : static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2699 : struct phm_clock_and_voltage_limits *tab)
2700 : {
2701 : uint32_t vddc, vddci;
2702 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2703 :
2704 0 : if (tab) {
2705 0 : vddc = tab->vddc;
2706 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2707 : &data->vddc_leakage);
2708 0 : tab->vddc = vddc;
2709 0 : vddci = tab->vddci;
2710 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2711 : &data->vddci_leakage);
2712 0 : tab->vddci = vddci;
2713 : }
2714 :
2715 0 : return 0;
2716 : }
2717 :
2718 0 : static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2719 : {
2720 : uint32_t i;
2721 : uint32_t vddc;
2722 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2723 :
2724 0 : if (tab) {
2725 0 : for (i = 0; i < tab->count; i++) {
2726 0 : vddc = (uint32_t)(tab->entries[i].Vddc);
2727 0 : smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2728 0 : tab->entries[i].Vddc = (uint16_t)vddc;
2729 : }
2730 : }
2731 :
2732 0 : return 0;
2733 : }
2734 :
2735 0 : static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2736 : {
2737 : int tmp;
2738 :
2739 0 : tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2740 : if (tmp)
2741 : return -EINVAL;
2742 :
2743 0 : tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2744 : if (tmp)
2745 : return -EINVAL;
2746 :
2747 0 : tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2748 : if (tmp)
2749 : return -EINVAL;
2750 :
2751 0 : tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2752 : if (tmp)
2753 : return -EINVAL;
2754 :
2755 0 : tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2756 : if (tmp)
2757 : return -EINVAL;
2758 :
2759 0 : tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2760 : if (tmp)
2761 : return -EINVAL;
2762 :
2763 0 : tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2764 : if (tmp)
2765 : return -EINVAL;
2766 :
2767 0 : tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2768 : if (tmp)
2769 : return -EINVAL;
2770 :
2771 0 : tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2772 : if (tmp)
2773 : return -EINVAL;
2774 :
2775 0 : tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2776 0 : if (tmp)
2777 : return -EINVAL;
2778 :
2779 0 : tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2780 0 : if (tmp)
2781 : return -EINVAL;
2782 :
2783 0 : tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2784 0 : if (tmp)
2785 : return -EINVAL;
2786 :
2787 0 : return 0;
2788 : }
2789    :
2791 0 : static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2792 : {
2793 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2794 :
2795 0 : struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2796 0 : struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2797 0 : struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2798 :
2799 0 : PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2800 : "VDDC dependency on SCLK table is missing. This table is mandatory",
2801 : return -EINVAL);
2802 0 : PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2803    : 			"VDDC dependency on SCLK table must have at least one entry. This table is mandatory",
2804 : return -EINVAL);
2805 :
2806 0 : PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2807 : "VDDC dependency on MCLK table is missing. This table is mandatory",
2808 : return -EINVAL);
2809 0 : PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2810    : 			"VDDC dependency on MCLK table must have at least one entry. This table is mandatory",
2811 : return -EINVAL);
2812 :
2813 0 : data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2814 0 : data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2815 :
2816 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2817 0 : allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2818 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2819 0 : allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2820 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2821 0 : allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2822 :
2823 0 : if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2824 0 : data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2825 0 : data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2826 : }
2827 :
2828 0 : if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2829 0 : hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2830 :
2831 : return 0;
2832 : }
2833 :
2834 0 : static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2835 : {
2836 0 : kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2837 0 : hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2838 0 : kfree(hwmgr->backend);
2839 0 : hwmgr->backend = NULL;
2840 :
2841 0 : return 0;
2842 : }
2843 :
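     : /*
     :  * Fallback used when EVV is not supported: translate each virtual
     :  * leakage voltage ID into real VDDC/VDDCI values via the leakage ID
     :  * fused into the part.
     :  */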
2844 0 : static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2845 : {
2846 : uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2847 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2848 : int i;
2849 :
2850 0 : if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2851 0 : for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2852 0 : virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2853 0 : if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2854 : virtual_voltage_id,
2855 : efuse_voltage_id) == 0) {
2856 0 : if (vddc != 0 && vddc != virtual_voltage_id) {
2857 0 : data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2858 0 : data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2859 0 : data->vddc_leakage.count++;
2860 : }
2861 0 : if (vddci != 0 && vddci != virtual_voltage_id) {
2862 0 : data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2863 0 : data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2864 0 : data->vddci_leakage.count++;
2865 : }
2866 : }
2867 : }
2868 : }
2869 0 : return 0;
2870 : }
2871 :
2872 : #define LEAKAGE_ID_MSB 463
2873 : #define LEAKAGE_ID_LSB 454
2874 :
2875 0 : static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
2876 : {
2877 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2878 : uint32_t efuse;
2879 : uint16_t offset;
2880 0 : int ret = 0;
2881 :
2882 0 : if (data->disable_edc_leakage_controller)
2883 : return 0;
2884 :
2885 0 : ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
2886 : &data->edc_hilo_leakage_offset_from_vbios);
2887 0 : if (ret)
2888 : return ret;
2889 :
2890 0 : if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
2891 0 : data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
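     : 		/*
     : 		 * The fused leakage ID decides whether the low- or
     : 		 * high-leakage EDC DIDT table from the VBIOS applies.
     : 		 */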
2892 0 : atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
2893 0 : if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
2894 0 : offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
2895 : else
2896 0 : offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
2897 :
2898 0 : ret = atomctrl_get_edc_leakage_table(hwmgr,
2899 : &data->edc_leakage_table,
2900 : offset);
2901 0 : if (ret)
2902 : return ret;
2903 : }
2904 :
2905 : return ret;
2906 : }
2907 :
2908 0 : static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2909 : {
2910 : struct smu7_hwmgr *data;
2911 0 : int result = 0;
2912 :
2913 0 : data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2914 0 : if (data == NULL)
2915 : return -ENOMEM;
2916 :
2917 0 : hwmgr->backend = data;
2918 0 : smu7_patch_voltage_workaround(hwmgr);
2919 0 : smu7_init_dpm_defaults(hwmgr);
2920 :
2921 : /* Get leakage voltage based on leakage ID. */
2922 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2923 : PHM_PlatformCaps_EVV)) {
2924 0 : result = smu7_get_evv_voltages(hwmgr);
2925 0 : if (result) {
2926 0 : pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2927 0 : return -EINVAL;
2928 : }
2929 : } else {
2930 0 : smu7_get_elb_voltages(hwmgr);
2931 : }
2932 :
2933 0 : if (hwmgr->pp_table_version == PP_TABLE_V1) {
2934 0 : smu7_complete_dependency_tables(hwmgr);
2935 0 : smu7_set_private_data_based_on_pptable_v1(hwmgr);
2936 0 : } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2937 0 : smu7_patch_dependency_tables_with_leakage(hwmgr);
2938 0 : smu7_set_private_data_based_on_pptable_v0(hwmgr);
2939 : }
2940 :
2941 : 	/* Initialize Dynamic State Adjustment Rule Settings. */
2942 0 : result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2943 :
2944 0 : if (0 == result) {
2945 0 : struct amdgpu_device *adev = hwmgr->adev;
2946 :
2947 0 : data->is_tlu_enabled = false;
2948 :
2949 0 : hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2950 : SMU7_MAX_HARDWARE_POWERLEVELS;
2951 0 : hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2952 0 : hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2953 :
2954 0 : data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2955 0 : if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2956 0 : data->pcie_spc_cap = 20;
2957 : else
2958 0 : data->pcie_spc_cap = 16;
2959 0 : data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2960 :
2961 0 : hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2962 : /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2963 0 : hwmgr->platform_descriptor.clockStep.engineClock = 500;
2964 0 : hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2965 0 : smu7_thermal_parameter_init(hwmgr);
2966 : } else {
2967 : 		/* Ignore the return value here; we are cleaning up after a failed init. */
2968 : smu7_hwmgr_backend_fini(hwmgr);
2969 : }
2970 :
2971 0 : result = smu7_update_edc_leakage_table(hwmgr);
2972 0 : if (result)
2973 : return result;
2974 :
2975 0 : return 0;
2976 : }
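
/*
 * Illustrative note (editor's addition): clockStep.engineClock = 500 pairs
 * with the "Here we use 5 [MHz]" wording above, which suggests these clock
 * fields count in 10 kHz units. That unit is an assumption inferred from
 * the comment, not stated by the driver.
 */
#if 0
#include <stdint.h>

#define SMU7_CLOCK_UNIT_KHZ 10	/* assumed: one count = 10 kHz */

static uint32_t clock_counts_to_khz(uint32_t counts)
{
	return counts * SMU7_CLOCK_UNIT_KHZ;	/* 500 -> 5000 kHz = 5 MHz */
}
#endif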
2977 :
2978 0 : static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2979 : {
2980 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2981 : uint32_t level, tmp;
2982 :
2983 0 : if (!data->pcie_dpm_key_disabled) {
2984 0 : if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2985 : level = 0;
2986 : tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2987 0 : while (tmp >>= 1)
2988 0 : level++;
2989 :
2990 0 : if (level)
2991 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
2992 : PPSMC_MSG_PCIeDPM_ForceLevel, level,
2993 : NULL);
2994 : }
2995 : }
2996 :
2997 0 : if (!data->sclk_dpm_key_disabled) {
2998 0 : if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2999 : level = 0;
3000 : tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3001 0 : while (tmp >>= 1)
3002 0 : level++;
3003 :
3004 0 : if (level)
3005 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3006 : PPSMC_MSG_SCLKDPM_SetEnabledMask,
3007 0 : (1 << level),
3008 : NULL);
3009 : }
3010 : }
3011 :
3012 0 : if (!data->mclk_dpm_key_disabled) {
3013 0 : if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3014 : level = 0;
3015 : tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3016 0 : while (tmp >>= 1)
3017 0 : level++;
3018 :
3019 0 : if (level)
3020 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3021 : PPSMC_MSG_MCLKDPM_SetEnabledMask,
3022 0 : (1 << level),
3023 : NULL);
3024 : }
3025 : }
3026 :
3027 0 : return 0;
3028 : }
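
/*
 * Illustrative sketch (editor's addition): each branch above computes the
 * index of the highest set bit of a DPM enable mask with the same
 * while (tmp >>= 1) level++ loop, then forces that level (PCIe) or sends a
 * single-bit mask 1 << level (SCLK/MCLK).
 */
#if 0
#include <stdint.h>

static uint32_t dpm_highest_level(uint32_t enable_mask)
{
	uint32_t level = 0;

	while (enable_mask >>= 1)	/* shift until only bit 0 remains */
		level++;
	return level;
}
/* e.g. enable_mask 0b10110 -> level 4; SetEnabledMask then gets 1 << 4 */
#endif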
3029 :
3030 0 : static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3031 : {
3032 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3033 :
3034 0 : if (hwmgr->pp_table_version == PP_TABLE_V1)
3035 0 : phm_apply_dal_min_voltage_request(hwmgr);
3036 : 	/* TODO: handle v0 (Iceland and CI). */
3037 :
3038 0 : if (!data->sclk_dpm_key_disabled) {
3039 0 : if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3040 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3041 : PPSMC_MSG_SCLKDPM_SetEnabledMask,
3042 : data->dpm_level_enable_mask.sclk_dpm_enable_mask,
3043 : NULL);
3044 : }
3045 :
3046 0 : if (!data->mclk_dpm_key_disabled) {
3047 0 : if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
3048 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3049 : PPSMC_MSG_MCLKDPM_SetEnabledMask,
3050 : data->dpm_level_enable_mask.mclk_dpm_enable_mask,
3051 : NULL);
3052 : }
3053 :
3054 0 : return 0;
3055 : }
3056 :
3057 0 : static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3058 : {
3059 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3060 :
3061 0 : if (!smum_is_dpm_running(hwmgr))
3062 : return -EINVAL;
3063 :
3064 0 : if (!data->pcie_dpm_key_disabled) {
3065 0 : smum_send_msg_to_smc(hwmgr,
3066 : PPSMC_MSG_PCIeDPM_UnForceLevel,
3067 : NULL);
3068 : }
3069 :
3070 0 : return smu7_upload_dpm_level_enable_mask(hwmgr);
3071 : }
3072 :
3073 0 : static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3074 : {
3075 0 : struct smu7_hwmgr *data =
3076 : (struct smu7_hwmgr *)(hwmgr->backend);
3077 : uint32_t level;
3078 :
3079 0 : if (!data->sclk_dpm_key_disabled)
3080 0 : if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3081 0 : level = phm_get_lowest_enabled_level(hwmgr,
3082 : data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3083 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3084 : PPSMC_MSG_SCLKDPM_SetEnabledMask,
3085 0 : (1 << level),
3086 : NULL);
3087 :
3088 : }
3089 :
3090 0 : if (!data->mclk_dpm_key_disabled) {
3091 0 : if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3092 0 : level = phm_get_lowest_enabled_level(hwmgr,
3093 : data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3094 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3095 : PPSMC_MSG_MCLKDPM_SetEnabledMask,
3096 0 : (1 << level),
3097 : NULL);
3098 : }
3099 : }
3100 :
3101 0 : if (!data->pcie_dpm_key_disabled) {
3102 0 : if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3103 0 : level = phm_get_lowest_enabled_level(hwmgr,
3104 : data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3105 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
3106 : PPSMC_MSG_PCIeDPM_ForceLevel,
3107 : (level),
3108 : NULL);
3109 : }
3110 : }
3111 :
3112 0 : return 0;
3113 : }
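
/*
 * Illustrative sketch (editor's addition): phm_get_lowest_enabled_level()
 * (defined elsewhere in powerplay) yields the index of the lowest set bit
 * of the enable mask; a portable equivalent is sketched below.
 */
#if 0
#include <stdint.h>

static uint32_t lowest_enabled_level(uint32_t enable_mask)
{
	uint32_t level = 0;

	while (enable_mask && !(enable_mask & 1)) {
		enable_mask >>= 1;
		level++;
	}
	return level;
}
/* e.g. enable_mask 0b10100 -> level 2 */
#endif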
3114 :
3115 0 : static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3116 : uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
3117 : {
3118 : uint32_t percentage;
3119 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3120 0 : struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3121 : int32_t tmp_mclk;
3122 : int32_t tmp_sclk;
3123 : int32_t count;
3124 :
3125 0 : if (golden_dpm_table->mclk_table.count < 1)
3126 : return -EINVAL;
3127 :
3128 0 : percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
3129 0 : golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3130 :
3131 0 : if (golden_dpm_table->mclk_table.count == 1) {
3132 0 : percentage = 70;
3133 0 : tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3134 0 : *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3135 : } else {
3136 0 : tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
3137 0 : *mclk_mask = golden_dpm_table->mclk_table.count - 2;
3138 : }
3139 :
3140 0 : tmp_sclk = tmp_mclk * percentage / 100;
3141 :
3142 0 : if (hwmgr->pp_table_version == PP_TABLE_V0) {
3143 0 : for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3144 0 : count >= 0; count--) {
3145 0 : if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
3146 0 : tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
3147 0 : *sclk_mask = count;
3148 0 : break;
3149 : }
3150 : }
3151 0 : if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3152 0 : *sclk_mask = 0;
3153 0 : tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
3154 : }
3155 :
3156 0 : if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3157 0 : *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3158 0 : } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3159 0 : struct phm_ppt_v1_information *table_info =
3160 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
3161 :
3162 0 : for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
3163 0 : if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
3164 0 : tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
3165 0 : *sclk_mask = count;
3166 0 : break;
3167 : }
3168 : }
3169 0 : if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3170 0 : *sclk_mask = 0;
3171 0 : tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3172 : }
3173 :
3174 0 : if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3175 0 : *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3176 : }
3177 :
3178 0 : if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
3179 0 : *mclk_mask = 0;
3180 0 : else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3181 0 : *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3182 :
3183 0 : *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
3184 0 : hwmgr->pstate_sclk = tmp_sclk;
3185 0 : hwmgr->pstate_mclk = tmp_mclk;
3186 :
3187 0 : return 0;
3188 : }
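
/*
 * Illustrative sketch (editor's addition): the profiling clocks preserve
 * the golden SCLK:MCLK ratio. With more than one MCLK level the
 * second-highest MCLK is chosen, a target SCLK is scaled from the
 * top-level ratio, and the dependency table is walked from the top for the
 * nearest level at or below that target.
 */
#if 0
#include <stdint.h>

static uint32_t profiling_sclk_target(uint32_t top_sclk, uint32_t top_mclk,
				      uint32_t chosen_mclk)
{
	uint32_t percentage = 100 * top_sclk / top_mclk;

	return chosen_mclk * percentage / 100;
}
/* e.g. top_sclk 1340, top_mclk 2000, chosen_mclk 1750 -> 1172 (integer math) */
#endif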
3189 :
3190 0 : static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
3191 : enum amd_dpm_forced_level level)
3192 : {
3193 0 : int ret = 0;
3194 0 : uint32_t sclk_mask = 0;
3195 0 : uint32_t mclk_mask = 0;
3196 0 : uint32_t pcie_mask = 0;
3197 :
3198 0 : if (hwmgr->pstate_sclk == 0)
3199 0 : smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3200 :
3201 0 : switch (level) {
3202 : case AMD_DPM_FORCED_LEVEL_HIGH:
3203 0 : ret = smu7_force_dpm_highest(hwmgr);
3204 0 : break;
3205 : case AMD_DPM_FORCED_LEVEL_LOW:
3206 0 : ret = smu7_force_dpm_lowest(hwmgr);
3207 0 : break;
3208 : case AMD_DPM_FORCED_LEVEL_AUTO:
3209 0 : ret = smu7_unforce_dpm_levels(hwmgr);
3210 0 : break;
3211 : case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
3212 : case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
3213 : case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
3214 : case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
3215 0 : ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3216 0 : if (ret)
3217 : return ret;
3218 0 : smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
3219 0 : smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
3220 0 : smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
3221 0 : break;
3222 : case AMD_DPM_FORCED_LEVEL_MANUAL:
3223 : case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
3224 : default:
3225 : break;
3226 : }
3227 :
3228 0 : if (!ret) {
3229 0 : if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3230 0 : smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
3231 0 : else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3232 0 : smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3233 : }
3234 : return ret;
3235 : }
3236 :
3237 0 : static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
3238 : {
3239 0 : return sizeof(struct smu7_power_state);
3240 : }
3241 :
3242 0 : static bool smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
3243 : uint32_t vblank_time_us)
3244 : {
3245 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3246 : uint32_t switch_limit_us;
3247 :
3248 0 : switch (hwmgr->chip_id) {
3249 : case CHIP_POLARIS10:
3250 : case CHIP_POLARIS11:
3251 : case CHIP_POLARIS12:
3252 0 : if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12))
3253 0 : switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3254 : else
3255 0 : switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
3256 : break;
3257 : case CHIP_VEGAM:
3258 : switch_limit_us = 30;
3259 : break;
3260 : default:
3261 0 : switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3262 : break;
3263 : }
3264 :
3265 0 : 	return vblank_time_us < switch_limit_us;
3269 : }
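
/*
 * Worked example (editor's addition, hypothetical vblank timings): for
 * GDDR5 on a default part the switch limit above is 450 us. A mode with
 * ~500 us of vblank keeps MCLK switching enabled (500 >= 450), while a
 * high-refresh mode with ~150 us of vblank trips the limit, so switching
 * is suppressed to avoid visible artifacts during memory retraining.
 */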
3270 :
3271 0 : static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3272 : struct pp_power_state *request_ps,
3273 : const struct pp_power_state *current_ps)
3274 : {
3275 0 : struct amdgpu_device *adev = hwmgr->adev;
3276 0 : struct smu7_power_state *smu7_ps =
3277 0 : cast_phw_smu7_power_state(&request_ps->hardware);
3278 : uint32_t sclk;
3279 : uint32_t mclk;
3280 0 : struct PP_Clocks minimum_clocks = {0};
3281 : bool disable_mclk_switching;
3282 : bool disable_mclk_switching_for_frame_lock;
3283 : bool disable_mclk_switching_for_display;
3284 : const struct phm_clock_and_voltage_limits *max_limits;
3285 : uint32_t i;
3286 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3287 0 : struct phm_ppt_v1_information *table_info =
3288 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
3289 : int32_t count;
3290 0 : int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3291 : uint32_t latency;
3292 0 : bool latency_allowed = false;
3293 :
3294 0 : data->battery_state = (PP_StateUILabel_Battery ==
3295 0 : request_ps->classification.ui_label);
3296 0 : data->mclk_ignore_signal = false;
3297 :
3298 0 : max_limits = adev->pm.ac_power ?
3299 : &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3300 : &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3301 :
3302 : /* Cap clock DPM tables at DC MAX if it is in DC. */
3303 0 : if (!adev->pm.ac_power) {
3304 0 : for (i = 0; i < smu7_ps->performance_level_count; i++) {
3305 0 : if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
3306 0 : smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
3307 0 : if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
3308 0 : smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
3309 : }
3310 : }
3311 :
3312 0 : minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3313 0 : minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3314 :
3315 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3316 : PHM_PlatformCaps_StablePState)) {
3317 0 : max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3318 0 : stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3319 :
3320 0 : for (count = table_info->vdd_dep_on_sclk->count - 1;
3321 0 : count >= 0; count--) {
3322 0 : if (stable_pstate_sclk >=
3323 0 : table_info->vdd_dep_on_sclk->entries[count].clk) {
3324 0 : stable_pstate_sclk =
3325 : table_info->vdd_dep_on_sclk->entries[count].clk;
3326 0 : break;
3327 : }
3328 : }
3329 :
3330 0 : if (count < 0)
3331 0 : stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3332 :
3333 0 : stable_pstate_mclk = max_limits->mclk;
3334 :
3335 0 : minimum_clocks.engineClock = stable_pstate_sclk;
3336 0 : minimum_clocks.memoryClock = stable_pstate_mclk;
3337 : }
3338 :
3339 0 : disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3340 0 : hwmgr->platform_descriptor.platformCaps,
3341 : PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3342 :
3343 0 : disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
3344 0 : !hwmgr->display_config->multi_monitor_in_sync) ||
3345 0 : (hwmgr->display_config->num_display &&
3346 0 : smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
3347 :
3348 0 : disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
3349 : disable_mclk_switching_for_display;
3350 :
3351 0 : if (hwmgr->display_config->num_display == 0) {
3352 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
3353 0 : data->mclk_ignore_signal = true;
3354 : else
3355 : disable_mclk_switching = false;
3356 : }
3357 :
3358 0 : sclk = smu7_ps->performance_levels[0].engine_clock;
3359 0 : mclk = smu7_ps->performance_levels[0].memory_clock;
3360 :
3361 0 : if (disable_mclk_switching &&
3362 0 : (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3363 : hwmgr->chip_id <= CHIP_VEGAM)))
3364 0 : mclk = smu7_ps->performance_levels
3365 0 : [smu7_ps->performance_level_count - 1].memory_clock;
3366 :
3367 0 : if (sclk < minimum_clocks.engineClock)
3368 0 : sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3369 0 : max_limits->sclk : minimum_clocks.engineClock;
3370 :
3371 0 : if (mclk < minimum_clocks.memoryClock)
3372 0 : mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3373 0 : max_limits->mclk : minimum_clocks.memoryClock;
3374 :
3375 0 : smu7_ps->performance_levels[0].engine_clock = sclk;
3376 0 : smu7_ps->performance_levels[0].memory_clock = mclk;
3377 :
3378 0 : smu7_ps->performance_levels[1].engine_clock =
3379 : (smu7_ps->performance_levels[1].engine_clock >=
3380 : smu7_ps->performance_levels[0].engine_clock) ?
3381 0 : smu7_ps->performance_levels[1].engine_clock :
3382 : smu7_ps->performance_levels[0].engine_clock;
3383 :
3384 0 : if (disable_mclk_switching) {
3385 0 : if (mclk < smu7_ps->performance_levels[1].memory_clock)
3386 0 : mclk = smu7_ps->performance_levels[1].memory_clock;
3387 :
3388 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
3389 0 : if (disable_mclk_switching_for_display) {
3390 : /* Find the lowest MCLK frequency that is within
3391 : * the tolerable latency defined in DAL
3392 : */
3393 0 : latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3394 0 : for (i = 0; i < data->mclk_latency_table.count; i++) {
3395 0 : if (data->mclk_latency_table.entries[i].latency <= latency) {
3396 0 : latency_allowed = true;
3397 :
3398 0 : if ((data->mclk_latency_table.entries[i].frequency >=
3399 0 : smu7_ps->performance_levels[0].memory_clock) &&
3400 : (data->mclk_latency_table.entries[i].frequency <=
3401 : smu7_ps->performance_levels[1].memory_clock)) {
3402 : mclk = data->mclk_latency_table.entries[i].frequency;
3403 : break;
3404 : }
3405 : }
3406 : }
3407 0 : if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
3408 0 : data->mclk_ignore_signal = true;
3409 : } else {
3410 0 : data->mclk_ignore_signal = false;
3411 : }
3412 : }
3413 :
3414 0 : if (disable_mclk_switching_for_frame_lock)
3415 0 : mclk = smu7_ps->performance_levels[1].memory_clock;
3416 : }
3417 :
3418 0 : smu7_ps->performance_levels[0].memory_clock = mclk;
3419 :
3420 0 : if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3421 : hwmgr->chip_id <= CHIP_VEGAM))
3422 0 : smu7_ps->performance_levels[1].memory_clock = mclk;
3423 : } else {
3424 0 : if (smu7_ps->performance_levels[1].memory_clock <
3425 : smu7_ps->performance_levels[0].memory_clock)
3426 0 : smu7_ps->performance_levels[1].memory_clock =
3427 : smu7_ps->performance_levels[0].memory_clock;
3428 : }
3429 :
3430 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3431 : PHM_PlatformCaps_StablePState)) {
3432 0 : for (i = 0; i < smu7_ps->performance_level_count; i++) {
3433 0 : smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3434 0 : smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3435 0 : smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3436 0 : smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3437 : }
3438 : }
3439 0 : return 0;
3440 : }
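
/*
 * Illustrative sketch (editor's addition): under StablePState the target
 * SCLK is 75% of the AC maximum, snapped down to the nearest SCLK
 * dependency-table entry, and MCLK is pinned at the AC maximum. The
 * numbers below are hypothetical, in whatever units the table uses.
 */
#if 0
#include <stdint.h>

static uint32_t stable_pstate_sclk_target(uint32_t ac_max_sclk)
{
	return ac_max_sclk * 75 / 100;	/* e.g. 1200 -> 900 */
}
#endif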
3441 :
3442 :
3443 0 : static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3444 : {
3445 : struct pp_power_state *ps;
3446 : struct smu7_power_state *smu7_ps;
3447 :
3448 0 : if (hwmgr == NULL)
3449 : return -EINVAL;
3450 :
3451 0 : ps = hwmgr->request_ps;
3452 :
3453 0 : if (ps == NULL)
3454 : return -EINVAL;
3455 :
3456 0 : smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3457 :
3458 0 : if (low)
3459 0 : return smu7_ps->performance_levels[0].memory_clock;
3460 : else
3461 : return smu7_ps->performance_levels
3462 0 : [smu7_ps->performance_level_count-1].memory_clock;
3463 : }
3464 :
3465 0 : static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3466 : {
3467 : struct pp_power_state *ps;
3468 : struct smu7_power_state *smu7_ps;
3469 :
3470 0 : if (hwmgr == NULL)
3471 : return -EINVAL;
3472 :
3473 0 : ps = hwmgr->request_ps;
3474 :
3475 0 : if (ps == NULL)
3476 : return -EINVAL;
3477 :
3478 0 : smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3479 :
3480 0 : if (low)
3481 0 : return smu7_ps->performance_levels[0].engine_clock;
3482 : else
3483 : return smu7_ps->performance_levels
3484 0 : [smu7_ps->performance_level_count-1].engine_clock;
3485 : }
3486 :
3487 0 : static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3488 : struct pp_hw_power_state *hw_ps)
3489 : {
3490 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3491 0 : struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3492 : ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3493 : uint16_t size;
3494 : uint8_t frev, crev;
3495 0 : int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3496 :
3497 : /* First retrieve the Boot clocks and VDDC from the firmware info table.
3498 : * We assume here that fw_info is unchanged if this call fails.
3499 : */
3500 0 : fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3501 : &size, &frev, &crev);
3502 0 : if (!fw_info)
3503 : /* During a test, there is no firmware info table. */
3504 : return 0;
3505 :
3506 : /* Patch the state. */
3507 0 : data->vbios_boot_state.sclk_bootup_value =
3508 0 : le32_to_cpu(fw_info->ulDefaultEngineClock);
3509 0 : data->vbios_boot_state.mclk_bootup_value =
3510 0 : le32_to_cpu(fw_info->ulDefaultMemoryClock);
3511 0 : data->vbios_boot_state.mvdd_bootup_value =
3512 0 : le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3513 0 : data->vbios_boot_state.vddc_bootup_value =
3514 0 : le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3515 0 : data->vbios_boot_state.vddci_bootup_value =
3516 0 : le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3517 0 : data->vbios_boot_state.pcie_gen_bootup_value =
3518 0 : smu7_get_current_pcie_speed(hwmgr);
3519 :
3520 0 : data->vbios_boot_state.pcie_lane_bootup_value =
3521 0 : (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3522 :
3523 : /* set boot power state */
3524 0 : ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3525 0 : ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3526 0 : ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3527 0 : ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3528 :
3529 0 : return 0;
3530 : }
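
/*
 * Illustrative sketch (editor's addition): the VBIOS firmware-info table
 * is little-endian, so every multi-byte field above passes through
 * le32_to_cpu()/le16_to_cpu(). A userspace-portable equivalent of the
 * 32-bit conversion:
 */
#if 0
#include <stdint.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}
#endif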
3531 :
3532 0 : static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3533 : {
3534 : int result;
3535 0 : unsigned long ret = 0;
3536 :
3537 0 : if (hwmgr->pp_table_version == PP_TABLE_V0) {
3538 0 : result = pp_tables_get_num_of_entries(hwmgr, &ret);
3539 0 : return result ? 0 : ret;
3540 0 : } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3541 0 : result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3542 0 : return result;
3543 : }
3544 : return 0;
3545 : }
3546 :
3547 0 : static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3548 : void *state, struct pp_power_state *power_state,
3549 : void *pp_table, uint32_t classification_flag)
3550 : {
3551 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3552 0 : struct smu7_power_state *smu7_power_state =
3553 : (struct smu7_power_state *)(&(power_state->hardware));
3554 : struct smu7_performance_level *performance_level;
3555 0 : ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3556 0 : ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3557 : (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3558 0 : PPTable_Generic_SubTable_Header *sclk_dep_table =
3559 : (PPTable_Generic_SubTable_Header *)
3560 0 : (((unsigned long)powerplay_table) +
3561 0 : le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3562 :
3563 0 : ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3564 : (ATOM_Tonga_MCLK_Dependency_Table *)
3565 0 : (((unsigned long)powerplay_table) +
3566 0 : le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3567 :
3568 : 	/* The following fields are not initialized here: id, orderedList, allStatesList. */
3569 0 : power_state->classification.ui_label =
3570 0 : (le16_to_cpu(state_entry->usClassification) &
3571 0 : ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3572 : ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3573 0 : power_state->classification.flags = classification_flag;
3574 : /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3575 :
3576 0 : power_state->classification.temporary_state = false;
3577 0 : power_state->classification.to_be_deleted = false;
3578 :
3579 0 : power_state->validation.disallowOnDC =
3580 0 : (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3581 : ATOM_Tonga_DISALLOW_ON_DC));
3582 :
3583 0 : power_state->pcie.lanes = 0;
3584 :
3585 0 : power_state->display.disableFrameModulation = false;
3586 0 : power_state->display.limitRefreshrate = false;
3587 0 : power_state->display.enableVariBright =
3588 0 : (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3589 : ATOM_Tonga_ENABLE_VARIBRIGHT));
3590 :
3591 0 : power_state->validation.supportedPowerLevels = 0;
3592 0 : power_state->uvd_clocks.VCLK = 0;
3593 0 : power_state->uvd_clocks.DCLK = 0;
3594 0 : power_state->temperatures.min = 0;
3595 0 : power_state->temperatures.max = 0;
3596 :
3597 0 : performance_level = &(smu7_power_state->performance_levels
3598 0 : [smu7_power_state->performance_level_count++]);
3599 :
3600 0 : PP_ASSERT_WITH_CODE(
3601 : (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3602 : 			"Performance levels exceed SMC limit!",
3603 : return -EINVAL);
3604 :
3605 0 : PP_ASSERT_WITH_CODE(
3606 : (smu7_power_state->performance_level_count <=
3607 : hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3608 : 			"Performance levels exceed Driver limit!",
3609 : return -EINVAL);
3610 :
3611 : /* Performance levels are arranged from low to high. */
3612 0 : performance_level->memory_clock = mclk_dep_table->entries
3613 0 : [state_entry->ucMemoryClockIndexLow].ulMclk;
3614 0 : if (sclk_dep_table->ucRevId == 0)
3615 0 : performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3616 0 : [state_entry->ucEngineClockIndexLow].ulSclk;
3617 0 : else if (sclk_dep_table->ucRevId == 1)
3618 0 : performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3619 0 : [state_entry->ucEngineClockIndexLow].ulSclk;
3620 0 : performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3621 0 : state_entry->ucPCIEGenLow);
3622 0 : performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3623 0 : state_entry->ucPCIELaneLow);
3624 :
3625 0 : performance_level = &(smu7_power_state->performance_levels
3626 0 : [smu7_power_state->performance_level_count++]);
3627 0 : performance_level->memory_clock = mclk_dep_table->entries
3628 0 : [state_entry->ucMemoryClockIndexHigh].ulMclk;
3629 :
3630 0 : if (sclk_dep_table->ucRevId == 0)
3631 0 : performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3632 0 : [state_entry->ucEngineClockIndexHigh].ulSclk;
3633 0 : else if (sclk_dep_table->ucRevId == 1)
3634 0 : performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3635 0 : [state_entry->ucEngineClockIndexHigh].ulSclk;
3636 :
3637 0 : performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3638 0 : state_entry->ucPCIEGenHigh);
3639 0 : performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3640 0 : state_entry->ucPCIELaneHigh);
3641 :
3642 0 : return 0;
3643 : }
3644 :
3645 0 : static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3646 : unsigned long entry_index, struct pp_power_state *state)
3647 : {
3648 : int result;
3649 : struct smu7_power_state *ps;
3650 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3651 0 : struct phm_ppt_v1_information *table_info =
3652 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
3653 0 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3654 : table_info->vdd_dep_on_mclk;
3655 :
3656 0 : state->hardware.magic = PHM_VIslands_Magic;
3657 :
3658 0 : ps = (struct smu7_power_state *)(&state->hardware);
3659 :
3660 0 : result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3661 : smu7_get_pp_table_entry_callback_func_v1);
3662 :
3663 : 	/* This is the earliest time we have all the dependency tables and
3664 : 	 * the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
3665 : 	 * retrieves them. If there is only one VDDCI/MCLK level, check
3666 : 	 * that it matches the VBIOS boot state. */
3667 : if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3668 : if (dep_mclk_table->entries[0].clk !=
3669 : data->vbios_boot_state.mclk_bootup_value)
3670 : pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3671 : "does not match VBIOS boot MCLK level");
3672 : if (dep_mclk_table->entries[0].vddci !=
3673 : data->vbios_boot_state.vddci_bootup_value)
3674 : pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3675 : "does not match VBIOS boot VDDCI level");
3676 : }
3677 :
3678 : /* set DC compatible flag if this state supports DC */
3679 0 : if (!state->validation.disallowOnDC)
3680 0 : ps->dc_compatible = true;
3681 :
3682 0 : if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3683 0 : data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3684 :
3685 0 : ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3686 0 : ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3687 :
3688 0 : if (!result) {
3689 : uint32_t i;
3690 :
3691 0 : switch (state->classification.ui_label) {
3692 : case PP_StateUILabel_Performance:
3693 0 : data->use_pcie_performance_levels = true;
3694 0 : for (i = 0; i < ps->performance_level_count; i++) {
3695 0 : if (data->pcie_gen_performance.max <
3696 0 : ps->performance_levels[i].pcie_gen)
3697 0 : data->pcie_gen_performance.max =
3698 : ps->performance_levels[i].pcie_gen;
3699 :
3700 0 : if (data->pcie_gen_performance.min >
3701 0 : ps->performance_levels[i].pcie_gen)
3702 0 : data->pcie_gen_performance.min =
3703 : ps->performance_levels[i].pcie_gen;
3704 :
3705 0 : if (data->pcie_lane_performance.max <
3706 0 : ps->performance_levels[i].pcie_lane)
3707 0 : data->pcie_lane_performance.max =
3708 : ps->performance_levels[i].pcie_lane;
3709 0 : if (data->pcie_lane_performance.min >
3710 0 : ps->performance_levels[i].pcie_lane)
3711 0 : data->pcie_lane_performance.min =
3712 : ps->performance_levels[i].pcie_lane;
3713 : }
3714 : break;
3715 : case PP_StateUILabel_Battery:
3716 0 : data->use_pcie_power_saving_levels = true;
3717 :
3718 0 : for (i = 0; i < ps->performance_level_count; i++) {
3719 0 : if (data->pcie_gen_power_saving.max <
3720 0 : ps->performance_levels[i].pcie_gen)
3721 0 : data->pcie_gen_power_saving.max =
3722 : ps->performance_levels[i].pcie_gen;
3723 :
3724 0 : if (data->pcie_gen_power_saving.min >
3725 0 : ps->performance_levels[i].pcie_gen)
3726 0 : data->pcie_gen_power_saving.min =
3727 : ps->performance_levels[i].pcie_gen;
3728 :
3729 0 : if (data->pcie_lane_power_saving.max <
3730 0 : ps->performance_levels[i].pcie_lane)
3731 0 : data->pcie_lane_power_saving.max =
3732 : ps->performance_levels[i].pcie_lane;
3733 :
3734 0 : if (data->pcie_lane_power_saving.min >
3735 0 : ps->performance_levels[i].pcie_lane)
3736 0 : data->pcie_lane_power_saving.min =
3737 : ps->performance_levels[i].pcie_lane;
3738 : }
3739 : break;
3740 : default:
3741 : break;
3742 : }
3743 : }
3744 0 : return 0;
3745 : }
3746 :
3747 0 : static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3748 : struct pp_hw_power_state *power_state,
3749 : unsigned int index, const void *clock_info)
3750 : {
3751 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3752 0 : struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
3753 0 : const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3754 : struct smu7_performance_level *performance_level;
3755 : uint32_t engine_clock, memory_clock;
3756 : uint16_t pcie_gen_from_bios;
3757 :
3758 0 : engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3759 0 : memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3760 :
3761 0 : if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3762 0 : data->highest_mclk = memory_clock;
3763 :
3764 0 : PP_ASSERT_WITH_CODE(
3765 : (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3766 : 		"Performance levels exceed SMC limit!",
3767 : return -EINVAL);
3768 :
3769 0 : PP_ASSERT_WITH_CODE(
3770 : (ps->performance_level_count <
3771 : hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3772 : 		"Performance levels exceed Driver limit, skipping!",
3773 : return 0);
3774 :
3775 0 : performance_level = &(ps->performance_levels
3776 0 : [ps->performance_level_count++]);
3777 :
3778 : /* Performance levels are arranged from low to high. */
3779 0 : performance_level->memory_clock = memory_clock;
3780 0 : performance_level->engine_clock = engine_clock;
3781 :
3782 0 : pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3783 :
3784 0 : performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3785 0 : performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3786 :
3787 0 : return 0;
3788 : }
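
/*
 * Illustrative sketch (editor's addition): the v0 clock-info entries split
 * each clock into a 16-bit low word and an 8-bit high byte; the callback
 * above recombines them as (high << 16) | low.
 */
#if 0
#include <stdint.h>

static uint32_t combine_clock(uint8_t high, uint16_t low)
{
	return ((uint32_t)high << 16) | low;
}
/* e.g. combine_clock(0x01, 0x86A0) == 0x186A0 == 100000
 * (1 GHz, if clocks count in 10 kHz units as suggested earlier) */
#endif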
3789 :
3790 0 : static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3791 : unsigned long entry_index, struct pp_power_state *state)
3792 : {
3793 : int result;
3794 : struct smu7_power_state *ps;
3795 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3796 0 : struct phm_clock_voltage_dependency_table *dep_mclk_table =
3797 : hwmgr->dyn_state.vddci_dependency_on_mclk;
3798 :
3799 0 : memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3800 :
3801 0 : state->hardware.magic = PHM_VIslands_Magic;
3802 :
3803 0 : ps = (struct smu7_power_state *)(&state->hardware);
3804 :
3805 0 : result = pp_tables_get_entry(hwmgr, entry_index, state,
3806 : smu7_get_pp_table_entry_callback_func_v0);
3807 :
3808 : /*
3809 : 	 * This is the earliest time we have all the dependency tables
3810 : 	 * and the VBIOS boot state, since
3811 : 	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3812 : 	 * state. If there is only one VDDCI/MCLK level, check that it
3813 : 	 * matches the VBIOS boot state.
3814 : */
3815 : if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3816 : if (dep_mclk_table->entries[0].clk !=
3817 : data->vbios_boot_state.mclk_bootup_value)
3818 : pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3819 : "does not match VBIOS boot MCLK level");
3820 : if (dep_mclk_table->entries[0].v !=
3821 : data->vbios_boot_state.vddci_bootup_value)
3822 : pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3823 : "does not match VBIOS boot VDDCI level");
3824 : }
3825 :
3826 : /* set DC compatible flag if this state supports DC */
3827 0 : if (!state->validation.disallowOnDC)
3828 0 : ps->dc_compatible = true;
3829 :
3830 0 : if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3831 0 : data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3832 :
3833 0 : ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3834 0 : ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3835 :
3836 0 : if (!result) {
3837 : uint32_t i;
3838 :
3839 0 : switch (state->classification.ui_label) {
3840 : case PP_StateUILabel_Performance:
3841 0 : data->use_pcie_performance_levels = true;
3842 :
3843 0 : for (i = 0; i < ps->performance_level_count; i++) {
3844 0 : if (data->pcie_gen_performance.max <
3845 0 : ps->performance_levels[i].pcie_gen)
3846 0 : data->pcie_gen_performance.max =
3847 : ps->performance_levels[i].pcie_gen;
3848 :
3849 0 : if (data->pcie_gen_performance.min >
3850 0 : ps->performance_levels[i].pcie_gen)
3851 0 : data->pcie_gen_performance.min =
3852 : ps->performance_levels[i].pcie_gen;
3853 :
3854 0 : if (data->pcie_lane_performance.max <
3855 0 : ps->performance_levels[i].pcie_lane)
3856 0 : data->pcie_lane_performance.max =
3857 : ps->performance_levels[i].pcie_lane;
3858 :
3859 0 : if (data->pcie_lane_performance.min >
3860 0 : ps->performance_levels[i].pcie_lane)
3861 0 : data->pcie_lane_performance.min =
3862 : ps->performance_levels[i].pcie_lane;
3863 : }
3864 : break;
3865 : case PP_StateUILabel_Battery:
3866 0 : data->use_pcie_power_saving_levels = true;
3867 :
3868 0 : for (i = 0; i < ps->performance_level_count; i++) {
3869 0 : if (data->pcie_gen_power_saving.max <
3870 0 : ps->performance_levels[i].pcie_gen)
3871 0 : data->pcie_gen_power_saving.max =
3872 : ps->performance_levels[i].pcie_gen;
3873 :
3874 0 : if (data->pcie_gen_power_saving.min >
3875 0 : ps->performance_levels[i].pcie_gen)
3876 0 : data->pcie_gen_power_saving.min =
3877 : ps->performance_levels[i].pcie_gen;
3878 :
3879 0 : if (data->pcie_lane_power_saving.max <
3880 0 : ps->performance_levels[i].pcie_lane)
3881 0 : data->pcie_lane_power_saving.max =
3882 : ps->performance_levels[i].pcie_lane;
3883 :
3884 0 : if (data->pcie_lane_power_saving.min >
3885 0 : ps->performance_levels[i].pcie_lane)
3886 0 : data->pcie_lane_power_saving.min =
3887 : ps->performance_levels[i].pcie_lane;
3888 : }
3889 : break;
3890 : default:
3891 : break;
3892 : }
3893 : }
3894 0 : return 0;
3895 : }
3896 :
3897 0 : static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3898 : unsigned long entry_index, struct pp_power_state *state)
3899 : {
3900 0 : if (hwmgr->pp_table_version == PP_TABLE_V0)
3901 0 : return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3902 0 : else if (hwmgr->pp_table_version == PP_TABLE_V1)
3903 0 : return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3904 :
3905 : return 0;
3906 : }
3907 :
3908 0 : static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3909 : {
3910 0 : struct amdgpu_device *adev = hwmgr->adev;
3911 : int i;
3912 0 : u32 tmp = 0;
3913 :
3914 0 : if (!query)
3915 : return -EINVAL;
3916 :
3917 : /*
3918 : * PPSMC_MSG_GetCurrPkgPwr is not supported on:
3919 : * - Hawaii
3920 : * - Bonaire
3921 : * - Fiji
3922 : * - Tonga
3923 : */
3924 0 : if ((adev->asic_type != CHIP_HAWAII) &&
3925 0 : (adev->asic_type != CHIP_BONAIRE) &&
3926 0 : (adev->asic_type != CHIP_FIJI) &&
3927 : (adev->asic_type != CHIP_TONGA)) {
3928 0 : smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
3929 0 : *query = tmp;
3930 :
3931 0 : if (tmp != 0)
3932 : return 0;
3933 : }
3934 :
3935 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
3936 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3937 : ixSMU_PM_STATUS_95, 0);
3938 :
3939 0 : for (i = 0; i < 10; i++) {
3940 0 : msleep(500);
3941 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
3942 0 : tmp = cgs_read_ind_register(hwmgr->device,
3943 : CGS_IND_REG__SMC,
3944 : ixSMU_PM_STATUS_95);
3945 0 : if (tmp != 0)
3946 : break;
3947 : }
3948 0 : *query = tmp;
3949 :
3950 0 : return 0;
3951 : }
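
/*
 * Illustrative sketch (editor's addition): when the direct GetCurrPkgPwr
 * query is unsupported or reads back zero, the function above falls back
 * to a start-log / sample / poll sequence. The retry shape is the common
 * poll-until-nonzero pattern; poll_fn below is a hypothetical stand-in for
 * the SMC register read.
 */
#if 0
#include <stdint.h>

static uint32_t poll_nonzero(uint32_t (*poll_fn)(void), int tries)
{
	uint32_t val = 0;

	while (tries-- > 0) {
		val = poll_fn();
		if (val)
			break;
		/* the driver sleeps 500 ms between samples (msleep(500)) */
	}
	return val;	/* still zero if every sample came back empty */
}
#endif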
3952 :
3953 0 : static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3954 : void *value, int *size)
3955 : {
3956 : uint32_t sclk, mclk, activity_percent;
3957 : uint32_t offset, val_vid;
3958 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3959 :
3960 : /* size must be at least 4 bytes for all sensors */
3961 0 : if (*size < 4)
3962 : return -EINVAL;
3963 :
3964 0 : switch (idx) {
3965 : case AMDGPU_PP_SENSOR_GFX_SCLK:
3966 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
3967 0 : *((uint32_t *)value) = sclk;
3968 0 : *size = 4;
3969 0 : return 0;
3970 : case AMDGPU_PP_SENSOR_GFX_MCLK:
3971 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
3972 0 : *((uint32_t *)value) = mclk;
3973 0 : *size = 4;
3974 0 : return 0;
3975 : case AMDGPU_PP_SENSOR_GPU_LOAD:
3976 : case AMDGPU_PP_SENSOR_MEM_LOAD:
3977 0 : offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
3978 : SMU_SoftRegisters,
3979 : (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
3980 : AverageGraphicsActivity:
3981 : AverageMemoryActivity);
3982 :
3983 0 : activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
3984 0 : activity_percent += 0x80;
3985 0 : activity_percent >>= 8;
3986 0 : *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3987 0 : *size = 4;
3988 0 : return 0;
3989 : case AMDGPU_PP_SENSOR_GPU_TEMP:
3990 0 : *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
3991 0 : *size = 4;
3992 0 : return 0;
3993 : case AMDGPU_PP_SENSOR_UVD_POWER:
3994 0 : *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3995 0 : *size = 4;
3996 0 : return 0;
3997 : case AMDGPU_PP_SENSOR_VCE_POWER:
3998 0 : *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3999 0 : *size = 4;
4000 0 : return 0;
4001 : case AMDGPU_PP_SENSOR_GPU_POWER:
4002 0 : return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
4003 : case AMDGPU_PP_SENSOR_VDDGFX:
4004 0 : if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
4005 : (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
4006 0 : val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4007 : CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
4008 : else
4009 0 : val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4010 : CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
4011 :
4012 0 : *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
4013 0 : return 0;
4014 : default:
4015 : return -EOPNOTSUPP;
4016 : }
4017 : }
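
/*
 * Illustrative note (editor's addition): the activity counter appears to
 * be 8.8 fixed point. Adding 0x80 before the >> 8 rounds to the nearest
 * whole percent instead of truncating, and the result is clamped to 100.
 */
#if 0
#include <stdint.h>

static uint32_t fixed_8_8_to_percent(uint32_t raw)
{
	uint32_t pct = (raw + 0x80) >> 8;	/* round to nearest */

	return pct > 100 ? 100 : pct;
}
/* e.g. raw 0x3A80 (58.5) -> 59; raw 0x3A7F (~58.49) -> 58 */
#endif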
4018 :
4019 0 : static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4020 : {
4021 0 : const struct phm_set_power_state_input *states =
4022 : (const struct phm_set_power_state_input *)input;
4023 0 : const struct smu7_power_state *smu7_ps =
4024 0 : cast_const_phw_smu7_power_state(states->pnew_state);
4025 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4026 0 : struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4027 0 : uint32_t sclk = smu7_ps->performance_levels
4028 0 : [smu7_ps->performance_level_count - 1].engine_clock;
4029 0 : struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4030 0 : uint32_t mclk = smu7_ps->performance_levels
4031 : [smu7_ps->performance_level_count - 1].memory_clock;
4032 0 : struct PP_Clocks min_clocks = {0};
4033 : uint32_t i;
4034 :
4035 0 : for (i = 0; i < sclk_table->count; i++) {
4036 0 : if (sclk == sclk_table->dpm_levels[i].value)
4037 : break;
4038 : }
4039 :
4040 0 : if (i >= sclk_table->count) {
4041 0 : if (sclk > sclk_table->dpm_levels[i-1].value) {
4042 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4043 0 : sclk_table->dpm_levels[i-1].value = sclk;
4044 : }
4045 : } else {
4046 : /* TODO: Check SCLK in DAL's minimum clocks
4047 : * in case DeepSleep divider update is required.
4048 : */
4049 0 : if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
4050 0 : (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
4051 : data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4052 0 : data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4053 : }
4054 :
4055 0 : for (i = 0; i < mclk_table->count; i++) {
4056 0 : if (mclk == mclk_table->dpm_levels[i].value)
4057 : break;
4058 : }
4059 :
4060 0 : if (i >= mclk_table->count) {
4061 0 : if (mclk > mclk_table->dpm_levels[i-1].value) {
4062 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4063 0 : mclk_table->dpm_levels[i-1].value = mclk;
4064 : }
4065 : }
4066 :
4067 0 : if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4068 0 : data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4069 :
4070 0 : return 0;
4071 : }
4072 :
4073 : static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4074 : const struct smu7_power_state *smu7_ps)
4075 : {
4076 : uint32_t i;
4077 0 : uint32_t sclk, max_sclk = 0;
4078 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4079 0 : struct smu7_dpm_table *dpm_table = &data->dpm_table;
4080 :
4081 0 : for (i = 0; i < smu7_ps->performance_level_count; i++) {
4082 0 : sclk = smu7_ps->performance_levels[i].engine_clock;
4083 0 : if (max_sclk < sclk)
4084 0 : max_sclk = sclk;
4085 : }
4086 :
4087 0 : for (i = 0; i < dpm_table->sclk_table.count; i++) {
4088 0 : if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4089 0 : return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4090 : dpm_table->pcie_speed_table.dpm_levels
4091 0 : [dpm_table->pcie_speed_table.count - 1].value :
4092 0 : dpm_table->pcie_speed_table.dpm_levels[i].value);
4093 : }
4094 :
4095 : return 0;
4096 : }
4097 :
4098 0 : static int smu7_request_link_speed_change_before_state_change(
4099 : struct pp_hwmgr *hwmgr, const void *input)
4100 : {
4101 0 : const struct phm_set_power_state_input *states =
4102 : (const struct phm_set_power_state_input *)input;
4103 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4104 0 : const struct smu7_power_state *smu7_nps =
4105 0 : cast_const_phw_smu7_power_state(states->pnew_state);
4106 0 : const struct smu7_power_state *polaris10_cps =
4107 0 : cast_const_phw_smu7_power_state(states->pcurrent_state);
4108 :
4109 0 : uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
4110 : uint16_t current_link_speed;
4111 :
4112 0 : if (data->force_pcie_gen == PP_PCIEGenInvalid)
4113 : current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
4114 : else
4115 : current_link_speed = data->force_pcie_gen;
4116 :
4117 0 : data->force_pcie_gen = PP_PCIEGenInvalid;
4118 0 : data->pspp_notify_required = false;
4119 :
4120 0 : if (target_link_speed > current_link_speed) {
4121 : switch (target_link_speed) {
4122 : #ifdef CONFIG_ACPI
4123 : case PP_PCIEGen3:
4124 : if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
4125 : break;
4126 : data->force_pcie_gen = PP_PCIEGen2;
4127 : if (current_link_speed == PP_PCIEGen2)
4128 : break;
4129 : fallthrough;
4130 : case PP_PCIEGen2:
4131 : if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
4132 : break;
4133 : fallthrough;
4134 : #endif
4135 : default:
4136 0 : data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
4137 0 : break;
4138 : }
4139 : } else {
4140 0 : if (target_link_speed < current_link_speed)
4141 0 : data->pspp_notify_required = true;
4142 : }
4143 :
4144 0 : return 0;
4145 : }
4146 :
4147 0 : static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4148 : {
4149 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4150 :
4151 0 : if (0 == data->need_update_smu7_dpm_table)
4152 : return 0;
4153 :
4154 0 : if ((0 == data->sclk_dpm_key_disabled) &&
4155 0 : (data->need_update_smu7_dpm_table &
4156 : (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4157 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4158 : "Trying to freeze SCLK DPM when DPM is disabled",
4159 : );
4160 0 : PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4161 : PPSMC_MSG_SCLKDPM_FreezeLevel,
4162 : NULL),
4163 : "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4164 : return -EINVAL);
4165 : }
4166 :
4167 0 : if ((0 == data->mclk_dpm_key_disabled) &&
4168 0 : !data->mclk_ignore_signal &&
4169 0 : (data->need_update_smu7_dpm_table &
4170 : DPMTABLE_OD_UPDATE_MCLK)) {
4171 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4172 : "Trying to freeze MCLK DPM when DPM is disabled",
4173 : );
4174 0 : PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4175 : PPSMC_MSG_MCLKDPM_FreezeLevel,
4176 : NULL),
4177 : "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4178 : return -EINVAL);
4179 : }
4180 :
4181 : return 0;
4182 : }
4183 :
4184 0 : static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
4185 : struct pp_hwmgr *hwmgr, const void *input)
4186 : {
4187 0 : int result = 0;
4188 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4189 0 : struct smu7_dpm_table *dpm_table = &data->dpm_table;
4190 : uint32_t count;
4191 0 : struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4192 0 : struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4193 0 : struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4194 :
4195 0 : if (0 == data->need_update_smu7_dpm_table)
4196 : return 0;
4197 :
4198 0 : if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4199 0 : for (count = 0; count < dpm_table->sclk_table.count; count++) {
4200 0 : dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
4201 0 : dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
4202 : }
4203 : }
4204 :
4205 0 : if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4206 0 : for (count = 0; count < dpm_table->mclk_table.count; count++) {
4207 0 : dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
4208 0 : dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
4209 : }
4210 : }
4211 :
4212 0 : if (data->need_update_smu7_dpm_table &
4213 : (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4214 0 : result = smum_populate_all_graphic_levels(hwmgr);
4215 0 : PP_ASSERT_WITH_CODE((0 == result),
4216 : "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4217 : return result);
4218 : }
4219 :
4220 0 : if (data->need_update_smu7_dpm_table &
4221 : (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
4222 : 		/* Populate the MCLK DPM table to SMU7. */
4223 0 : result = smum_populate_all_memory_levels(hwmgr);
4224 0 : PP_ASSERT_WITH_CODE((0 == result),
4225 : "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4226 : return result);
4227 : }
4228 :
4229 : return result;
4230 : }
4231 :
4232 : static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4233 : struct smu7_single_dpm_table *dpm_table,
4234 : uint32_t low_limit, uint32_t high_limit)
4235 : {
4236 : uint32_t i;
4237 :
4238 : 	/* Force the trim if MCLK switching is disabled, to prevent flicker. */
4239 : bool force_trim = (low_limit == high_limit);
4240 0 : for (i = 0; i < dpm_table->count; i++) {
4241 : 		/* Skip the trim if OD (overdrive) is enabled. */
4242 0 : if ((!hwmgr->od_enabled || force_trim)
4243 0 : && (dpm_table->dpm_levels[i].value < low_limit
4244 0 : || dpm_table->dpm_levels[i].value > high_limit))
4245 0 : dpm_table->dpm_levels[i].enabled = false;
4246 : else
4247 0 : dpm_table->dpm_levels[i].enabled = true;
4248 : }
4249 :
4250 : return 0;
4251 : }
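
/*
 * Illustrative sketch (editor's addition): trimming enables a DPM level
 * only if its value lies inside [low_limit, high_limit]; with overdrive
 * enabled the trim is skipped unless low == high (the forced, anti-flicker
 * case). A condensed model of the per-level predicate:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool level_enabled(uint32_t value, uint32_t low, uint32_t high,
			  bool od_enabled)
{
	bool force_trim = (low == high);

	if (od_enabled && !force_trim)
		return true;	/* leave user overdrive levels untouched */
	return value >= low && value <= high;
}
#endif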
4252 :
4253 0 : static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
4254 : const struct smu7_power_state *smu7_ps)
4255 : {
4256 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4257 : uint32_t high_limit_count;
4258 :
4259 0 : PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
4260 : "power state did not have any performance level",
4261 : return -EINVAL);
4262 :
4263 0 : high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
4264 :
4265 0 : smu7_trim_single_dpm_states(hwmgr,
4266 : &(data->dpm_table.sclk_table),
4267 : smu7_ps->performance_levels[0].engine_clock,
4268 : smu7_ps->performance_levels[high_limit_count].engine_clock);
4269 :
4270 0 : smu7_trim_single_dpm_states(hwmgr,
4271 : &(data->dpm_table.mclk_table),
4272 : smu7_ps->performance_levels[0].memory_clock,
4273 : smu7_ps->performance_levels[high_limit_count].memory_clock);
4274 :
4275 : return 0;
4276 : }
4277 :
4278 0 : static int smu7_generate_dpm_level_enable_mask(
4279 : struct pp_hwmgr *hwmgr, const void *input)
4280 : {
4281 0 : int result = 0;
4282 0 : const struct phm_set_power_state_input *states =
4283 : (const struct phm_set_power_state_input *)input;
4284 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4285 0 : const struct smu7_power_state *smu7_ps =
4286 0 : cast_const_phw_smu7_power_state(states->pnew_state);
4287 :
4288 :
4289 0 : result = smu7_trim_dpm_states(hwmgr, smu7_ps);
4290 0 : if (result)
4291 : return result;
4292 :
4293 0 : data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4294 0 : phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4295 0 : data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4296 0 : phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4297 0 : data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4298 0 : phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4299 :
4300 0 : return 0;
4301 : }
4302 :
4303 0 : static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4304 : {
4305 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4306 :
4307 0 : if (0 == data->need_update_smu7_dpm_table)
4308 : return 0;
4309 :
4310 0 : if ((0 == data->sclk_dpm_key_disabled) &&
4311 0 : (data->need_update_smu7_dpm_table &
4312 : (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4313 :
4314 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4315 : "Trying to Unfreeze SCLK DPM when DPM is disabled",
4316 : );
4317 0 : PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4318 : PPSMC_MSG_SCLKDPM_UnfreezeLevel,
4319 : NULL),
4320 : "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4321 : return -EINVAL);
4322 : }
4323 :
4324 0 : if ((0 == data->mclk_dpm_key_disabled) &&
4325 0 : !data->mclk_ignore_signal &&
4326 0 : (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4327 :
4328 0 : PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4329 : "Trying to Unfreeze MCLK DPM when DPM is disabled",
4330 : );
4331 0 : PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4332 : PPSMC_MSG_MCLKDPM_UnfreezeLevel,
4333 : NULL),
4334 : "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4335 : return -EINVAL);
4336 : }
4337 :
4338 0 : data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
4339 :
4340 0 : return 0;
4341 : }
4342 :
4343 0 : static int smu7_notify_link_speed_change_after_state_change(
4344 : struct pp_hwmgr *hwmgr, const void *input)
4345 : {
4346 0 : const struct phm_set_power_state_input *states =
4347 : (const struct phm_set_power_state_input *)input;
4348 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4349 0 : const struct smu7_power_state *smu7_ps =
4350 0 : cast_const_phw_smu7_power_state(states->pnew_state);
4351 0 : uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
4352 : uint8_t request;
4353 :
4354 0 : if (data->pspp_notify_required) {
4355 0 : if (target_link_speed == PP_PCIEGen3)
4356 : request = PCIE_PERF_REQ_GEN3;
4357 0 : else if (target_link_speed == PP_PCIEGen2)
4358 : request = PCIE_PERF_REQ_GEN2;
4359 : else
4360 0 : request = PCIE_PERF_REQ_GEN1;
4361 :
4362 0 : if (request == PCIE_PERF_REQ_GEN1 &&
4363 0 : smu7_get_current_pcie_speed(hwmgr) > 0)
4364 : return 0;
4365 :
4366 : #ifdef CONFIG_ACPI
4367 : if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
4368 : if (PP_PCIEGen2 == target_link_speed)
4369 : pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
4370 : else
4371 : pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
4372 : }
4373 : #endif
4374 : }
4375 :
4376 : return 0;
4377 : }
4378 :
4379 : static int smu7_notify_no_display(struct pp_hwmgr *hwmgr)
4380 : {
4381 0 : return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ? 0 : -EINVAL;
4382 : }
4383 :
4384 0 : static int smu7_notify_has_display(struct pp_hwmgr *hwmgr)
4385 : {
4386 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4387 :
4388 0 : if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
4389 0 : if (hwmgr->chip_id == CHIP_VEGAM)
4390 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
4391 : (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
4392 : NULL);
4393 : else
4394 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
4395 : (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
4396 : NULL);
4397 0 : data->last_sent_vbi_timeout = data->frame_time_x2;
4398 : }
4399 :
4400 0 : return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
4401 : }
4402 :
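     : /*
     :  * Report display presence to the SMC: when MCLK switch signals are
     :  * being ignored the "no display" message is sent, otherwise "has
     :  * display" is sent along with the current VBI timeout.
     :  */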
4403 0 : static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
4404 : {
4405 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4406 0 : int result = 0;
4407 :
4408 0 : if (data->mclk_ignore_signal)
4409 : result = smu7_notify_no_display(hwmgr);
4410 : else
4411 0 : result = smu7_notify_has_display(hwmgr);
4412 :
4413 0 : return result;
4414 : }
4415 :
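     : /*
     :  * Top-level power-state commit: freeze DPM, upload the new SCLK/MCLK
     :  * levels, refresh AVFS and the level-enable masks, then unfreeze DPM
     :  * and notify the SMC and (optionally) the platform PCIe controller.
     :  * A failing step's code is propagated through the return value while
     :  * the remaining steps are still attempted.
     :  */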
4416 0 : static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4417 : {
4418 0 : int tmp_result, result = 0;
4419 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4420 :
4421 0 : tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
4422 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4423 : "Failed to find DPM states clocks in DPM table!",
4424 : result = tmp_result);
4425 :
4426 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4427 : PHM_PlatformCaps_PCIEPerformanceRequest)) {
4428 0 : tmp_result =
4429 : smu7_request_link_speed_change_before_state_change(hwmgr, input);
4430 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4431 : "Failed to request link speed change before state change!",
4432 : result = tmp_result);
4433 : }
4434 :
4435 0 : tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
4436 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4437 : "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
4438 :
4439 0 : tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
4440 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4441 : "Failed to populate and upload SCLK MCLK DPM levels!",
4442 : result = tmp_result);
4443 :
4444 : /*
4445 : * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
4446 : * That effectively disables AVFS feature.
4447 : */
4448 0 : if (hwmgr->hardcode_pp_table != NULL)
4449 0 : data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4450 :
4451 0 : tmp_result = smu7_update_avfs(hwmgr);
4452 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4453 : "Failed to update avfs voltages!",
4454 : result = tmp_result);
4455 :
4456 0 : tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
4457 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4458 : "Failed to generate DPM level enabled mask!",
4459 : result = tmp_result);
4460 :
4461 0 : tmp_result = smum_update_sclk_threshold(hwmgr);
4462 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4463 : "Failed to update SCLK threshold!",
4464 : result = tmp_result);
4465 :
4466 0 : tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4467 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4468 : "Failed to unfreeze SCLK MCLK DPM!",
4469 : result = tmp_result);
4470 :
4471 0 : tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
4472 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4473 : "Failed to upload DPM level enabled mask!",
4474 : result = tmp_result);
4475 :
4476 0 : tmp_result = smu7_notify_smc_display(hwmgr);
4477 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4478 : "Failed to notify smc display settings!",
4479 : result = tmp_result);
4480 :
4481 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4482 : PHM_PlatformCaps_PCIEPerformanceRequest)) {
4483 0 : tmp_result =
4484 : smu7_notify_link_speed_change_after_state_change(hwmgr, input);
4485 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4486 : "Failed to notify link speed change after state change!",
4487 : result = tmp_result);
4488 : }
4489 0 : data->apply_optimized_settings = false;
4490 0 : return result;
4491 : }
4492 :
4493 0 : static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4494 : {
4495 : hwmgr->thermal_controller.
4496 0 : advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4497 :
4498 0 : return smum_send_msg_to_smc_with_parameter(hwmgr,
4499 : PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
4500 : NULL);
4501 : }
4502 :
4503 : static int
4504 0 : smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4505 : {
4506 0 : return 0;
4507 : }
4508 :
4509 : /**
4510 : * smu7_program_display_gap - Programs the display gap
4511 : *
4512 : * @hwmgr: the address of the powerplay hardware manager.
4513 :  * Return: always 0
4514 : */
4515 0 : static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4516 : {
4517 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4518 0 : uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4519 : uint32_t display_gap2;
4520 : uint32_t pre_vbi_time_in_us;
4521 : uint32_t frame_time_in_us;
4522 : uint32_t ref_clock, refresh_rate;
4523 :
4524 0 : display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4525 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4526 :
4527 0 : ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4528 0 : refresh_rate = hwmgr->display_config->vrefresh;
4529 :
4530 0 : if (0 == refresh_rate)
4531 0 : refresh_rate = 60;
4532 :
4533 0 : frame_time_in_us = 1000000 / refresh_rate;
4534 :
4535 0 : pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4536 :
4537 0 : data->frame_time_x2 = frame_time_in_us * 2 / 100;
4538 :
4539 0 : if (data->frame_time_x2 < 280) {
4540 : pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4541 0 : data->frame_time_x2 = 280;
4542 : }
4543 :
4544 0 : display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4545 :
4546 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4547 :
4548 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4549 : data->soft_regs_start + smum_get_offsetof(hwmgr,
4550 : SMU_SoftRegisters,
4551 : PreVBlankGap), 0x64);
4552 :
4553 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4554 : data->soft_regs_start + smum_get_offsetof(hwmgr,
4555 : SMU_SoftRegisters,
4556 : VBlankTimeout),
4557 : (frame_time_in_us - pre_vbi_time_in_us));
4558 :
4559 0 : return 0;
4560 : }
4561 :
4562 0 : static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4563 : {
4564 0 : return smu7_program_display_gap(hwmgr);
4565 : }
4566 :
4567 : /**
4568 : * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
4569 : *
4570 : * @hwmgr: the address of the powerplay hardware manager.
4571 : * @us_max_fan_rpm: max operating fan RPM value.
4572 : * Return: The response that came from the SMC.
4573 : */
4574 0 : static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4575 : {
4576 : hwmgr->thermal_controller.
4577 0 : advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4578 :
4579 0 : return smum_send_msg_to_smc_with_parameter(hwmgr,
4580 : PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
4581 : NULL);
4582 : }
4583 :
4584 : static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
4585 : .process = phm_irq_process,
4586 : };
4587 :
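     : /*
     :  * Hook up the thermal low-to-high / high-to-low and CTF (GPIO_19)
     :  * interrupt sources; all three share a single amdgpu_irq_src backed
     :  * by phm_irq_process().
     :  */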
4588 0 : static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4589 : {
4590 0 : struct amdgpu_irq_src *source =
4591 : kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4592 :
4593 0 : if (!source)
4594 : return -ENOMEM;
4595 :
4596 0 : source->funcs = &smu7_irq_funcs;
4597 :
4598 0 : amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4599 : AMDGPU_IRQ_CLIENTID_LEGACY,
4600 : VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4601 : source);
4602 0 : amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4603 : AMDGPU_IRQ_CLIENTID_LEGACY,
4604 : VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4605 : source);
4606 :
4607 : /* Register CTF (GPIO_19) interrupt */
4608 0 : amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4609 : AMDGPU_IRQ_CLIENTID_LEGACY,
4610 : VISLANDS30_IV_SRCID_GPIO_19,
4611 : source);
4612 :
4613 0 : return 0;
4614 : }
4615 :
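     : /*
     :  * Compare the cached display timing against the current display
     :  * configuration: a change in display count, refresh rate, VBI
     :  * timeout (Polaris10 through VEGAM) or deep-sleep minimum clock
     :  * requires an SMC update.
     :  */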
4616 : static bool
4617 0 : smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4618 : {
4619 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4620 0 : bool is_update_required = false;
4621 :
4622 0 : if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4623 0 : is_update_required = true;
4624 :
4625 0 : if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4626 0 : is_update_required = true;
4627 :
4628 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
4629 0 : hwmgr->chip_id <= CHIP_VEGAM &&
4630 0 : data->last_sent_vbi_timeout != data->frame_time_x2)
4631 0 : is_update_required = true;
4632 :
4633 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4634 0 : if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4635 0 : (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4636 : hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4637 0 : is_update_required = true;
4638 : }
4639 0 : return is_update_required;
4640 : }
4641 :
4642 : static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4643 : const struct smu7_performance_level *pl2)
4644 : {
4645 0 : return ((pl1->memory_clock == pl2->memory_clock) &&
4646 0 : (pl1->engine_clock == pl2->engine_clock) &&
4647 0 : (pl1->pcie_gen == pl2->pcie_gen) &&
4648 : (pl1->pcie_lane == pl2->pcie_lane));
4649 : }
4650 :
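     : /*
     :  * Two power states are equal only if every performance level matches
     :  * and the UVD/VCE clocks and SCLK threshold agree; pending overdrive
     :  * updates always force the states to compare unequal.
     :  */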
4651 0 : static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4652 : const struct pp_hw_power_state *pstate1,
4653 : const struct pp_hw_power_state *pstate2, bool *equal)
4654 : {
4655 : const struct smu7_power_state *psa;
4656 : const struct smu7_power_state *psb;
4657 : int i;
4658 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4659 :
4660 0 : if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4661 : return -EINVAL;
4662 :
4663 0 : psa = cast_const_phw_smu7_power_state(pstate1);
4664 0 : psb = cast_const_phw_smu7_power_state(pstate2);
4665 : /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
4666 0 : if (psa->performance_level_count != psb->performance_level_count) {
4667 0 : *equal = false;
4668 0 : return 0;
4669 : }
4670 :
4671 0 : for (i = 0; i < psa->performance_level_count; i++) {
4672 0 : if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4673 : /* If we have found even one performance level pair that is different, the states are different. */
4674 0 : *equal = false;
4675 0 : return 0;
4676 : }
4677 : }
4678 :
4679 : /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
4680 0 : *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4681 0 : *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4682 0 : *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4683 : /* For an OD call, force the states to compare unequal while any OD update flag is pending */
4684 0 : *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4685 : DPMTABLE_OD_UPDATE_MCLK |
4686 : DPMTABLE_OD_UPDATE_VDDC));
4687 :
4688 0 : return 0;
4689 : }
4690 :
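     : /*
     :  * Check which MC microcode the VBIOS loaded: bit 23 of
     :  * ixMC_IO_DEBUG_UP_13 selects between the MEM_LATENCY_HIGH/LOW pair
     :  * and a flat 330, and decides whether FFC is enabled or disabled on
     :  * Polaris parts.
     :  */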
4691 0 : static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4692 : {
4693 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4694 :
4695 : uint32_t tmp;
4696 :
4697 : /* Read MC indirect register offset 0x9F bits [3:0] to see
4698 : * if VBIOS has already loaded a full version of MC ucode
4699 : * or not.
4700 : */
4701 :
4702 0 : smu7_get_mc_microcode_version(hwmgr);
4703 :
4704 0 : data->need_long_memory_training = false;
4705 :
4706 0 : cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4707 : ixMC_IO_DEBUG_UP_13);
4708 0 : tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4709 :
4710 0 : if (tmp & (1 << 23)) {
4711 0 : data->mem_latency_high = MEM_LATENCY_HIGH;
4712 0 : data->mem_latency_low = MEM_LATENCY_LOW;
4713 0 : if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4714 0 : (hwmgr->chip_id == CHIP_POLARIS11) ||
4715 : (hwmgr->chip_id == CHIP_POLARIS12))
4716 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4717 : } else {
4718 0 : data->mem_latency_high = 330;
4719 0 : data->mem_latency_low = 330;
4720 0 : if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4721 0 : (hwmgr->chip_id == CHIP_POLARIS11) ||
4722 : (hwmgr->chip_id == CHIP_POLARIS12))
4723 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4724 : }
4725 :
4726 0 : return 0;
4727 : }
4728 :
4729 0 : static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4730 : {
4731 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4732 :
4733 0 : data->clock_registers.vCG_SPLL_FUNC_CNTL =
4734 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4735 0 : data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
4736 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4737 0 : data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
4738 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4739 0 : data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
4740 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4741 0 : data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
4742 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4743 0 : data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4744 0 : cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4745 0 : data->clock_registers.vDLL_CNTL =
4746 0 : cgs_read_register(hwmgr->device, mmDLL_CNTL);
4747 0 : data->clock_registers.vMCLK_PWRMGT_CNTL =
4748 0 : cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4749 0 : data->clock_registers.vMPLL_AD_FUNC_CNTL =
4750 0 : cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4751 0 : data->clock_registers.vMPLL_DQ_FUNC_CNTL =
4752 0 : cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4753 0 : data->clock_registers.vMPLL_FUNC_CNTL =
4754 0 : cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4755 0 : data->clock_registers.vMPLL_FUNC_CNTL_1 =
4756 0 : cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4757 0 : data->clock_registers.vMPLL_FUNC_CNTL_2 =
4758 0 : cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4759 0 : data->clock_registers.vMPLL_SS1 =
4760 0 : cgs_read_register(hwmgr->device, mmMPLL_SS1);
4761 0 : data->clock_registers.vMPLL_SS2 =
4762 0 : cgs_read_register(hwmgr->device, mmMPLL_SS2);
4763 0 : return 0;
4765 : }
4766 :
4767 : /**
4768 : * smu7_get_memory_type - Find out if memory is GDDR5.
4769 : *
4770 : * @hwmgr: the address of the powerplay hardware manager.
4771 : * Return: always 0
4772 : */
4773 : static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4774 : {
4775 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4776 0 : struct amdgpu_device *adev = hwmgr->adev;
4777 :
4778 0 : data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4779 :
4780 : return 0;
4781 : }
4782 :
4783 : /**
4784 : * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC
4785 : *
4786 : * @hwmgr: the address of the powerplay hardware manager.
4787 : * Return: always 0
4788 : */
4789 0 : static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4790 : {
4791 0 : PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4792 : GENERAL_PWRMGT, STATIC_PM_EN, 1);
4793 :
4794 0 : return 0;
4795 : }
4796 :
4797 : /**
4798 : * smu7_init_power_gate_state - Initialize PowerGating States for different engines
4799 : *
4800 : * @hwmgr: the address of the powerplay hardware manager.
4801 : * Return: always 0
4802 : */
4803 : static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4804 : {
4805 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4806 :
4807 0 : data->uvd_power_gated = false;
4808 0 : data->vce_power_gated = false;
4809 :
4810 : return 0;
4811 : }
4812 :
4813 : static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4814 : {
4815 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4816 :
4817 0 : data->low_sclk_interrupt_threshold = 0;
4818 : return 0;
4819 : }
4820 :
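     : /*
     :  * One-time ASIC setup: verify the MC firmware, cache the clock
     :  * registers and memory type, enable static power management, and
     :  * reset the power-gating and SCLK-threshold bookkeeping.
     :  */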
4821 0 : static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4822 : {
4823 0 : int tmp_result, result = 0;
4824 :
4825 0 : smu7_check_mc_firmware(hwmgr);
4826 :
4827 0 : tmp_result = smu7_read_clock_registers(hwmgr);
4828 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4829 : "Failed to read clock registers!", result = tmp_result);
4830 :
4831 0 : tmp_result = smu7_get_memory_type(hwmgr);
4832 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4833 : "Failed to get memory type!", result = tmp_result);
4834 :
4835 0 : tmp_result = smu7_enable_acpi_power_management(hwmgr);
4836 0 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4837 : "Failed to enable ACPI power management!", result = tmp_result);
4838 :
4839 0 : tmp_result = smu7_init_power_gate_state(hwmgr);
4840 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4841 : "Failed to init power gate state!", result = tmp_result);
4842 :
4843 0 : tmp_result = smu7_get_mc_microcode_version(hwmgr);
4844 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4845 : "Failed to get MC microcode version!", result = tmp_result);
4846 :
4847 0 : tmp_result = smu7_init_sclk_threshold(hwmgr);
4848 : PP_ASSERT_WITH_CODE((0 == tmp_result),
4849 : "Failed to init sclk threshold!", result = tmp_result);
4850 :
4851 0 : return result;
4852 : }
4853 :
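     : /*
     :  * Restrict DPM to the levels named in @mask.  SCLK/MCLK masks are
     :  * intersected with the current enable mask and sent to the SMC; for
     :  * PCIe, a single-bit mask forces that level and anything else
     :  * unforces the previous selection.
     :  */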
4854 0 : static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4855 : enum pp_clock_type type, uint32_t mask)
4856 : {
4857 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4858 :
4859 0 : if (mask == 0)
4860 : return -EINVAL;
4861 :
4862 0 : switch (type) {
4863 : case PP_SCLK:
4864 0 : if (!data->sclk_dpm_key_disabled)
4865 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
4866 : PPSMC_MSG_SCLKDPM_SetEnabledMask,
4867 0 : data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4868 : NULL);
4869 : break;
4870 : case PP_MCLK:
4871 0 : if (!data->mclk_dpm_key_disabled)
4872 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
4873 : PPSMC_MSG_MCLKDPM_SetEnabledMask,
4874 0 : data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4875 : NULL);
4876 : break;
4877 : case PP_PCIE:
4878 : {
4879 0 : uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4880 :
4881 0 : if (!data->pcie_dpm_key_disabled) {
4882 0 : if (fls(tmp) != ffs(tmp))
4883 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4884 : NULL);
4885 : else
4886 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
4887 : PPSMC_MSG_PCIeDPM_ForceLevel,
4888 0 : fls(tmp) - 1,
4889 : NULL);
4890 : }
4891 : break;
4892 : }
4893 : default:
4894 : break;
4895 : }
4896 :
4897 : return 0;
4898 : }
4899 :
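     : /*
     :  * sysfs backend for pp_dpm_sclk/mclk/pcie and the OD_* interfaces:
     :  * one line per DPM level, with '*' marking the currently active
     :  * level; the OD_* cases are emitted only when overdrive is enabled.
     :  */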
4900 0 : static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4901 : enum pp_clock_type type, char *buf)
4902 : {
4903 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4904 0 : struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4905 0 : struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4906 0 : struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4907 0 : struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4908 0 : struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4909 0 : struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4910 0 : int size = 0;
4911 : uint32_t i, now, clock, pcie_speed;
4912 :
4913 0 : switch (type) {
4914 : case PP_SCLK:
4915 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
4916 :
4917 0 : for (i = 0; i < sclk_table->count; i++) {
4918 0 : if (clock > sclk_table->dpm_levels[i].value)
4919 0 : continue;
4920 : break;
4921 : }
4922 : now = i;
4923 :
4924 0 : for (i = 0; i < sclk_table->count; i++)
4925 0 : size += sprintf(buf + size, "%d: %uMHz %s\n",
4926 0 : i, sclk_table->dpm_levels[i].value / 100,
4927 : (i == now) ? "*" : "");
4928 : break;
4929 : case PP_MCLK:
4930 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
4931 :
4932 0 : for (i = 0; i < mclk_table->count; i++) {
4933 0 : if (clock > mclk_table->dpm_levels[i].value)
4934 0 : continue;
4935 : break;
4936 : }
4937 : now = i;
4938 :
4939 0 : for (i = 0; i < mclk_table->count; i++)
4940 0 : size += sprintf(buf + size, "%d: %uMHz %s\n",
4941 0 : i, mclk_table->dpm_levels[i].value / 100,
4942 : (i == now) ? "*" : "");
4943 : break;
4944 : case PP_PCIE:
4945 0 : pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4946 0 : for (i = 0; i < pcie_table->count; i++) {
4947 0 : if (pcie_speed != pcie_table->dpm_levels[i].value)
4948 0 : continue;
4949 : break;
4950 : }
4951 : now = i;
4952 :
4953 0 : for (i = 0; i < pcie_table->count; i++)
4954 0 : size += sprintf(buf + size, "%d: %s %s\n", i,
4955 0 : (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4956 0 : (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4957 0 : (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4958 : (i == now) ? "*" : "");
4959 : break;
4960 : case OD_SCLK:
4961 0 : if (hwmgr->od_enabled) {
4962 0 : size += sprintf(buf + size, "%s:\n", "OD_SCLK");
4963 0 : for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4964 0 : size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4965 0 : i, odn_sclk_table->entries[i].clock/100,
4966 : odn_sclk_table->entries[i].vddc);
4967 : }
4968 : break;
4969 : case OD_MCLK:
4970 0 : if (hwmgr->od_enabled) {
4971 0 : size += sprintf(buf + size, "%s:\n", "OD_MCLK");
4972 0 : for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4973 0 : size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4974 0 : i, odn_mclk_table->entries[i].clock/100,
4975 : odn_mclk_table->entries[i].vddc);
4976 : }
4977 : break;
4978 : case OD_RANGE:
4979 0 : if (hwmgr->od_enabled) {
4980 0 : size += sprintf(buf + size, "%s:\n", "OD_RANGE");
4981 0 : size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4982 0 : data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4983 0 : hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4984 0 : size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4985 0 : data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4986 0 : hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4987 0 : size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4988 : data->odn_dpm_table.min_vddc,
4989 : data->odn_dpm_table.max_vddc);
4990 : }
4991 : break;
4992 : default:
4993 : break;
4994 : }
4995 0 : return size;
4996 : }
4997 :
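     : /*
     :  * Map an AMD_FAN_CTRL_* mode onto the SMC fan interface: NONE pins
     :  * the fan at full PWM, MANUAL stops SMC fan control, and AUTO
     :  * restarts it.
     :  */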
4998 0 : static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4999 : {
5000 0 : switch (mode) {
5001 : case AMD_FAN_CTRL_NONE:
5002 0 : smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
5003 0 : break;
5004 : case AMD_FAN_CTRL_MANUAL:
5005 0 : if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5006 : PHM_PlatformCaps_MicrocodeFanControl))
5007 0 : smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
5008 : break;
5009 : case AMD_FAN_CTRL_AUTO:
5010 0 : if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
5011 0 : smu7_fan_ctrl_start_smc_fan_control(hwmgr);
5012 : break;
5013 : default:
5014 : break;
5015 : }
5016 0 : }
5017 :
5018 0 : static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5019 : {
5020 0 : return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
5021 : }
5022 :
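     : /*
     :  * Sclk overdrive is expressed as the percentage by which the top DPM
     :  * level exceeds its golden (default) value; the setter clamps the
     :  * request to +20% and rewrites the highest performance level of the
     :  * requested state.  smu7_get/set_mclk_od() below are the
     :  * memory-clock analogues.
     :  */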
5023 0 : static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
5024 : {
5025 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5026 0 : struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5027 0 : struct smu7_single_dpm_table *golden_sclk_table =
5028 : &(data->golden_dpm_table.sclk_table);
5029 0 : int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
5030 0 : int golden_value = golden_sclk_table->dpm_levels
5031 0 : [golden_sclk_table->count - 1].value;
5032 :
5033 0 : value -= golden_value;
5034 0 : value = DIV_ROUND_UP(value * 100, golden_value);
5035 :
5036 0 : return value;
5037 : }
5038 :
5039 0 : static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5040 : {
5041 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5042 0 : struct smu7_single_dpm_table *golden_sclk_table =
5043 : &(data->golden_dpm_table.sclk_table);
5044 : struct pp_power_state *ps;
5045 : struct smu7_power_state *smu7_ps;
5046 :
5047 0 : if (value > 20)
5048 0 : value = 20;
5049 :
5050 0 : ps = hwmgr->request_ps;
5051 :
5052 0 : if (ps == NULL)
5053 : return -EINVAL;
5054 :
5055 0 : smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5056 :
5057 0 : smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
5058 0 : golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5059 0 : value / 100 +
5060 : golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5061 :
5062 0 : return 0;
5063 : }
5064 :
5065 0 : static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
5066 : {
5067 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5068 0 : struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5069 0 : struct smu7_single_dpm_table *golden_mclk_table =
5070 : &(data->golden_dpm_table.mclk_table);
5071 0 : int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
5072 0 : int golden_value = golden_mclk_table->dpm_levels
5073 0 : [golden_mclk_table->count - 1].value;
5074 :
5075 0 : value -= golden_value;
5076 0 : value = DIV_ROUND_UP(value * 100, golden_value);
5077 :
5078 0 : return value;
5079 : }
5080 :
5081 0 : static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5082 : {
5083 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5084 0 : struct smu7_single_dpm_table *golden_mclk_table =
5085 : &(data->golden_dpm_table.mclk_table);
5086 : struct pp_power_state *ps;
5087 : struct smu7_power_state *smu7_ps;
5088 :
5089 0 : if (value > 20)
5090 0 : value = 20;
5091 :
5092 0 : ps = hwmgr->request_ps;
5093 :
5094 0 : if (ps == NULL)
5095 : return -EINVAL;
5096 :
5097 0 : smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5098 :
5099 0 : smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
5100 0 : golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5101 0 : value / 100 +
5102 : golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5103 :
5104 0 : return 0;
5105 : }
5106 :
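     : /*
     :  * Enumerate the available engine clocks from whichever pptable
     :  * format is active (the v1 SCLK dependency table or the v0
     :  * dyn_state table).  smu7_get_mclks() below is the memory-clock
     :  * counterpart.
     :  */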
5108 0 : static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5109 : {
5110 0 : struct phm_ppt_v1_information *table_info =
5111 : (struct phm_ppt_v1_information *)hwmgr->pptable;
5112 0 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
5113 : struct phm_clock_voltage_dependency_table *sclk_table;
5114 : int i;
5115 :
5116 0 : if (hwmgr->pp_table_version == PP_TABLE_V1) {
5117 0 : if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
5118 : return -EINVAL;
5119 : dep_sclk_table = table_info->vdd_dep_on_sclk;
5120 0 : for (i = 0; i < dep_sclk_table->count; i++)
5121 0 : clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
5122 0 : clocks->count = dep_sclk_table->count;
5123 0 : } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5124 0 : sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
5125 0 : for (i = 0; i < sclk_table->count; i++)
5126 0 : clocks->clock[i] = sclk_table->entries[i].clk * 10;
5127 0 : clocks->count = sclk_table->count;
5128 : }
5129 :
5130 : return 0;
5131 : }
5132 :
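     : /*
     :  * Memory latency is banded by clock: below MEM_FREQ_LOW_LATENCY the
     :  * value is undefined (MEM_LATENCY_ERR), between the two thresholds
     :  * the high-latency figure applies, and above MEM_FREQ_HIGH_LATENCY
     :  * the low-latency figure applies.
     :  */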
5133 : static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
5134 : {
5135 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5136 :
5137 0 : if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
5138 0 : return data->mem_latency_high;
5139 0 : else if (clk >= MEM_FREQ_HIGH_LATENCY)
5140 0 : return data->mem_latency_low;
5141 : else
5142 : return MEM_LATENCY_ERR;
5143 : }
5144 :
5145 0 : static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5146 : {
5147 0 : struct phm_ppt_v1_information *table_info =
5148 : (struct phm_ppt_v1_information *)hwmgr->pptable;
5149 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
5150 : int i;
5151 : struct phm_clock_voltage_dependency_table *mclk_table;
5152 :
5153 0 : if (hwmgr->pp_table_version == PP_TABLE_V1) {
5154 0 : if (table_info == NULL)
5155 : return -EINVAL;
5156 0 : dep_mclk_table = table_info->vdd_dep_on_mclk;
5157 0 : for (i = 0; i < dep_mclk_table->count; i++) {
5158 0 : clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
5159 0 : clocks->latency[i] = smu7_get_mem_latency(hwmgr,
5160 : dep_mclk_table->entries[i].clk);
5161 : }
5162 0 : clocks->count = dep_mclk_table->count;
5163 0 : } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5164 0 : mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
5165 0 : for (i = 0; i < mclk_table->count; i++)
5166 0 : clocks->clock[i] = mclk_table->entries[i].clk * 10;
5167 0 : clocks->count = mclk_table->count;
5168 : }
5169 : return 0;
5170 : }
5171 :
5172 0 : static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
5173 : struct amd_pp_clocks *clocks)
5174 : {
5175 0 : switch (type) {
5176 : case amd_pp_sys_clock:
5177 0 : smu7_get_sclks(hwmgr, clocks);
5178 0 : break;
5179 : case amd_pp_mem_clock:
5180 0 : smu7_get_mclks(hwmgr, clocks);
5181 0 : break;
5182 : default:
5183 : return -EINVAL;
5184 : }
5185 :
5186 : return 0;
5187 : }
5188 :
5189 : static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
5190 : struct pp_clock_levels_with_latency *clocks)
5191 : {
5192 0 : struct phm_ppt_v1_information *table_info =
5193 : (struct phm_ppt_v1_information *)hwmgr->pptable;
5194 0 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5195 : table_info->vdd_dep_on_sclk;
5196 : int i;
5197 :
5198 0 : clocks->num_levels = 0;
5199 0 : for (i = 0; i < dep_sclk_table->count; i++) {
5200 0 : if (dep_sclk_table->entries[i].clk) {
5201 0 : clocks->data[clocks->num_levels].clocks_in_khz =
5202 0 : dep_sclk_table->entries[i].clk * 10;
5203 0 : clocks->num_levels++;
5204 : }
5205 : }
5206 :
5207 : return 0;
5208 : }
5209 :
5210 0 : static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
5211 : struct pp_clock_levels_with_latency *clocks)
5212 : {
5213 0 : struct phm_ppt_v1_information *table_info =
5214 : (struct phm_ppt_v1_information *)hwmgr->pptable;
5215 0 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5216 : table_info->vdd_dep_on_mclk;
5217 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5218 : int i;
5219 :
5220 0 : clocks->num_levels = 0;
5221 0 : data->mclk_latency_table.count = 0;
5222 0 : for (i = 0; i < dep_mclk_table->count; i++) {
5223 0 : if (dep_mclk_table->entries[i].clk) {
5224 0 : clocks->data[clocks->num_levels].clocks_in_khz =
5225 0 : dep_mclk_table->entries[i].clk * 10;
5226 0 : data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
5227 0 : dep_mclk_table->entries[i].clk;
5228 0 : clocks->data[clocks->num_levels].latency_in_us =
5229 0 : data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
5230 0 : smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
5231 0 : clocks->num_levels++;
5232 0 : data->mclk_latency_table.count++;
5233 : }
5234 : }
5235 :
5236 0 : return 0;
5237 : }
5238 :
5239 0 : static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
5240 : enum amd_pp_clock_type type,
5241 : struct pp_clock_levels_with_latency *clocks)
5242 : {
5243 0 : if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5244 : hwmgr->chip_id <= CHIP_VEGAM))
5245 : return -EINVAL;
5246 :
5247 0 : switch (type) {
5248 : case amd_pp_sys_clock:
5249 0 : smu7_get_sclks_with_latency(hwmgr, clocks);
5250 : break;
5251 : case amd_pp_mem_clock:
5252 0 : smu7_get_mclks_with_latency(hwmgr, clocks);
5253 0 : break;
5254 : default:
5255 : return -EINVAL;
5256 : }
5257 :
5258 : return 0;
5259 : }
5260 :
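     : /*
     :  * Build the DisplayWatermark[mclk][sclk] matrix from the watermark
     :  * clock ranges supplied by DAL and upload it into the SMU74 DPM
     :  * table; only Polaris10 through VEGAM are supported.
     :  */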
5261 0 : static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
5262 : void *clock_range)
5263 : {
5264 0 : struct phm_ppt_v1_information *table_info =
5265 : (struct phm_ppt_v1_information *)hwmgr->pptable;
5266 0 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5267 : table_info->vdd_dep_on_mclk;
5268 0 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5269 : table_info->vdd_dep_on_sclk;
5270 0 : struct polaris10_smumgr *smu_data =
5271 : (struct polaris10_smumgr *)(hwmgr->smu_backend);
5272 0 : SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
5273 0 : struct dm_pp_wm_sets_with_clock_ranges *watermarks =
5274 : (struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
5275 : uint32_t i, j, k;
5276 : bool valid_entry;
5277 :
5278 0 : if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5279 : hwmgr->chip_id <= CHIP_VEGAM))
5280 : return -EINVAL;
5281 :
5282 0 : for (i = 0; i < dep_mclk_table->count; i++) {
5283 0 : for (j = 0; j < dep_sclk_table->count; j++) {
5284 : valid_entry = false;
5285 0 : for (k = 0; k < watermarks->num_wm_sets; k++) {
5286 0 : if (dep_sclk_table->entries[j].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
5287 0 : dep_sclk_table->entries[j].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
5288 0 : dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
5289 0 : dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
5290 0 : valid_entry = true;
5291 0 : table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
5292 0 : break;
5293 : }
5294 : }
5295 0 : PP_ASSERT_WITH_CODE(valid_entry,
5296 : "Clock is not in range of specified clock range for watermark from DAL! Using highest water mark set.",
5297 : table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
5298 : }
5299 : }
5300 :
5301 0 : return smu7_copy_bytes_to_smc(hwmgr,
5302 0 : smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
5303 0 : (uint8_t *)table->DisplayWatermark,
5304 : sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
5305 : SMC_RAM_END);
5306 : }
5307 :
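     : /*
     :  * Publish the CAC/DRAM-log buffer to the SMC by writing its MC and
     :  * virtual addresses plus its size into the soft-register block.
     :  */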
5308 0 : static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
5309 : uint32_t virtual_addr_low,
5310 : uint32_t virtual_addr_hi,
5311 : uint32_t mc_addr_low,
5312 : uint32_t mc_addr_hi,
5313 : uint32_t size)
5314 : {
5315 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5316 :
5317 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5318 : data->soft_regs_start +
5319 : smum_get_offsetof(hwmgr,
5320 : SMU_SoftRegisters, DRAM_LOG_ADDR_H),
5321 : mc_addr_hi);
5322 :
5323 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5324 : data->soft_regs_start +
5325 : smum_get_offsetof(hwmgr,
5326 : SMU_SoftRegisters, DRAM_LOG_ADDR_L),
5327 : mc_addr_low);
5328 :
5329 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5330 : data->soft_regs_start +
5331 : smum_get_offsetof(hwmgr,
5332 : SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
5333 : virtual_addr_hi);
5334 :
5335 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5336 : data->soft_regs_start +
5337 : smum_get_offsetof(hwmgr,
5338 : SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
5339 : virtual_addr_low);
5340 :
5341 0 : cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5342 : data->soft_regs_start +
5343 : smum_get_offsetof(hwmgr,
5344 : SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
5345 : size);
5346 0 : return 0;
5347 : }
5348 :
5349 0 : static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
5350 : struct amd_pp_simple_clock_info *clocks)
5351 : {
5352 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5353 0 : struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5354 0 : struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5355 :
5356 0 : if (clocks == NULL)
5357 : return -EINVAL;
5358 :
5359 0 : clocks->memory_max_clock = mclk_table->count > 1 ?
5360 0 : mclk_table->dpm_levels[mclk_table->count-1].value :
5361 : mclk_table->dpm_levels[0].value;
5362 0 : clocks->engine_max_clock = sclk_table->count > 1 ?
5363 0 : sclk_table->dpm_levels[sclk_table->count-1].value :
5364 : sclk_table->dpm_levels[0].value;
5365 0 : return 0;
5366 : }
5367 :
5368 0 : static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
5369 : struct PP_TemperatureRange *thermal_data)
5370 : {
5371 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5372 0 : struct phm_ppt_v1_information *table_info =
5373 : (struct phm_ppt_v1_information *)hwmgr->pptable;
5374 :
5375 0 : memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
5376 :
5377 0 : if (hwmgr->pp_table_version == PP_TABLE_V1)
5378 0 : thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
5379 : PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5380 0 : else if (hwmgr->pp_table_version == PP_TABLE_V0)
5381 0 : thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
5382 : PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5383 :
5384 0 : return 0;
5385 : }
5386 :
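     : /*
     :  * Validate a single overdrive edit: the voltage must lie within the
     :  * ODN min/max VDDC window and the clock between the level-0 golden
     :  * value and the board's overdrive limit.
     :  */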
5387 0 : static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5388 : enum PP_OD_DPM_TABLE_COMMAND type,
5389 : uint32_t clk,
5390 : uint32_t voltage)
5391 : {
5392 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5393 :
5394 0 : if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
5395 0 : pr_info("OD voltage is out of range [%d - %d] mV\n",
5396 : data->odn_dpm_table.min_vddc,
5397 : data->odn_dpm_table.max_vddc);
5398 : return false;
5399 : }
5400 :
5401 0 : if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5402 0 : if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
5403 0 : hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5404 0 : pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5405 : data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
5406 : hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5407 : return false;
5408 : }
5409 0 : } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5410 0 : if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
5411 0 : hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5412 0 : pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5413 : data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
5414 : hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5415 : return false;
5416 : }
5417 : } else {
5418 : return false;
5419 : }
5420 :
5421 : return true;
5422 : }
5423 :
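     : /*
     :  * Edit the overdrive (ODN) DPM table from sysfs input: triples of
     :  * (level, clock, voltage) are validated against the board limits and
     :  * written into both the displayed table and the voltage-dependency
     :  * table; RESTORE and COMMIT requests are handled in place.
     :  */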
5424 0 : static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
5425 : enum PP_OD_DPM_TABLE_COMMAND type,
5426 : long *input, uint32_t size)
5427 : {
5428 : uint32_t i;
5429 0 : struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
5430 0 : struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
5431 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5432 :
5433 : uint32_t input_clk;
5434 : uint32_t input_vol;
5435 : uint32_t input_level;
5436 :
5437 0 : PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
5438 : return -EINVAL);
5439 :
5440 0 : if (!hwmgr->od_enabled) {
5441 0 : pr_info("OverDrive feature not enabled\n");
5442 0 : return -EINVAL;
5443 : }
5444 :
5445 0 : if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
5446 0 : podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
5447 0 : podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
5448 0 : PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5449 : "Failed to get ODN SCLK and Voltage tables",
5450 : return -EINVAL);
5451 0 : } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
5452 0 : podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
5453 0 : podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
5454 :
5455 0 : PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5456 : "Failed to get ODN MCLK and Voltage tables",
5457 : return -EINVAL);
5458 0 : } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
5459 0 : smu7_odn_initial_default_setting(hwmgr);
5460 0 : return 0;
5461 0 : } else if (PP_OD_COMMIT_DPM_TABLE == type) {
5462 0 : smu7_check_dpm_table_updated(hwmgr);
5463 0 : return 0;
5464 : } else {
5465 : return -EINVAL;
5466 : }
5467 :
5468 0 : for (i = 0; i < size; i += 3) {
5469 0 : if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
5470 0 : pr_info("invalid clock voltage input\n");
5471 0 : return 0;
5472 : }
5473 0 : input_level = input[i];
5474 0 : input_clk = input[i+1] * 100;
5475 0 : input_vol = input[i+2];
5476 :
5477 0 : if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
5478 0 : podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
5479 0 : podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
5480 0 : podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
5481 0 : podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
5482 0 : podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
5483 : } else {
5484 : return -EINVAL;
5485 : }
5486 : }
5487 :
5488 : return 0;
5489 : }
5490 :
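     : /*
     :  * Print the power-profile table for sysfs: one row per profile with
     :  * its SCLK/MCLK hysteresis and activity settings, the active
     :  * profile's live values marked with '*'.
     :  */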
5491 0 : static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
5492 : {
5493 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5494 0 : uint32_t i, size = 0;
5495 : uint32_t len;
5496 :
5497 : static const char *title[8] = {"NUM",
5498 : "MODE_NAME",
5499 : "SCLK_UP_HYST",
5500 : "SCLK_DOWN_HYST",
5501 : "SCLK_ACTIVE_LEVEL",
5502 : "MCLK_UP_HYST",
5503 : "MCLK_DOWN_HYST",
5504 : "MCLK_ACTIVE_LEVEL"};
5505 :
5506 0 : if (!buf)
5507 : return -EINVAL;
5508 :
5509 0 : phm_get_sysfs_buf(&buf, &size);
5510 :
5511 0 : size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
5512 : title[0], title[1], title[2], title[3],
5513 : title[4], title[5], title[6], title[7]);
5514 :
5515 0 : len = ARRAY_SIZE(smu7_profiling);
5516 :
5517 0 : for (i = 0; i < len; i++) {
5518 0 : if (i == hwmgr->power_profile_mode) {
5519 0 : size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
5520 : i, amdgpu_pp_profile_name[i], "*",
5521 0 : data->current_profile_setting.sclk_up_hyst,
5522 0 : data->current_profile_setting.sclk_down_hyst,
5523 0 : data->current_profile_setting.sclk_activity,
5524 0 : data->current_profile_setting.mclk_up_hyst,
5525 0 : data->current_profile_setting.mclk_down_hyst,
5526 0 : data->current_profile_setting.mclk_activity);
5527 0 : continue;
5528 : }
5529 0 : if (smu7_profiling[i].bupdate_sclk)
5530 0 : size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
5531 0 : i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst,
5532 0 : smu7_profiling[i].sclk_down_hyst,
5533 0 : smu7_profiling[i].sclk_activity);
5534 : else
5535 0 : size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
5536 : i, amdgpu_pp_profile_name[i], "-", "-", "-");
5537 :
5538 0 : if (smu7_profiling[i].bupdate_mclk)
5539 0 : size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
5540 0 : smu7_profiling[i].mclk_up_hyst,
5541 0 : smu7_profiling[i].mclk_down_hyst,
5542 0 : smu7_profiling[i].mclk_activity);
5543 : else
5544 0 : size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
5545 : "-", "-", "-");
5546 : }
5547 :
5548 0 : return size;
5549 : }
5550 :
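     : /*
     :  * Entering the COMPUTE profile forces the two highest enabled SCLK
     :  * levels; leaving it restores the full SCLK enable mask.
     :  */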
5551 0 : static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5552 : enum PP_SMC_POWER_PROFILE request)
5553 : {
5554 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5555 : uint32_t tmp, level;
5556 :
5557 0 : if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
5558 0 : if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5559 : level = 0;
5560 : tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5561 0 : while (tmp >>= 1)
5562 0 : level++;
5563 0 : if (level > 0)
5564 0 : smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5565 : }
5566 0 : } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5567 0 : smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5568 : }
5569 0 : }
5570 :
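     : /*
     :  * Select a power profile.  CUSTOM takes eight optional parameters
     :  * (SCLK/MCLK update flags, hysteresis and activity thresholds) and
     :  * is persisted in smu7_profiling[]; the fixed profiles copy their
     :  * preset values to the SMC and patch the COMPUTE special case.
     :  */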
5571 0 : static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5572 : {
5573 0 : struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5574 : struct profile_mode_setting tmp;
5575 : enum PP_SMC_POWER_PROFILE mode;
5576 :
5577 0 : if (input == NULL)
5578 : return -EINVAL;
5579 :
5580 0 : mode = input[size];
5581 0 : switch (mode) {
5582 : case PP_SMC_POWER_PROFILE_CUSTOM:
5583 0 : if (size < 8 && size != 0)
5584 : return -EINVAL;
5585 : /* If only CUSTOM is passed in, use the saved values. Check
5586 : * that we actually have a CUSTOM profile by ensuring that
5587 : * the "use sclk" or the "use mclk" bits are set
5588 : */
5589 0 : tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5590 0 : if (size == 0) {
5591 0 : if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5592 : return -EINVAL;
5593 : } else {
5594 0 : tmp.bupdate_sclk = input[0];
5595 0 : tmp.sclk_up_hyst = input[1];
5596 0 : tmp.sclk_down_hyst = input[2];
5597 0 : tmp.sclk_activity = input[3];
5598 0 : tmp.bupdate_mclk = input[4];
5599 0 : tmp.mclk_up_hyst = input[5];
5600 0 : tmp.mclk_down_hyst = input[6];
5601 0 : tmp.mclk_activity = input[7];
5602 0 : smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5603 : }
5604 0 : if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5605 0 : memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5606 0 : hwmgr->power_profile_mode = mode;
5607 : }
5608 : break;
5609 : case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5610 : case PP_SMC_POWER_PROFILE_POWERSAVING:
5611 : case PP_SMC_POWER_PROFILE_VIDEO:
5612 : case PP_SMC_POWER_PROFILE_VR:
5613 : case PP_SMC_POWER_PROFILE_COMPUTE:
5614 0 : if (mode == hwmgr->power_profile_mode)
5615 : return 0;
5616 :
5617 0 : memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5618 0 : if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5619 0 : if (tmp.bupdate_sclk) {
5620 0 : data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5621 0 : data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5622 0 : data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5623 0 : data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5624 : }
5625 0 : if (tmp.bupdate_mclk) {
5626 0 : data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5627 0 : data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5628 0 : data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5629 0 : data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5630 : }
5631 0 : smu7_patch_compute_profile_mode(hwmgr, mode);
5632 0 : hwmgr->power_profile_mode = mode;
5633 : }
5634 : break;
5635 : default:
5636 : return -EINVAL;
5637 : }
5638 :
5639 : return 0;
5640 : }
5641 :
5642 0 : static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5643 : PHM_PerformanceLevelDesignation designation, uint32_t index,
5644 : PHM_PerformanceLevel *level)
5645 : {
5646 : const struct smu7_power_state *ps;
5647 : uint32_t i;
5648 :
5649 0 : if (level == NULL || hwmgr == NULL || state == NULL)
5650 : return -EINVAL;
5651 :
5652 0 : ps = cast_const_phw_smu7_power_state(state);
5653 :
5654 0 : i = index > ps->performance_level_count - 1 ?
5655 0 : ps->performance_level_count - 1 : index;
5656 :
5657 0 : level->coreClock = ps->performance_levels[i].engine_clock;
5658 0 : level->memory_clock = ps->performance_levels[i].memory_clock;
5659 :
5660 0 : return 0;
5661 : }
5662 :
5663 0 : static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5664 : {
5665 : int result;
5666 :
5667 0 : result = smu7_disable_dpm_tasks(hwmgr);
5668 0 : PP_ASSERT_WITH_CODE((0 == result),
5669 : "[disable_dpm_tasks] Failed to disable DPM!",
5670 : );
5671 :
5672 0 : return result;
5673 : }
5674 :
5675 : static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5676 : .backend_init = &smu7_hwmgr_backend_init,
5677 : .backend_fini = &smu7_hwmgr_backend_fini,
5678 : .asic_setup = &smu7_setup_asic_task,
5679 : .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5680 : .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5681 : .force_dpm_level = &smu7_force_dpm_level,
5682 : .power_state_set = smu7_set_power_state_tasks,
5683 : .get_power_state_size = smu7_get_power_state_size,
5684 : .get_mclk = smu7_dpm_get_mclk,
5685 : .get_sclk = smu7_dpm_get_sclk,
5686 : .patch_boot_state = smu7_dpm_patch_boot_state,
5687 : .get_pp_table_entry = smu7_get_pp_table_entry,
5688 : .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5689 : .powerdown_uvd = smu7_powerdown_uvd,
5690 : .powergate_uvd = smu7_powergate_uvd,
5691 : .powergate_vce = smu7_powergate_vce,
5692 : .disable_clock_power_gating = smu7_disable_clock_power_gating,
5693 : .update_clock_gatings = smu7_update_clock_gatings,
5694 : .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5695 : .display_config_changed = smu7_display_configuration_changed_task,
5696 : .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5697 : .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5698 : .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5699 : .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5700 : .get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
5701 : .set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
5702 : .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5703 : .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5704 : .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5705 : .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5706 : .register_irq_handlers = smu7_register_irq_handlers,
5707 : .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5708 : .check_states_equal = smu7_check_states_equal,
5709 : .set_fan_control_mode = smu7_set_fan_control_mode,
5710 : .get_fan_control_mode = smu7_get_fan_control_mode,
5711 : .force_clock_level = smu7_force_clock_level,
5712 : .print_clock_levels = smu7_print_clock_levels,
5713 : .powergate_gfx = smu7_powergate_gfx,
5714 : .get_sclk_od = smu7_get_sclk_od,
5715 : .set_sclk_od = smu7_set_sclk_od,
5716 : .get_mclk_od = smu7_get_mclk_od,
5717 : .set_mclk_od = smu7_set_mclk_od,
5718 : .get_clock_by_type = smu7_get_clock_by_type,
5719 : .get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
5720 : .set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
5721 : .read_sensor = smu7_read_sensor,
5722 : .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5723 : .avfs_control = smu7_avfs_control,
5724 : .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5725 : .start_thermal_controller = smu7_start_thermal_controller,
5726 : .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5727 : .get_max_high_clocks = smu7_get_max_high_clocks,
5728 : .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5729 : .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5730 : .set_power_limit = smu7_set_power_limit,
5731 : .get_power_profile_mode = smu7_get_power_profile_mode,
5732 : .set_power_profile_mode = smu7_set_power_profile_mode,
5733 : .get_performance_level = smu7_get_performance_level,
5734 : .get_asic_baco_capability = smu7_baco_get_capability,
5735 : .get_asic_baco_state = smu7_baco_get_state,
5736 : .set_asic_baco_state = smu7_baco_set_state,
5737 : .power_off_asic = smu7_power_off_asic,
5738 : };
5739 :
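     : /*
     :  * Pick the largest deep-sleep divider id (clock >> id) that still
     :  * keeps the divided clock at or above the stutter-mode minimum.
     :  */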
5740 0 : uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5741 : uint32_t clock_insr)
5742 : {
5743 : uint8_t i;
5744 : uint32_t temp;
5745 0 : uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5746 :
5747 0 : PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5748 0 : for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
5749 0 : temp = clock >> i;
5750 :
5751 0 : if (temp >= min || i == 0)
5752 : break;
5753 : }
5754 : return i;
5755 : }
5756 :
5757 0 : int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5758 : {
5759 0 : hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5760 0 : if (hwmgr->pp_table_version == PP_TABLE_V0)
5761 0 : hwmgr->pptable_func = &pptable_funcs;
5762 0 : else if (hwmgr->pp_table_version == PP_TABLE_V1)
5763 0 : hwmgr->pptable_func = &pptable_v1_0_funcs;
5764 :
5765 0 : return 0;
5766 : }
|