Line data Source code
1 : /*
2 : * Copyright 2020 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #define SWSMU_CODE_LAYER_L2
25 :
26 : #include "amdgpu.h"
27 : #include "amdgpu_smu.h"
28 : #include "smu_v11_0.h"
29 : #include "smu11_driver_if_vangogh.h"
30 : #include "vangogh_ppt.h"
31 : #include "smu_v11_5_ppsmc.h"
32 : #include "smu_v11_5_pmfw.h"
33 : #include "smu_cmn.h"
34 : #include "soc15_common.h"
35 : #include "asic_reg/gc/gc_10_3_0_offset.h"
36 : #include "asic_reg/gc/gc_10_3_0_sh_mask.h"
37 : #include <asm/processor.h>
38 :
39 : /*
40 : * DO NOT use these for err/warn/info/debug messages.
41 : * Use dev_err, dev_warn, dev_info and dev_dbg instead.
42 : * They are more MGPU friendly.
43 : */
44 : #undef pr_err
45 : #undef pr_warn
46 : #undef pr_info
47 : #undef pr_debug
48 :
49 : // Registers related to GFXOFF
50 : // addressBlock: smuio_smuio_SmuSmuioDec
51 : // base address: 0x5a000
52 : #define mmSMUIO_GFX_MISC_CNTL 0x00c5
53 : #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
54 :
55 : //SMUIO_GFX_MISC_CNTL
56 : #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
57 : #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
58 : #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
59 : #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
60 :
#define FEATURE_MASK(feature) (1ULL << feature)
/*
 * Union of the SMU feature bits that indicate firmware-managed DPM is
 * active; tested against the enabled-feature mask in
 * vangogh_is_dpm_running().
 */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))
72 :
/*
 * Mapping of generic SMU message IDs to the Vangogh PPSMC opcodes.
 * The trailing 0 is the MSG_MAP flag argument (presumably the
 * valid-in-VF marker used by smu_cmn — confirm against MSG_MAP's
 * definition in smu_cmn.h).
 */
static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,		0),
	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,		0),
	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			0),
	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,		0),
	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		0),
	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(RlcPowerNotify,                 PPSMC_MSG_RlcPowerNotify,		0),
	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		0),
	MSG_MAP(SetSoftMinGfxclk,               PPSMC_MSG_SetSoftMinGfxclk,		0),
	MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,		0),
	MSG_MAP(SetHardMinIspiclkByFreq,        PPSMC_MSG_SetHardMinIspiclkByFreq,	0),
	MSG_MAP(SetHardMinIspxclkByFreq,        PPSMC_MSG_SetHardMinIspxclkByFreq,	0),
	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		0),
	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	0),
	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	0),
	MSG_MAP(SetSoftMinFclk,                 PPSMC_MSG_SetSoftMinFclk,		0),
	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		0),
	MSG_MAP(EnablePostCode,                 PPSMC_MSG_EnablePostCode,		0),
	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,		0),
	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		0),
	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		0),
	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		0),
	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	0),
	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		0),
	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,		0),
	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	0),
	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		0),
	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	0),
	MSG_MAP(PowerUpCvip,                    PPSMC_MSG_PowerUpCvip,			0),
	MSG_MAP(PowerDownCvip,                  PPSMC_MSG_PowerDownCvip,		0),
	MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetThermalLimit,                PPSMC_MSG_GetThermalLimit,		0),
	MSG_MAP(GetCurrentTemperature,          PPSMC_MSG_GetCurrentTemperature,	0),
	MSG_MAP(GetCurrentPower,                PPSMC_MSG_GetCurrentPower,		0),
	MSG_MAP(GetCurrentVoltage,              PPSMC_MSG_GetCurrentVoltage,		0),
	MSG_MAP(GetCurrentCurrent,              PPSMC_MSG_GetCurrentCurrent,		0),
	MSG_MAP(GetAverageCpuActivity,          PPSMC_MSG_GetAverageCpuActivity,	0),
	MSG_MAP(GetAverageGfxActivity,          PPSMC_MSG_GetAverageGfxActivity,	0),
	MSG_MAP(GetAveragePower,                PPSMC_MSG_GetAveragePower,		0),
	MSG_MAP(GetAverageTemperature,          PPSMC_MSG_GetAverageTemperature,	0),
	MSG_MAP(SetAveragePowerTimeConstant,    PPSMC_MSG_SetAveragePowerTimeConstant,	0),
	MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0),
	MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0),
	MSG_MAP(SetMitigationEndHysteresis,     PPSMC_MSG_SetMitigationEndHysteresis,	0),
	MSG_MAP(GetCurrentFreq,                 PPSMC_MSG_GetCurrentFreq,		0),
	MSG_MAP(SetReducedPptLimit,             PPSMC_MSG_SetReducedPptLimit,		0),
	MSG_MAP(SetReducedThermalLimit,         PPSMC_MSG_SetReducedThermalLimit,	0),
	MSG_MAP(DramLogSetDramAddr,             PPSMC_MSG_DramLogSetDramAddr,		0),
	MSG_MAP(StartDramLogging,               PPSMC_MSG_StartDramLogging,		0),
	MSG_MAP(StopDramLogging,                PPSMC_MSG_StopDramLogging,		0),
	MSG_MAP(SetSoftMinCclk,                 PPSMC_MSG_SetSoftMinCclk,		0),
	MSG_MAP(SetSoftMaxCclk,                 PPSMC_MSG_SetSoftMaxCclk,		0),
	MSG_MAP(RequestActiveWgp,               PPSMC_MSG_RequestActiveWgp,		0),
	MSG_MAP(SetFastPPTLimit,                PPSMC_MSG_SetFastPPTLimit,		0),
	MSG_MAP(SetSlowPPTLimit,                PPSMC_MSG_SetSlowPPTLimit,		0),
	MSG_MAP(GetFastPPTLimit,                PPSMC_MSG_GetFastPPTLimit,		0),
	MSG_MAP(GetSlowPPTLimit,                PPSMC_MSG_GetSlowPPTLimit,		0),
	MSG_MAP(GetGfxOffStatus,                PPSMC_MSG_GetGfxOffStatus,		0),
	MSG_MAP(GetGfxOffEntryCount,            PPSMC_MSG_GetGfxOffEntryCount,		0),
	MSG_MAP(LogGfxOffResidency,             PPSMC_MSG_LogGfxOffResidency,		0),
};
145 :
/*
 * Mapping of generic SMU feature enums to the Vangogh firmware feature
 * bits.  The FEA_MAP_REVERSE/FEA_MAP_HALF_REVERSE entries use the
 * alternate mapping direction defined by the smu_cmn FEA_MAP macros.
 */
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(S0I2),
	FEA_MAP(SMU_LOW_POWER),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(OS_CSTATES),
	FEA_MAP(ISP_DPM),
	FEA_MAP(A55_DPM),
	FEA_MAP(CVIP_DSP_DPM),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_HALF_REVERSE(GFX),
};
192 :
/* SMU tables that are valid (backed by firmware) on Vangogh. */
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};
199 :
/* Mapping of driver power-profile modes to PPLIB workload bits. */
static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};
207 :
/*
 * Translation table, indexed by the firmware THROTTLER_STATUS_BIT_*
 * position, yielding the generic SMU_THROTTLER_* bit reported to
 * userspace.
 */
static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL]	= (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT]	= (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT]	= (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU]	= (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE]	= (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC]	= (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD]	= (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC]	= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX]	= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_STATUS_BIT_TDC_CVIP]	= (SMU_THROTTLER_TDC_CVIP_BIT),
};
221 :
222 0 : static int vangogh_tables_init(struct smu_context *smu)
223 : {
224 0 : struct smu_table_context *smu_table = &smu->smu_table;
225 0 : struct smu_table *tables = smu_table->tables;
226 0 : struct amdgpu_device *adev = smu->adev;
227 : uint32_t if_version;
228 0 : uint32_t ret = 0;
229 :
230 0 : ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
231 0 : if (ret) {
232 0 : dev_err(adev->dev, "Failed to get smu if version!\n");
233 0 : goto err0_out;
234 : }
235 :
236 0 : SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
237 : PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
238 0 : SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
239 : PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
240 0 : SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
241 : PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
242 0 : SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
243 : PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
244 :
245 0 : if (if_version < 0x3) {
246 0 : SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
247 : PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
248 0 : smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
249 : } else {
250 0 : SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
251 : PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
252 0 : smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
253 : }
254 0 : if (!smu_table->metrics_table)
255 : goto err0_out;
256 0 : smu_table->metrics_time = 0;
257 :
258 0 : smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
259 0 : smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
260 0 : if (!smu_table->gpu_metrics_table)
261 : goto err1_out;
262 :
263 0 : smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
264 0 : if (!smu_table->watermarks_table)
265 : goto err2_out;
266 :
267 0 : smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
268 0 : if (!smu_table->clocks_table)
269 : goto err3_out;
270 :
271 : return 0;
272 :
273 : err3_out:
274 0 : kfree(smu_table->watermarks_table);
275 : err2_out:
276 0 : kfree(smu_table->gpu_metrics_table);
277 : err1_out:
278 0 : kfree(smu_table->metrics_table);
279 : err0_out:
280 : return -ENOMEM;
281 : }
282 :
/*
 * vangogh_get_legacy_smu_metrics_data - read one metric from the cached
 * legacy-layout (interface version < 0x3) metrics table.
 * @smu:    smu context
 * @member: which metric to fetch
 * @value:  output; for METRICS_AVERAGE_CPUCLK it must be large enough to
 *          hold cpu_core_num uint16_t entries, otherwise one uint32_t
 *
 * Returns 0 on success or the error from the metrics-table refresh.
 * Unknown members set *value to UINT_MAX and still return 0.
 */
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
					       MetricsMember_t member,
					       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	/* refresh smu_table->metrics_table from firmware if it is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		/* legacy firmware reports activity scaled up by 100 */
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* (x << 8) / 1000: presumably mW -> 8.8 fixed-point W;
		 * confirm against the CurrentSocketPower field units */
		*value = (metrics->CurrentSocketPower << 8) /
		1000 ;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* firmware value appears to be centi-degrees C; convert to
		 * the driver's temperature units */
		*value = metrics->GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* index per firmware Voltage[] layout — 2 = GFX rail */
		*value = metrics->Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		/* index per firmware Voltage[] layout — 1 = SOC rail */
		*value = metrics->Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* copies one uint16_t per CPU core into the caller buffer */
		memcpy(value, &metrics->CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
351 :
/*
 * vangogh_get_smu_metrics_data - read one metric from the cached
 * current-layout (interface version >= 0x3) metrics table.
 * @smu:    smu context
 * @member: which metric to fetch
 * @value:  output; for METRICS_AVERAGE_CPUCLK it must be large enough to
 *          hold cpu_core_num uint16_t entries, otherwise one uint32_t
 *
 * Same contract as the legacy variant but reads from the metrics
 * "Current" sub-structure of SmuMetrics_t.  Unknown members set *value
 * to UINT_MAX and still return 0.
 */
static int vangogh_get_smu_metrics_data(struct smu_context *smu,
					MetricsMember_t member,
					uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	/* refresh smu_table->metrics_table from firmware if it is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->Current.GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->Current.SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->Current.VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->Current.DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->Current.MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		/* note: no /100 here, unlike the legacy layout */
		*value = metrics->Current.GfxActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->Current.UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* (x << 8) / 1000: presumably mW -> 8.8 fixed-point W;
		 * confirm against the CurrentSocketPower field units */
		*value = (metrics->Current.CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* firmware value appears to be centi-degrees C; convert to
		 * the driver's temperature units */
		*value = metrics->Current.GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->Current.SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->Current.ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* index per firmware Voltage[] layout — 2 = GFX rail */
		*value = metrics->Current.Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		/* index per firmware Voltage[] layout — 1 = SOC rail */
		*value = metrics->Current.Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* copies one uint16_t per CPU core into the caller buffer */
		memcpy(value, &metrics->Current.CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
420 :
421 0 : static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
422 : MetricsMember_t member,
423 : uint32_t *value)
424 : {
425 0 : struct amdgpu_device *adev = smu->adev;
426 : uint32_t if_version;
427 0 : int ret = 0;
428 :
429 0 : ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
430 0 : if (ret) {
431 0 : dev_err(adev->dev, "Failed to get smu if version!\n");
432 0 : return ret;
433 : }
434 :
435 0 : if (if_version < 0x3)
436 0 : ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
437 : else
438 0 : ret = vangogh_get_smu_metrics_data(smu, member, value);
439 :
440 : return ret;
441 : }
442 :
443 : static int vangogh_allocate_dpm_context(struct smu_context *smu)
444 : {
445 0 : struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
446 :
447 0 : smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
448 : GFP_KERNEL);
449 0 : if (!smu_dpm->dpm_context)
450 : return -ENOMEM;
451 :
452 0 : smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
453 :
454 : return 0;
455 : }
456 :
/*
 * vangogh_init_smc_tables - full SMC table setup for Vangogh
 * @smu: smu context
 *
 * Initializes the ASIC-specific tables, allocates the dpm context,
 * records the CPU core count (used to size per-core metrics reads),
 * then finishes with the common smu_v11_0 table init.
 *
 * Returns 0 on success or a negative error code.
 */
static int vangogh_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = vangogh_tables_init(smu);
	if (ret)
		return ret;

	ret = vangogh_allocate_dpm_context(smu);
	if (ret)
		return ret;

#ifdef CONFIG_X86
	/* AMD x86 APU only */
	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
	/* non-x86 build (compile coverage only): assume 4 cores */
	smu->cpu_core_num = 4;
#endif

	return smu_v11_0_init_smc_tables(smu);
}
478 :
479 0 : static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
480 : {
481 0 : int ret = 0;
482 :
483 0 : if (enable) {
484 : /* vcn dpm on is a prerequisite for vcn power gate messages */
485 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
486 0 : if (ret)
487 : return ret;
488 : } else {
489 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
490 0 : if (ret)
491 : return ret;
492 : }
493 :
494 : return ret;
495 : }
496 :
497 0 : static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
498 : {
499 0 : int ret = 0;
500 :
501 0 : if (enable) {
502 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
503 0 : if (ret)
504 : return ret;
505 : } else {
506 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
507 0 : if (ret)
508 : return ret;
509 : }
510 :
511 : return ret;
512 : }
513 :
514 0 : static bool vangogh_is_dpm_running(struct smu_context *smu)
515 : {
516 0 : struct amdgpu_device *adev = smu->adev;
517 0 : int ret = 0;
518 : uint64_t feature_enabled;
519 :
520 : /* we need to re-init after suspend so return false */
521 0 : if (adev->in_suspend)
522 : return false;
523 :
524 0 : ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
525 :
526 0 : if (ret)
527 : return false;
528 :
529 0 : return !!(feature_enabled & SMC_DPM_FEATURE);
530 : }
531 :
/*
 * vangogh_get_dpm_clk_limited - look up one DPM level's frequency
 * @smu:       smu context
 * @clk_type:  clock domain to query
 * @dpm_level: level index into the firmware clock table
 * @freq:      output frequency (units as stored in DpmClocks_t)
 *
 * Bounds-checks @dpm_level against the per-domain enabled-level count
 * before indexing the firmware-provided clocks table.
 *
 * Returns 0 on success, -EINVAL for an unknown clock type, an
 * out-of-range level, or a missing clocks table.
 */
static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
				       uint32_t dpm_level, uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		/* vclk and dclk share the VCN level count */
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].vclk;
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].dclk;
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		/* memory clock comes from the DF p-state table */
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].memclk;

		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].fclk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
574 :
/*
 * vangogh_print_legacy_clk_levels - format clock levels into a sysfs buf
 * (legacy metrics layout, interface version < 0x3)
 * @smu:      smu context
 * @clk_type: clock domain (or OD_* pseudo-type) to print
 * @buf:      sysfs output buffer
 *
 * For OD_* types the overdrive min/max ranges are printed (only in
 * manual dpm level).  For real clock domains each enabled DPM level is
 * printed with a '*' marking the level matching the current frequency;
 * if no level matches, the current frequency is appended on its own
 * line.
 *
 * Returns the number of bytes written, or a negative error code.
 */
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
					   enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	/* snapshot current frequencies from the metrics table */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* first pass: gather level count and current value, or emit the
	 * OD_* range blocks directly */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		/* fclk is not in the legacy metrics table; ask firmware */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	/* second pass: print each enabled level, flagging the current one */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
				cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}
675 :
/*
 * vangogh_print_clk_levels - format clock levels into a sysfs buffer
 * (current metrics layout, interface version >= 0x3)
 * @smu:      smu context
 * @clk_type: clock domain (or OD_* pseudo-type) to print
 * @buf:      sysfs output buffer
 *
 * Same output contract as the legacy variant, plus GFXCLK/SCLK support:
 * for those a synthetic 3-level view (hard-min / current / soft-max) is
 * printed with a '*' on the level matching the firmware-reported
 * frequency.
 *
 * Returns the number of bytes written, or a negative error code.
 */
static int vangogh_print_clk_levels(struct smu_context *smu,
				    enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	/* snapshot current frequencies from the metrics table */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* first pass: gather level count and current value, or emit the
	 * OD_* range blocks directly */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk is queried from firmware, not the clocks table */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret) {
			return ret;
		}
		break;
	default:
		break;
	}

	/* second pass: print each enabled level, flagging the current one */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
				cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* synthesize a 3-level view: 0 = hard min, 1 = standard (or
		 * the current value when it sits between min and max),
		 * 2 = soft max */
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value  == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
802 :
803 0 : static int vangogh_common_print_clk_levels(struct smu_context *smu,
804 : enum smu_clk_type clk_type, char *buf)
805 : {
806 0 : struct amdgpu_device *adev = smu->adev;
807 : uint32_t if_version;
808 0 : int ret = 0;
809 :
810 0 : ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
811 0 : if (ret) {
812 0 : dev_err(adev->dev, "Failed to get smu if version!\n");
813 0 : return ret;
814 : }
815 :
816 0 : if (if_version < 0x3)
817 0 : ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
818 : else
819 0 : ret = vangogh_print_clk_levels(smu, clk_type, buf);
820 :
821 : return ret;
822 : }
823 :
824 : static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
825 : enum amd_dpm_forced_level level,
826 : uint32_t *vclk_mask,
827 : uint32_t *dclk_mask,
828 : uint32_t *mclk_mask,
829 : uint32_t *fclk_mask,
830 : uint32_t *soc_mask)
831 : {
832 0 : DpmClocks_t *clk_table = smu->smu_table.clocks_table;
833 :
834 0 : if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
835 : if (mclk_mask)
836 0 : *mclk_mask = clk_table->NumDfPstatesEnabled - 1;
837 :
838 : if (fclk_mask)
839 0 : *fclk_mask = clk_table->NumDfPstatesEnabled - 1;
840 :
841 : if (soc_mask)
842 0 : *soc_mask = 0;
843 0 : } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
844 : if (mclk_mask)
845 0 : *mclk_mask = 0;
846 :
847 : if (fclk_mask)
848 0 : *fclk_mask = 0;
849 :
850 : if (soc_mask)
851 0 : *soc_mask = 1;
852 :
853 : if (vclk_mask)
854 0 : *vclk_mask = 1;
855 :
856 : if (dclk_mask)
857 0 : *dclk_mask = 1;
858 0 : } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
859 : if (mclk_mask)
860 0 : *mclk_mask = 0;
861 :
862 : if (fclk_mask)
863 0 : *fclk_mask = 0;
864 :
865 : if (soc_mask)
866 0 : *soc_mask = 1;
867 :
868 : if (vclk_mask)
869 0 : *vclk_mask = 1;
870 :
871 : if (dclk_mask)
872 0 : *dclk_mask = 1;
873 : }
874 :
875 : return 0;
876 : }
877 :
878 0 : static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
879 : enum smu_clk_type clk_type)
880 : {
881 0 : enum smu_feature_mask feature_id = 0;
882 :
883 0 : switch (clk_type) {
884 : case SMU_MCLK:
885 : case SMU_UCLK:
886 : case SMU_FCLK:
887 : feature_id = SMU_FEATURE_DPM_FCLK_BIT;
888 : break;
889 : case SMU_GFXCLK:
890 : case SMU_SCLK:
891 0 : feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
892 0 : break;
893 : case SMU_SOCCLK:
894 0 : feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
895 0 : break;
896 : case SMU_VCLK:
897 : case SMU_DCLK:
898 0 : feature_id = SMU_FEATURE_VCN_DPM_BIT;
899 0 : break;
900 : default:
901 : return true;
902 : }
903 :
904 0 : if (!smu_cmn_feature_is_enabled(smu, feature_id))
905 : return false;
906 :
907 0 : return true;
908 : }
909 :
/*
 * vangogh_get_dpm_ultimate_freq - report the achievable min/max of a clock.
 *
 * @min/@max: optional output pointers (either may be NULL); filled in MHz.
 *
 * When DPM is disabled for @clk_type, both outputs fall back to the VBIOS
 * boot value (boot values are stored in 10 kHz units, hence the /100).
 * Otherwise the max comes from the PROFILE_PEAK level selection and the min
 * from the same masks — NOTE(review): the min path reuses the peak-profile
 * masks rather than a dedicated "min" selection; confirm this is intended.
 */
static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t *min,
					uint32_t *max)
{
	int ret = 0;
	uint32_t soc_mask;
	uint32_t vclk_mask;
	uint32_t dclk_mask;
	uint32_t mclk_mask;
	uint32_t fclk_mask;
	uint32_t clock_limit;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
		/* DPM disabled: report the static boot frequency instead */
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
	if (max) {
		/* peak-profile masks give the top DPM level per clock */
		ret = vangogh_get_profiling_clk_mask(smu,
			 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
			 &vclk_mask,
			 &dclk_mask,
			 &mclk_mask,
			 &fclk_mask,
			 &soc_mask);
		if (ret)
			goto failed;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
	if (min) {
		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	return ret;
}
1037 :
/*
 * vangogh_get_power_profile_mode - list supported power profiles for sysfs.
 *
 * Writes one line per profile supported on this ASIC into @buf, marking the
 * currently active profile with '*'.  Returns the number of bytes emitted,
 * or -EINVAL when @buf is NULL.
 */
static int vangogh_get_power_profile_mode(struct smu_context *smu,
					char *buf)
{
	uint32_t i, size = 0;
	int16_t workload_type = 0;

	if (!buf)
		return -EINVAL;

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/*
		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
		 * Not all profile modes are supported on vangogh.
		 */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);

		/* negative index: profile not mapped on this ASIC, skip it */
		if (workload_type < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
	}

	return size;
}
1065 :
/*
 * vangogh_set_power_profile_mode - select the active power/workload profile.
 *
 * @input: sysfs parameter array; the requested profile mode is read from
 *         index @size (i.e. the last element, per the pp_power_profile_mode
 *         ABI convention).
 * @size:  number of user parameters preceding the profile mode.
 *
 * Notifies the PMFW of the new workload and caches it in
 * smu->power_profile_mode on success.
 */
static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
	int workload_type, ret;
	uint32_t profile_mode = input[size];

	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
		return -EINVAL;
	}

	/* these two profiles need no PMFW notification; silently accept them */
	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       profile_mode);
	if (workload_type < 0) {
		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
					profile_mode);
		return -EINVAL;
	}

	/* workload is passed to the PMFW as a one-hot bitmask */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
				    1 << workload_type,
				    NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
					workload_type);
		return ret;
	}

	smu->power_profile_mode = profile_mode;

	return 0;
}
1103 :
/*
 * vangogh_set_soft_freq_limited_range - clamp a clock to [min, max] MHz.
 *
 * Sends the hard-minimum message followed by the soft-maximum message for
 * the given clock type.  A clock whose DPM feature is disabled is silently
 * accepted (returns 0 without touching the PMFW).
 *
 * NOTE(review): for SMU_VCLK the frequency is packed into the upper 16 bits
 * of the VCN message parameter, while SMU_DCLK uses the raw value —
 * presumably dclk occupies the lower 16 bits of the same message; confirm
 * against the PMFW interface definition.
 */
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
					      enum smu_clk_type clk_type,
					      uint32_t min,
					      uint32_t max)
{
	int ret = 0;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinGfxClk,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxGfxClk,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinFclkByFreq,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxFclkByFreq,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinSocclkByFreq,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxSocclkByFreq,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		/* vclk goes in the upper 16 bits of the VCN message param */
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min << 16, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max << 16, NULL);
		if (ret)
			return ret;
		break;
	case SMU_DCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
1185 :
/*
 * vangogh_force_clk_levels - force a clock into a subset of its DPM levels.
 *
 * @mask: bitmask of allowed DPM levels.  The lowest set bit selects the
 *        level used as hard minimum and the highest set bit the level used
 *        as soft maximum; a zero mask selects level 0 for both.
 *
 * The chosen levels are translated to frequencies via the DPM clock table
 * and then programmed through the per-clock min/max messages.  Clock types
 * not handled in the switch are silently ignored (returns 0).
 */
static int vangogh_force_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0 ;

	/* lowest/highest set bits of the mask become the min/max levels */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxSocclkByFreq,
								max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinSocclkByFreq,
								min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxFclkByFreq,
								max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinFclkByFreq,
								min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;


		/* vclk is carried in the upper 16 bits of the VCN message param */
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinVcn,
								min_freq << 16, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxVcn,
								max_freq << 16, NULL);
		if (ret)
			return ret;

		break;
	case SMU_DCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;

		/* dclk uses the raw value — presumably lower 16 bits; see PMFW i/f */
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min_freq, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max_freq, NULL);
		if (ret)
			return ret;

		break;
	default:
		break;
	}

	return ret;
}
1292 :
1293 0 : static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
1294 : {
1295 0 : int ret = 0, i = 0;
1296 : uint32_t min_freq, max_freq, force_freq;
1297 : enum smu_clk_type clk_type;
1298 :
1299 0 : enum smu_clk_type clks[] = {
1300 : SMU_SOCCLK,
1301 : SMU_VCLK,
1302 : SMU_DCLK,
1303 : SMU_FCLK,
1304 : };
1305 :
1306 0 : for (i = 0; i < ARRAY_SIZE(clks); i++) {
1307 0 : clk_type = clks[i];
1308 0 : ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1309 0 : if (ret)
1310 : return ret;
1311 :
1312 0 : force_freq = highest ? max_freq : min_freq;
1313 0 : ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
1314 0 : if (ret)
1315 : return ret;
1316 : }
1317 :
1318 : return ret;
1319 : }
1320 :
1321 0 : static int vangogh_unforce_dpm_levels(struct smu_context *smu)
1322 : {
1323 0 : int ret = 0, i = 0;
1324 : uint32_t min_freq, max_freq;
1325 : enum smu_clk_type clk_type;
1326 :
1327 : struct clk_feature_map {
1328 : enum smu_clk_type clk_type;
1329 : uint32_t feature;
1330 0 : } clk_feature_map[] = {
1331 : {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
1332 : {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
1333 : {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
1334 : {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
1335 : };
1336 :
1337 0 : for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
1338 :
1339 0 : if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
1340 0 : continue;
1341 :
1342 0 : clk_type = clk_feature_map[i].clk_type;
1343 :
1344 0 : ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1345 :
1346 0 : if (ret)
1347 : return ret;
1348 :
1349 0 : ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1350 :
1351 0 : if (ret)
1352 : return ret;
1353 : }
1354 :
1355 : return ret;
1356 : }
1357 :
1358 0 : static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
1359 : {
1360 0 : int ret = 0;
1361 0 : uint32_t socclk_freq = 0, fclk_freq = 0;
1362 0 : uint32_t vclk_freq = 0, dclk_freq = 0;
1363 :
1364 0 : ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
1365 0 : if (ret)
1366 : return ret;
1367 :
1368 0 : ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
1369 0 : if (ret)
1370 : return ret;
1371 :
1372 0 : ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
1373 0 : if (ret)
1374 : return ret;
1375 :
1376 0 : ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
1377 0 : if (ret)
1378 : return ret;
1379 :
1380 0 : ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
1381 0 : if (ret)
1382 : return ret;
1383 :
1384 0 : ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
1385 0 : if (ret)
1386 : return ret;
1387 :
1388 0 : ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
1389 0 : if (ret)
1390 : return ret;
1391 :
1392 0 : ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
1393 : if (ret)
1394 : return ret;
1395 :
1396 : return ret;
1397 : }
1398 :
1399 0 : static int vangogh_set_performance_level(struct smu_context *smu,
1400 : enum amd_dpm_forced_level level)
1401 : {
1402 0 : int ret = 0, i;
1403 : uint32_t soc_mask, mclk_mask, fclk_mask;
1404 0 : uint32_t vclk_mask = 0, dclk_mask = 0;
1405 :
1406 0 : smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
1407 0 : smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
1408 :
1409 0 : switch (level) {
1410 : case AMD_DPM_FORCED_LEVEL_HIGH:
1411 0 : smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
1412 0 : smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1413 :
1414 :
1415 0 : ret = vangogh_force_dpm_limit_value(smu, true);
1416 0 : if (ret)
1417 : return ret;
1418 : break;
1419 : case AMD_DPM_FORCED_LEVEL_LOW:
1420 0 : smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1421 0 : smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
1422 :
1423 0 : ret = vangogh_force_dpm_limit_value(smu, false);
1424 0 : if (ret)
1425 : return ret;
1426 : break;
1427 : case AMD_DPM_FORCED_LEVEL_AUTO:
1428 0 : smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1429 0 : smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1430 :
1431 0 : ret = vangogh_unforce_dpm_levels(smu);
1432 0 : if (ret)
1433 : return ret;
1434 : break;
1435 : case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1436 0 : smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
1437 0 : smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
1438 :
1439 0 : ret = vangogh_get_profiling_clk_mask(smu, level,
1440 : &vclk_mask,
1441 : &dclk_mask,
1442 : &mclk_mask,
1443 : &fclk_mask,
1444 : &soc_mask);
1445 : if (ret)
1446 : return ret;
1447 :
1448 0 : vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
1449 0 : vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1450 0 : vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
1451 0 : vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
1452 0 : break;
1453 : case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1454 0 : smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1455 0 : smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
1456 0 : break;
1457 : case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1458 0 : smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1459 0 : smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1460 :
1461 0 : ret = vangogh_get_profiling_clk_mask(smu, level,
1462 : NULL,
1463 : NULL,
1464 : &mclk_mask,
1465 : &fclk_mask,
1466 : NULL);
1467 : if (ret)
1468 : return ret;
1469 :
1470 0 : vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
1471 0 : break;
1472 : case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1473 0 : smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
1474 0 : smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
1475 :
1476 0 : ret = vangogh_set_peak_clock_by_device(smu);
1477 0 : if (ret)
1478 : return ret;
1479 : break;
1480 : case AMD_DPM_FORCED_LEVEL_MANUAL:
1481 : case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1482 : default:
1483 : return 0;
1484 : }
1485 :
1486 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1487 : smu->gfx_actual_hard_min_freq, NULL);
1488 0 : if (ret)
1489 : return ret;
1490 :
1491 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1492 : smu->gfx_actual_soft_max_freq, NULL);
1493 0 : if (ret)
1494 : return ret;
1495 :
1496 0 : if (smu->adev->pm.fw_version >= 0x43f1b00) {
1497 0 : for (i = 0; i < smu->cpu_core_num; i++) {
1498 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
1499 0 : ((i << 20)
1500 0 : | smu->cpu_actual_soft_min_freq),
1501 : NULL);
1502 0 : if (ret)
1503 : return ret;
1504 :
1505 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
1506 : ((i << 20)
1507 0 : | smu->cpu_actual_soft_max_freq),
1508 : NULL);
1509 0 : if (ret)
1510 : return ret;
1511 : }
1512 : }
1513 :
1514 : return ret;
1515 : }
1516 :
/*
 * vangogh_read_sensor - service an amdgpu power/thermal sensor query.
 *
 * Fetches the requested metric from the PMFW metrics table and writes it to
 * @data, setting *@size to the number of bytes produced.  Clock sensors are
 * multiplied by 100 — presumably converting MHz metrics to the 10 kHz unit
 * the sensor interface expects; confirm against the amdgpu sensor ABI.
 * Returns -EOPNOTSUPP for sensors not backed by this ASIC.
 */
static int vangogh_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_AVERAGE_GFXACTIVITY,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_AVERAGE_SOCKETPOWER,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_TEMPERATURE_EDGE,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_TEMPERATURE_HOTSPOT,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_CURR_UCLK,
						   (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_CURR_GFXCLK,
						   (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_VOLTAGE_VDDGFX,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_VOLTAGE_VDDSOC,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_CPU_CLK:
		/* one 16-bit clock value per CPU core */
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_AVERAGE_CPUCLK,
						   (uint32_t *)data);
		*size = smu->cpu_core_num * sizeof(uint16_t);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
1590 :
1591 0 : static int vangogh_set_watermarks_table(struct smu_context *smu,
1592 : struct pp_smu_wm_range_sets *clock_ranges)
1593 : {
1594 : int i;
1595 0 : int ret = 0;
1596 0 : Watermarks_t *table = smu->smu_table.watermarks_table;
1597 :
1598 0 : if (!table || !clock_ranges)
1599 : return -EINVAL;
1600 :
1601 0 : if (clock_ranges) {
1602 0 : if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1603 0 : clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1604 : return -EINVAL;
1605 :
1606 0 : for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1607 0 : table->WatermarkRow[WM_DCFCLK][i].MinClock =
1608 0 : clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1609 0 : table->WatermarkRow[WM_DCFCLK][i].MaxClock =
1610 0 : clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1611 0 : table->WatermarkRow[WM_DCFCLK][i].MinMclk =
1612 0 : clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1613 0 : table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
1614 0 : clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1615 :
1616 0 : table->WatermarkRow[WM_DCFCLK][i].WmSetting =
1617 0 : clock_ranges->reader_wm_sets[i].wm_inst;
1618 : }
1619 :
1620 0 : for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1621 0 : table->WatermarkRow[WM_SOCCLK][i].MinClock =
1622 0 : clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1623 0 : table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1624 0 : clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1625 0 : table->WatermarkRow[WM_SOCCLK][i].MinMclk =
1626 0 : clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1627 0 : table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
1628 0 : clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1629 :
1630 0 : table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1631 0 : clock_ranges->writer_wm_sets[i].wm_inst;
1632 : }
1633 :
1634 0 : smu->watermarks_bitmap |= WATERMARKS_EXIST;
1635 : }
1636 :
1637 : /* pass data to smu controller */
1638 0 : if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1639 : !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1640 0 : ret = smu_cmn_write_watermarks_table(smu);
1641 0 : if (ret) {
1642 0 : dev_err(smu->adev->dev, "Failed to update WMTABLE!");
1643 0 : return ret;
1644 : }
1645 0 : smu->watermarks_bitmap |= WATERMARKS_LOADED;
1646 : }
1647 :
1648 : return 0;
1649 : }
1650 :
/*
 * vangogh_get_legacy_gpu_metrics - fill a v2.2 gpu_metrics table (old PMFW).
 *
 * Variant used when the SMU interface version is < 0x3: reads the legacy
 * (flat) SmuMetrics_legacy_t layout and translates it into the common
 * gpu_metrics_v2_2 structure.  Returns the size of the filled table, or a
 * negative errno if the metrics fetch fails.  On success *@table points at
 * the driver-owned metrics buffer (no ownership transfer).
 */
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	/* bypass_cache = true: always fetch fresh data from the PMFW */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	/* only the first 4 core/L3 slots are populated by this PMFW layout */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	/* translate the ASIC throttler bits into the ASIC-independent mask */
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1707 :
/*
 * vangogh_get_gpu_metrics - fill a v2.2 gpu_metrics table (current PMFW).
 *
 * Variant for SMU interface version >= 0x3, where SmuMetrics_t carries
 * separate Current and Average sample sets; instantaneous fields come from
 * .Current and averaged fields from .Average.  Returns the table size, or
 * a negative errno if the metrics fetch fails.  On success *@table points
 * at the driver-owned metrics buffer (no ownership transfer).
 */
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	/* bypass_cache = true: always fetch fresh data from the PMFW */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	/* only the first 4 core/L3 slots are populated by this PMFW layout */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	/* translate the ASIC throttler bits into the ASIC-independent mask */
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1771 :
1772 0 : static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
1773 : void **table)
1774 : {
1775 0 : struct amdgpu_device *adev = smu->adev;
1776 : uint32_t if_version;
1777 0 : int ret = 0;
1778 :
1779 0 : ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
1780 0 : if (ret) {
1781 0 : dev_err(adev->dev, "Failed to get smu if version!\n");
1782 0 : return ret;
1783 : }
1784 :
1785 0 : if (if_version < 0x3)
1786 0 : ret = vangogh_get_legacy_gpu_metrics(smu, table);
1787 : else
1788 0 : ret = vangogh_get_gpu_metrics(smu, table);
1789 :
1790 0 : return ret;
1791 : }
1792 :
1793 0 : static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
1794 : long input[], uint32_t size)
1795 : {
1796 0 : int ret = 0;
1797 0 : struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1798 :
1799 0 : if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
1800 0 : dev_warn(smu->adev->dev,
1801 : "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
1802 0 : return -EINVAL;
1803 : }
1804 :
1805 0 : switch (type) {
1806 : case PP_OD_EDIT_CCLK_VDDC_TABLE:
1807 0 : if (size != 3) {
1808 0 : dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
1809 0 : return -EINVAL;
1810 : }
1811 0 : if (input[0] >= smu->cpu_core_num) {
1812 0 : dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n",
1813 : smu->cpu_core_num);
1814 : }
1815 0 : smu->cpu_core_id_select = input[0];
1816 0 : if (input[1] == 0) {
1817 0 : if (input[2] < smu->cpu_default_soft_min_freq) {
1818 0 : dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1819 : input[2], smu->cpu_default_soft_min_freq);
1820 0 : return -EINVAL;
1821 : }
1822 0 : smu->cpu_actual_soft_min_freq = input[2];
1823 0 : } else if (input[1] == 1) {
1824 0 : if (input[2] > smu->cpu_default_soft_max_freq) {
1825 0 : dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1826 : input[2], smu->cpu_default_soft_max_freq);
1827 0 : return -EINVAL;
1828 : }
1829 0 : smu->cpu_actual_soft_max_freq = input[2];
1830 : } else {
1831 : return -EINVAL;
1832 : }
1833 : break;
1834 : case PP_OD_EDIT_SCLK_VDDC_TABLE:
1835 0 : if (size != 2) {
1836 0 : dev_err(smu->adev->dev, "Input parameter number not correct\n");
1837 0 : return -EINVAL;
1838 : }
1839 :
1840 0 : if (input[0] == 0) {
1841 0 : if (input[1] < smu->gfx_default_hard_min_freq) {
1842 0 : dev_warn(smu->adev->dev,
1843 : "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1844 : input[1], smu->gfx_default_hard_min_freq);
1845 0 : return -EINVAL;
1846 : }
1847 0 : smu->gfx_actual_hard_min_freq = input[1];
1848 0 : } else if (input[0] == 1) {
1849 0 : if (input[1] > smu->gfx_default_soft_max_freq) {
1850 0 : dev_warn(smu->adev->dev,
1851 : "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1852 : input[1], smu->gfx_default_soft_max_freq);
1853 0 : return -EINVAL;
1854 : }
1855 0 : smu->gfx_actual_soft_max_freq = input[1];
1856 : } else {
1857 : return -EINVAL;
1858 : }
1859 : break;
1860 : case PP_OD_RESTORE_DEFAULT_TABLE:
1861 0 : if (size != 0) {
1862 0 : dev_err(smu->adev->dev, "Input parameter number not correct\n");
1863 0 : return -EINVAL;
1864 : } else {
1865 0 : smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1866 0 : smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1867 0 : smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
1868 0 : smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
1869 : }
1870 0 : break;
1871 : case PP_OD_COMMIT_DPM_TABLE:
1872 0 : if (size != 0) {
1873 0 : dev_err(smu->adev->dev, "Input parameter number not correct\n");
1874 0 : return -EINVAL;
1875 : } else {
1876 0 : if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
1877 0 : dev_err(smu->adev->dev,
1878 : "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
1879 : smu->gfx_actual_hard_min_freq,
1880 : smu->gfx_actual_soft_max_freq);
1881 0 : return -EINVAL;
1882 : }
1883 :
1884 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1885 : smu->gfx_actual_hard_min_freq, NULL);
1886 0 : if (ret) {
1887 0 : dev_err(smu->adev->dev, "Set hard min sclk failed!");
1888 0 : return ret;
1889 : }
1890 :
1891 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1892 : smu->gfx_actual_soft_max_freq, NULL);
1893 0 : if (ret) {
1894 0 : dev_err(smu->adev->dev, "Set soft max sclk failed!");
1895 0 : return ret;
1896 : }
1897 :
1898 0 : if (smu->adev->pm.fw_version < 0x43f1b00) {
1899 0 : dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
1900 0 : break;
1901 : }
1902 :
1903 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
1904 0 : ((smu->cpu_core_id_select << 20)
1905 0 : | smu->cpu_actual_soft_min_freq),
1906 : NULL);
1907 0 : if (ret) {
1908 0 : dev_err(smu->adev->dev, "Set hard min cclk failed!");
1909 0 : return ret;
1910 : }
1911 :
1912 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
1913 0 : ((smu->cpu_core_id_select << 20)
1914 0 : | smu->cpu_actual_soft_max_freq),
1915 : NULL);
1916 0 : if (ret) {
1917 0 : dev_err(smu->adev->dev, "Set soft max cclk failed!");
1918 0 : return ret;
1919 : }
1920 : }
1921 : break;
1922 : default:
1923 : return -ENOSYS;
1924 : }
1925 :
1926 : return ret;
1927 : }
1928 :
1929 0 : static int vangogh_set_default_dpm_tables(struct smu_context *smu)
1930 : {
1931 0 : struct smu_table_context *smu_table = &smu->smu_table;
1932 :
1933 0 : return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
1934 : }
1935 :
1936 0 : static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
1937 : {
1938 0 : DpmClocks_t *clk_table = smu->smu_table.clocks_table;
1939 :
1940 0 : smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
1941 0 : smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
1942 0 : smu->gfx_actual_hard_min_freq = 0;
1943 0 : smu->gfx_actual_soft_max_freq = 0;
1944 :
1945 0 : smu->cpu_default_soft_min_freq = 1400;
1946 0 : smu->cpu_default_soft_max_freq = 3500;
1947 0 : smu->cpu_actual_soft_min_freq = 0;
1948 0 : smu->cpu_actual_soft_max_freq = 0;
1949 :
1950 0 : return 0;
1951 : }
1952 :
1953 0 : static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
1954 : {
1955 0 : DpmClocks_t *table = smu->smu_table.clocks_table;
1956 : int i;
1957 :
1958 0 : if (!clock_table || !table)
1959 : return -EINVAL;
1960 :
1961 0 : for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
1962 0 : clock_table->SocClocks[i].Freq = table->SocClocks[i];
1963 0 : clock_table->SocClocks[i].Vol = table->SocVoltage[i];
1964 : }
1965 :
1966 0 : for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1967 0 : clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
1968 0 : clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
1969 : }
1970 :
1971 0 : for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1972 0 : clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
1973 0 : clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
1974 : }
1975 :
1976 : return 0;
1977 : }
1978 :
1979 :
1980 0 : static int vangogh_system_features_control(struct smu_context *smu, bool en)
1981 : {
1982 0 : struct amdgpu_device *adev = smu->adev;
1983 0 : int ret = 0;
1984 :
1985 0 : if (adev->pm.fw_version >= 0x43f1700 && !en)
1986 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
1987 : RLC_STATUS_OFF, NULL);
1988 :
1989 0 : return ret;
1990 : }
1991 :
/*
 * vangogh_post_smu_init - post-init GFXOFF enablement and WGP power-save.
 *
 * Enables GFXOFF when GFX DPM and GFX power gating are both available,
 * otherwise masks GFXOFF out of pp_feature. Then, if some CUs are fused
 * off, asks the PMFW to power down the unused WGPs (while honoring the
 * always-on WGP mask programmed in RLC).
 *
 * Returns 0 on success or a negative errno from the SMU message helpers.
 */
static int vangogh_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tmp;
	int ret = 0;
	uint8_t aon_bits = 0;
	/* Two CUs in one WGP */
	uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
	/* Theoretical CU count if nothing were fused off. */
	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* allow message will be sent after enable message on Vangogh*/
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
			(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
		if (ret) {
			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
			return ret;
		}
	} else {
		/* Without GFX DPM + PG, GFXOFF cannot work; mask it out. */
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
	}

	/* if all CUs are active, no need to power off any WGPs */
	if (total_cu == adev->gfx.cu_info.number)
		return 0;

	/*
	 * Calculate the total bits number of always on WGPs for all SA/SEs in
	 * RLC_PG_ALWAYS_ON_WGP_MASK.
	 */
	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;

	/* The per-SA mask applies to every SA on every SE. */
	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* Do not request any WGPs less than set in the AON_WGP_MASK */
	if (aon_bits > req_active_wgps) {
		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
		return 0;
	} else {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
	}
}
2037 :
2038 0 : static int vangogh_mode_reset(struct smu_context *smu, int type)
2039 : {
2040 0 : int ret = 0, index = 0;
2041 :
2042 0 : index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2043 : SMU_MSG_GfxDeviceDriverReset);
2044 0 : if (index < 0)
2045 0 : return index == -EACCES ? 0 : index;
2046 :
2047 0 : mutex_lock(&smu->message_lock);
2048 :
2049 0 : ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
2050 :
2051 0 : mutex_unlock(&smu->message_lock);
2052 :
2053 0 : mdelay(10);
2054 :
2055 : return ret;
2056 : }
2057 :
/* Convenience wrapper: perform a mode-2 (soft) GPU reset. */
static int vangogh_mode2_reset(struct smu_context *smu)
{
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
2062 :
2063 : /**
2064 : * vangogh_get_gfxoff_status - Get gfxoff status
2065 : *
2066 : * @smu: amdgpu_device pointer
2067 : *
2068 : * Get current gfxoff status
2069 : *
2070 : * Return:
2071 : * * 0 - GFXOFF (default if enabled).
2072 : * * 1 - Transition out of GFX State.
2073 : * * 2 - Not in GFXOFF.
2074 : * * 3 - Transition into GFXOFF.
2075 : */
2076 0 : static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
2077 : {
2078 0 : struct amdgpu_device *adev = smu->adev;
2079 : u32 reg, gfxoff_status;
2080 :
2081 0 : reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
2082 0 : gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
2083 0 : >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
2084 :
2085 0 : return gfxoff_status;
2086 : }
2087 :
/*
 * vangogh_get_power_limit - query slow/fast PPT limits from the PMFW.
 *
 * @smu:                 smu context
 * @current_power_limit: optional out, current slow PPT limit in watts
 * @default_power_limit: optional out, default slow PPT limit in watts
 * @max_power_limit:     optional out, max slow PPT limit in watts
 *
 * Fast PPT values are stored into the smu_11_5_power_context rather than
 * returned through parameters. Returns 0 on success (including the silent
 * early-out on old firmware) or a negative errno on message failure.
 */
static int vangogh_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit)
{
	struct smu_11_5_power_context *power_context =
					smu->smu_power.power_context;
	uint32_t ppt_limit;
	int ret = 0;

	/*
	 * NOTE(review): firmware older than 0x43f1e00 presumably lacks the
	 * Get*PPTLimit messages — treated as "nothing to report", not an
	 * error. Confirm against PMFW release notes.
	 */
	if (smu->adev->pm.fw_version < 0x43f1e00)
		return ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	if (current_power_limit)
		*current_power_limit = ppt_limit / 1000;
	if (default_power_limit)
		*default_power_limit = ppt_limit / 1000;
	if (max_power_limit)
		*max_power_limit = 29;	/* hard cap in watts — platform constant, TODO confirm */

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	power_context->current_fast_ppt_limit =
			power_context->default_fast_ppt_limit = ppt_limit / 1000;
	power_context->max_fast_ppt_limit = 30;	/* watts — platform constant, TODO confirm */

	return ret;
}
2126 :
2127 0 : static int vangogh_get_ppt_limit(struct smu_context *smu,
2128 : uint32_t *ppt_limit,
2129 : enum smu_ppt_limit_type type,
2130 : enum smu_ppt_limit_level level)
2131 : {
2132 0 : struct smu_11_5_power_context *power_context =
2133 : smu->smu_power.power_context;
2134 :
2135 0 : if (!power_context)
2136 : return -EOPNOTSUPP;
2137 :
2138 0 : if (type == SMU_FAST_PPT_LIMIT) {
2139 0 : switch (level) {
2140 : case SMU_PPT_LIMIT_MAX:
2141 0 : *ppt_limit = power_context->max_fast_ppt_limit;
2142 0 : break;
2143 : case SMU_PPT_LIMIT_CURRENT:
2144 0 : *ppt_limit = power_context->current_fast_ppt_limit;
2145 0 : break;
2146 : case SMU_PPT_LIMIT_DEFAULT:
2147 0 : *ppt_limit = power_context->default_fast_ppt_limit;
2148 0 : break;
2149 : default:
2150 : break;
2151 : }
2152 : }
2153 :
2154 : return 0;
2155 : }
2156 :
2157 0 : static int vangogh_set_power_limit(struct smu_context *smu,
2158 : enum smu_ppt_limit_type limit_type,
2159 : uint32_t ppt_limit)
2160 : {
2161 0 : struct smu_11_5_power_context *power_context =
2162 : smu->smu_power.power_context;
2163 0 : int ret = 0;
2164 :
2165 0 : if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
2166 0 : dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
2167 0 : return -EOPNOTSUPP;
2168 : }
2169 :
2170 0 : switch (limit_type) {
2171 : case SMU_DEFAULT_PPT_LIMIT:
2172 0 : ret = smu_cmn_send_smc_msg_with_param(smu,
2173 : SMU_MSG_SetSlowPPTLimit,
2174 : ppt_limit * 1000, /* convert from watt to milliwatt */
2175 : NULL);
2176 0 : if (ret)
2177 : return ret;
2178 :
2179 0 : smu->current_power_limit = ppt_limit;
2180 0 : break;
2181 : case SMU_FAST_PPT_LIMIT:
2182 0 : ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
2183 0 : if (ppt_limit > power_context->max_fast_ppt_limit) {
2184 0 : dev_err(smu->adev->dev,
2185 : "New power limit (%d) is over the max allowed %d\n",
2186 : ppt_limit, power_context->max_fast_ppt_limit);
2187 0 : return ret;
2188 : }
2189 :
2190 0 : ret = smu_cmn_send_smc_msg_with_param(smu,
2191 : SMU_MSG_SetFastPPTLimit,
2192 : ppt_limit * 1000, /* convert from watt to milliwatt */
2193 : NULL);
2194 0 : if (ret)
2195 : return ret;
2196 :
2197 0 : power_context->current_fast_ppt_limit = ppt_limit;
2198 0 : break;
2199 : default:
2200 : return -EINVAL;
2201 : }
2202 :
2203 : return ret;
2204 : }
2205 :
2206 : /**
2207 : * vangogh_set_gfxoff_residency
2208 : *
2209 : * @smu: amdgpu_device pointer
2210 : * @start: start/stop residency log
2211 : *
2212 : * This function will be used to log gfxoff residency
2213 : *
2214 : *
2215 : * Returns standard response codes.
2216 : */
2217 0 : static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
2218 : {
2219 0 : int ret = 0;
2220 : u32 residency;
2221 0 : struct amdgpu_device *adev = smu->adev;
2222 :
2223 0 : if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
2224 : return 0;
2225 :
2226 0 : ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
2227 : start, &residency);
2228 :
2229 0 : if (!start)
2230 0 : adev->gfx.gfx_off_residency = residency;
2231 :
2232 0 : return ret;
2233 : }
2234 :
2235 : /**
2236 : * vangogh_get_gfxoff_residency
2237 : *
2238 : * @smu: amdgpu_device pointer
2239 : *
2240 : * This function will be used to get gfxoff residency.
2241 : *
2242 : * Returns standard response codes.
2243 : */
2244 0 : static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
2245 : {
2246 0 : struct amdgpu_device *adev = smu->adev;
2247 :
2248 0 : *residency = adev->gfx.gfx_off_residency;
2249 :
2250 0 : return 0;
2251 : }
2252 :
2253 : /**
2254 : * vangogh_get_gfxoff_entrycount - get gfxoff entry count
2255 : *
2256 : * @smu: amdgpu_device pointer
2257 : *
2258 : * This function will be used to get gfxoff entry count
2259 : *
2260 : * Returns standard response codes.
2261 : */
2262 0 : static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
2263 : {
2264 0 : int ret = 0, value = 0;
2265 0 : struct amdgpu_device *adev = smu->adev;
2266 :
2267 0 : if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
2268 : return 0;
2269 :
2270 0 : ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
2271 0 : *entrycount = value + adev->gfx.gfx_off_entrycount;
2272 :
2273 0 : return ret;
2274 : }
2275 :
/* Vangogh implementation of the swsmu pptable_funcs dispatch table. */
static const struct pptable_funcs vangogh_ppt_funcs = {

	/* firmware / table setup */
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.init_smc_tables = vangogh_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	/* messaging */
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	/* block power gating */
	.dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
	.is_dpm_running = vangogh_is_dpm_running,
	.read_sensor = vangogh_read_sensor,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_watermarks_table = vangogh_set_watermarks_table,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.interrupt_work = smu_v11_0_interrupt_work,
	.get_gpu_metrics = vangogh_common_get_gpu_metrics,
	/* overdrive / clock control */
	.od_edit_dpm_table = vangogh_od_edit_dpm_table,
	.print_clk_levels = vangogh_common_print_clk_levels,
	.set_default_dpm_table = vangogh_set_default_dpm_tables,
	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
	.system_features_control = vangogh_system_features_control,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_profile_mode = vangogh_set_power_profile_mode,
	.get_power_profile_mode = vangogh_get_power_profile_mode,
	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
	.force_clk_levels = vangogh_force_clk_levels,
	.set_performance_level = vangogh_set_performance_level,
	.post_init = vangogh_post_smu_init,
	.mode2_reset = vangogh_mode2_reset,
	/* GFXOFF */
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.get_gfx_off_status = vangogh_get_gfxoff_status,
	.get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
	.get_gfx_off_residency = vangogh_get_gfxoff_residency,
	.set_gfx_off_residency = vangogh_set_gfxoff_residency,
	/* power limits */
	.get_ppt_limit = vangogh_get_ppt_limit,
	.get_power_limit = vangogh_get_power_limit,
	.set_power_limit = vangogh_set_power_limit,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};
2321 :
/*
 * vangogh_set_ppt_funcs - wire the Vangogh dispatch and mapping tables
 * into the smu context and mark it as an APU.
 */
void vangogh_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &vangogh_ppt_funcs;
	smu->message_map = vangogh_message_map;
	smu->feature_map = vangogh_feature_mask_map;
	smu->table_map = vangogh_table_map;
	smu->workload_map = vangogh_workload_map;
	smu->is_apu = true;
	/* Program the smu mailbox register offsets for the v11 interface. */
	smu_v11_0_set_smu_mailbox_registers(smu);
}
|