Line data Source code
1 : /*
2 : * Copyright 2015 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #include <linux/delay.h>
25 : #include <linux/gfp.h>
26 : #include <linux/kernel.h>
27 : #include <linux/ktime.h>
28 : #include <linux/slab.h>
29 : #include <linux/types.h>
30 :
31 : #include "cgs_common.h"
32 : #include "smu/smu_8_0_d.h"
33 : #include "smu/smu_8_0_sh_mask.h"
34 : #include "smu8.h"
35 : #include "smu8_fusion.h"
36 : #include "smu8_smumgr.h"
37 : #include "cz_ppsmc.h"
38 : #include "smu_ucode_xfer_cz.h"
39 : #include "gca/gfx_8_0_d.h"
40 : #include "gca/gfx_8_0_sh_mask.h"
41 : #include "smumgr.h"
42 :
/* Round (x) up to the next multiple of 32 bytes (scratch sub-allocation granularity). */
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
44 :
/*
 * Firmware images the driver registers with the SMU loader; iterated by
 * smu8_smu_populate_firmware_entries() in this order.
 */
static const enum smu8_scratch_entry firmware_list[] = {
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
55 :
56 0 : static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
57 : {
58 0 : if (hwmgr == NULL || hwmgr->device == NULL)
59 : return 0;
60 :
61 0 : return cgs_read_register(hwmgr->device,
62 : mmSMU_MP1_SRBM2P_ARG_0);
63 : }
64 :
65 : /* Send a message to the SMC, and wait for its response.*/
66 0 : static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
67 : uint16_t msg, uint32_t parameter)
68 : {
69 0 : int result = 0;
70 : ktime_t t_start;
71 : s64 elapsed_us;
72 :
73 0 : if (hwmgr == NULL || hwmgr->device == NULL)
74 : return -EINVAL;
75 :
76 0 : result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
77 : SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
78 0 : if (result != 0) {
79 : /* Read the last message to SMU, to report actual cause */
80 0 : uint32_t val = cgs_read_register(hwmgr->device,
81 : mmSMU_MP1_SRBM2P_MSG_0);
82 0 : pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
83 : __func__, msg, val);
84 0 : return result;
85 : }
86 0 : t_start = ktime_get();
87 :
88 0 : cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
89 :
90 0 : cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
91 0 : cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
92 :
93 0 : result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
94 : SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
95 :
96 0 : elapsed_us = ktime_us_delta(ktime_get(), t_start);
97 :
98 0 : WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
99 : __func__, msg, parameter, elapsed_us);
100 :
101 : return result;
102 : }
103 :
/* Send an argument-less message to the SMC; the argument register is set to 0. */
static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
}
108 :
109 0 : static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
110 : uint32_t smc_address, uint32_t limit)
111 : {
112 0 : if (hwmgr == NULL || hwmgr->device == NULL)
113 : return -EINVAL;
114 :
115 0 : if (0 != (3 & smc_address)) {
116 0 : pr_err("SMC address must be 4 byte aligned\n");
117 0 : return -EINVAL;
118 : }
119 :
120 0 : if (limit <= (smc_address + 3)) {
121 0 : pr_err("SMC address beyond the SMC RAM area\n");
122 0 : return -EINVAL;
123 : }
124 :
125 0 : cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
126 : SMN_MP1_SRAM_START_ADDR + smc_address);
127 :
128 0 : return 0;
129 : }
130 :
131 0 : static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
132 : uint32_t smc_address, uint32_t value, uint32_t limit)
133 : {
134 : int result;
135 :
136 0 : if (hwmgr == NULL || hwmgr->device == NULL)
137 : return -EINVAL;
138 :
139 0 : result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
140 0 : if (!result)
141 0 : cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
142 :
143 : return result;
144 : }
145 :
146 0 : static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
147 : uint32_t firmware)
148 : {
149 : int i;
150 0 : uint32_t index = SMN_MP1_SRAM_START_ADDR +
151 : SMU8_FIRMWARE_HEADER_LOCATION +
152 : offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
153 :
154 0 : if (hwmgr == NULL || hwmgr->device == NULL)
155 : return -EINVAL;
156 :
157 0 : cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
158 :
159 0 : for (i = 0; i < hwmgr->usec_timeout; i++) {
160 0 : if (firmware ==
161 0 : (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
162 : break;
163 0 : udelay(1);
164 : }
165 :
166 0 : if (i >= hwmgr->usec_timeout) {
167 0 : pr_err("SMU check loaded firmware failed.\n");
168 0 : return -EINVAL;
169 : }
170 :
171 : return 0;
172 : }
173 :
/*
 * Point the CP compute (MEC) instruction-cache base at the MEC ucode image
 * and halt both MEC micro-engines so the new base takes effect safely.
 *
 * Returns 0 on success, -EINVAL on bad arguments or if the MEC firmware
 * info cannot be obtained from CGS.
 */
static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
	uint32_t reg_data;
	uint32_t tmp;
	int ret = 0;
	struct cgs_firmware_info info = {0};

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	ret = cgs_get_firmware_info(hwmgr->device,
						CGS_UCODE_ID_CP_MEC, &info);

	if (ret)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = cgs_read_register(hwmgr->device,
					mmCP_MEC_CNTL);
	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);

	/* Configure the instruction-cache base control before setting the base. */
	tmp = cgs_read_register(hwmgr->device,
					mmCP_CPC_IC_BASE_CNTL);

	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);

	/* Low/high halves of the MEC image address, masked to the field widths. */
	reg_data = lower_32_bits(info.mc_addr) &
			PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(info.mc_addr) &
			PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
216 :
/*
 * Map a driver-side smu8_scratch_entry id onto the SMU task argument
 * (UCODE_ID_* / TASK_ARG_*) expected by the TOC task descriptors.
 *
 * On Stoney the second SDMA engine and second MEC jump table do not
 * exist, so their ids alias the first instance.  Returns 0 for any
 * enum value not listed below.
 */
static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		if (hwmgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_SDMA0;
		else
			ret = UCODE_ID_SDMA1;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		if (hwmgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_CP_MEC_JT1;
		else
			ret = UCODE_ID_CP_MEC_JT2;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	/* All MMIO-save/restore data blobs share one task argument. */
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
289 :
290 : static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
291 : {
292 0 : enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
293 :
294 : switch (fw_type) {
295 : case UCODE_ID_SDMA0:
296 : result = CGS_UCODE_ID_SDMA0;
297 : break;
298 : case UCODE_ID_SDMA1:
299 : result = CGS_UCODE_ID_SDMA1;
300 : break;
301 : case UCODE_ID_CP_CE:
302 : result = CGS_UCODE_ID_CP_CE;
303 : break;
304 : case UCODE_ID_CP_PFP:
305 : result = CGS_UCODE_ID_CP_PFP;
306 : break;
307 : case UCODE_ID_CP_ME:
308 : result = CGS_UCODE_ID_CP_ME;
309 : break;
310 : case UCODE_ID_CP_MEC_JT1:
311 : result = CGS_UCODE_ID_CP_MEC_JT1;
312 : break;
313 : case UCODE_ID_CP_MEC_JT2:
314 : result = CGS_UCODE_ID_CP_MEC_JT2;
315 : break;
316 : case UCODE_ID_RLC_G:
317 : result = CGS_UCODE_ID_RLC_G;
318 : break;
319 : default:
320 : break;
321 : }
322 :
323 : return result;
324 : }
325 :
/*
 * Append one scratch-buffer task (save/load/initialize) to the TOC.
 *
 * Claims the next TOC task slot, points it at the scratch-buffer entry
 * registered for @fw_enum, and links it to the following slot unless
 * @is_last marks the end of the task list.
 *
 * Returns 0 on success, -EINVAL when no scratch entry matches @fw_enum.
 * NOTE(review): the TOC slot is consumed (and partially filled) even on
 * the error path; visible callers in this file ignore the return value.
 */
static int smu8_smu_populate_single_scratch_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* After the post-increment above, used_count is the next task's index. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Locate the scratch buffer registered for this firmware id. */
	for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
		if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->scratch_buffer[i].data_size;

	/* IH register restore wants its metadata command seeded up front. */
	if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		struct smu8_ih_meta_data *pIHReg_restore =
			(struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
362 :
/*
 * Append one TASK_TYPE_UCODE_LOAD task to the TOC for the driver-owned
 * firmware image registered for @fw_enum.
 *
 * Returns 0 on success, -EINVAL when @fw_enum has no entry in
 * driver_buffer (see smu8_smu_populate_firmware_entries()).
 * NOTE(review): as with the scratch variant, the TOC slot is consumed
 * even on failure; callers in this file ignore the return value.
 */
static int smu8_smu_populate_single_ucode_load_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* After the post-increment above, used_count is the next task's index. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	for (i = 0; i < smu8_smu->driver_buffer_length; i++)
		if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->driver_buffer[i].data_size;

	return 0;
}
392 :
/*
 * Record a single-task TOC job that saves the RLC SRM ARAM contents, and
 * remember its entry index so it can be executed later (toc_entry_aram).
 */
static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
	smu8_smu_populate_single_scratch_task(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
		TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
404 :
405 : static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
406 : {
407 : int i;
408 0 : struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
409 0 : struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
410 :
411 0 : for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
412 0 : toc->JobList[i] = (uint8_t)IGNORE_JOB;
413 :
414 : return 0;
415 : }
416 :
/*
 * Build the JOB_GFX_SAVE job: save the RLC scratch area and the RLC SRM
 * DRAM contents before gfx enters the VDDGFX low-power state.  Tasks are
 * linked in the order they are populated; the second task ends the list.
 */
static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_SAVE, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
433 :
434 :
/*
 * Build the JOB_GFX_RESTORE job: reload all CP/RLC ucode images and then
 * restore the RLC scratch/SRM state saved on VDDGFX entry.  On Stoney the
 * second MEC jump table is absent, so JT1 is loaded in its place.  Tasks
 * are linked in population order; the final scratch task ends the list.
 */
static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);

	if (hwmgr->chip_id == CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	else
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);

	/* populate scratch */
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
476 :
/*
 * Record a single-task TOC job that initializes the multimedia power-log
 * scratch buffer; its index is kept in toc_entry_power_profiling_index.
 */
static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				TASK_TYPE_INITIALIZE, true);
	return 0;
}
488 :
/*
 * Build the boot-time firmware-load job and remember its entry index in
 * toc_entry_initialize_index.  Stoney skips SDMA1 and CP_MEC_JT2 (those
 * images alias their first instances on that chip — see
 * smu8_translate_firmware_enum_to_arg()).  Task order is fixed; RLC_G
 * terminates the list.
 */
static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);

	return 0;
}
516 :
/*
 * Record a single-task TOC job that initializes the fusion clock-table
 * scratch buffer; its index (toc_entry_clock_table) is executed by the
 * pptable download/upload paths.
 */
static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				TASK_TYPE_INITIALIZE, true);

	return 0;
}
529 :
/*
 * Rebuild the whole TOC from scratch.  The call order below determines
 * the TOC layout (each helper records the current toc_entry_used_count
 * before appending its tasks), so it must not be reordered.
 */
static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_used_count = 0;
	smu8_smu_initialize_toc_empty_job_list(hwmgr);
	smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
	smu8_smu_construct_toc_for_power_profiling(hwmgr);
	smu8_smu_construct_toc_for_bootup(hwmgr);
	smu8_smu_construct_toc_for_clock_table(hwmgr);

	return 0;
}
545 :
546 0 : static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
547 : {
548 0 : struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
549 : uint32_t firmware_type;
550 : uint32_t i;
551 : int ret;
552 : enum cgs_ucode_id ucode_id;
553 0 : struct cgs_firmware_info info = {0};
554 :
555 0 : smu8_smu->driver_buffer_length = 0;
556 :
557 0 : for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
558 :
559 0 : firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
560 : firmware_list[i]);
561 :
562 0 : ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
563 :
564 0 : ret = cgs_get_firmware_info(hwmgr->device,
565 : ucode_id, &info);
566 :
567 0 : if (ret == 0) {
568 0 : smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
569 :
570 0 : smu8_smu->driver_buffer[i].data_size = info.image_size;
571 :
572 0 : smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
573 0 : smu8_smu->driver_buffer_length++;
574 : }
575 : }
576 :
577 0 : return 0;
578 : }
579 :
580 : static int smu8_smu_populate_single_scratch_entry(
581 : struct pp_hwmgr *hwmgr,
582 : enum smu8_scratch_entry scratch_type,
583 : uint32_t ulsize_byte,
584 : struct smu8_buffer_entry *entry)
585 : {
586 0 : struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
587 0 : uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
588 :
589 0 : entry->data_size = ulsize_byte;
590 0 : entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
591 0 : smu8_smu->smu_buffer_used_bytes;
592 0 : entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
593 0 : entry->firmware_ID = scratch_type;
594 :
595 0 : smu8_smu->smu_buffer_used_bytes += ulsize_aligned;
596 :
597 : return 0;
598 : }
599 :
600 0 : static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
601 : {
602 0 : struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
603 : unsigned long i;
604 :
605 0 : for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
606 0 : if (smu8_smu->scratch_buffer[i].firmware_ID
607 : == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
608 : break;
609 : }
610 :
611 0 : *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
612 :
613 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
614 : PPSMC_MSG_SetClkTableAddrHi,
615 0 : upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
616 : NULL);
617 :
618 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
619 : PPSMC_MSG_SetClkTableAddrLo,
620 0 : lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
621 : NULL);
622 :
623 0 : smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
624 0 : smu8_smu->toc_entry_clock_table,
625 : NULL);
626 :
627 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);
628 :
629 0 : return 0;
630 : }
631 :
632 0 : static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
633 : {
634 0 : struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
635 : unsigned long i;
636 :
637 0 : for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
638 0 : if (smu8_smu->scratch_buffer[i].firmware_ID
639 : == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
640 : break;
641 : }
642 :
643 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
644 : PPSMC_MSG_SetClkTableAddrHi,
645 0 : upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
646 : NULL);
647 :
648 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
649 : PPSMC_MSG_SetClkTableAddrLo,
650 0 : lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
651 : NULL);
652 :
653 0 : smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
654 0 : smu8_smu->toc_entry_clock_table,
655 : NULL);
656 :
657 0 : smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);
658 :
659 0 : return 0;
660 : }
661 :
/*
 * Hand the TOC to the SMU and have it load all boot firmwares, then verify
 * that every expected image reports loaded and bring up the MEC.
 *
 * Returns 0 on success, or the error from the load-status poll or MEC
 * bring-up.  NOTE(review): the smum_send_msg_* return values are ignored;
 * failures surface only through the UcodeLoadStatus poll below.
 */
static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t smc_address;
	uint32_t fw_to_check = 0;
	int ret;

	amdgpu_ucode_init_bo(hwmgr->adev);

	/* Fill driver_buffer[] and rebuild the TOC before pointing SMU at it. */
	smu8_smu_populate_firmware_entries(hwmgr);

	smu8_smu_construct_toc(hwmgr);

	/* Clear UcodeLoadStatus so the poll below sees fresh results. */
	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrHi,
					upper_32_bits(smu8_smu->toc_buffer.mc_addr),
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrLo,
					lower_32_bits(smu8_smu->toc_buffer.mc_addr),
					NULL);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);

	/* Run the ARAM save, power-profiling init, and boot-load TOC entries. */
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_aram,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_power_profiling_index,
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_initialize_index,
					NULL);

	fw_to_check = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_JT1_MASK |
			UCODE_ID_CP_MEC_JT2_MASK;

	/* Stoney has no SDMA1 / MEC_JT2 images to wait for. */
	if (hwmgr->chip_id == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
	if (ret) {
		pr_err("SMU firmware load failed\n");
		return ret;
	}

	ret = smu8_load_mec_firmware(hwmgr);
	if (ret) {
		pr_err("Mec Firmware load failed\n");
		return ret;
	}

	return 0;
}
731 :
732 0 : static int smu8_start_smu(struct pp_hwmgr *hwmgr)
733 : {
734 : struct amdgpu_device *adev;
735 :
736 0 : uint32_t index = SMN_MP1_SRAM_START_ADDR +
737 : SMU8_FIRMWARE_HEADER_LOCATION +
738 : offsetof(struct SMU8_Firmware_Header, Version);
739 :
740 0 : if (hwmgr == NULL || hwmgr->device == NULL)
741 : return -EINVAL;
742 :
743 0 : adev = hwmgr->adev;
744 :
745 0 : cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
746 0 : hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
747 0 : pr_info("smu version %02d.%02d.%02d\n",
748 : ((hwmgr->smu_version >> 16) & 0xFF),
749 : ((hwmgr->smu_version >> 8) & 0xFF),
750 : (hwmgr->smu_version & 0xFF));
751 0 : adev->pm.fw_version = hwmgr->smu_version >> 8;
752 :
753 0 : return smu8_request_smu_load_fw(hwmgr);
754 : }
755 :
/*
 * Allocate the SMU8 backend: the TOC buffer, the shared scratch buffer
 * (sized for all five scratch regions, each 32-byte aligned), and the
 * scratch-entry descriptors that carve that buffer up.
 *
 * Returns 0 on success, -ENOMEM if the backend struct cannot be
 * allocated, -EINVAL on any later failure (BOs and backend are released
 * via the goto cleanup chain).
 */
static int smu8_smu_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct smu8_smumgr *smu8_smu;

	smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
	if (smu8_smu == NULL)
		return -ENOMEM;

	hwmgr->smu_backend = smu8_smu;

	smu8_smu->toc_buffer.data_size = 4096;
	/* One shared buffer covers every scratch region registered below. */
	smu8_smu->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
				smu8_smu->toc_buffer.data_size,
				PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM,
				&smu8_smu->toc_buffer.handle,
				&smu8_smu->toc_buffer.mc_addr,
				&smu8_smu->toc_buffer.kaddr);
	if (ret)
		goto err2;

	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
				smu8_smu->smu_buffer.data_size,
				PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM,
				&smu8_smu->smu_buffer.handle,
				&smu8_smu->smu_buffer.mc_addr,
				&smu8_smu->smu_buffer.kaddr);
	if (ret)
		goto err1;

	/* Register each scratch region; registration order fixes the layout. */
	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
		UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when Populate Firmware Entry.\n");
		goto err0;
	}

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
		UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when Populate Firmware Entry.\n");
		goto err0;
	}
	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
		UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when Populate Firmware Entry.\n");
		goto err0;
	}

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
		sizeof(struct SMU8_MultimediaPowerLogData),
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when Populate Firmware Entry.\n");
		goto err0;
	}

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
		sizeof(struct SMU8_Fusion_ClkTable),
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when Populate Firmware Entry.\n");
		goto err0;
	}

	return 0;

err0:
	amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
				&smu8_smu->smu_buffer.mc_addr,
				&smu8_smu->smu_buffer.kaddr);
err1:
	amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
				&smu8_smu->toc_buffer.mc_addr,
				&smu8_smu->toc_buffer.kaddr);
err2:
	kfree(smu8_smu);
	return -EINVAL;
}
848 :
849 0 : static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
850 : {
851 : struct smu8_smumgr *smu8_smu;
852 :
853 0 : if (hwmgr == NULL || hwmgr->device == NULL)
854 : return -EINVAL;
855 :
856 0 : smu8_smu = hwmgr->smu_backend;
857 0 : if (smu8_smu) {
858 0 : amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
859 0 : &smu8_smu->toc_buffer.mc_addr,
860 : &smu8_smu->toc_buffer.kaddr);
861 0 : amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
862 0 : &smu8_smu->smu_buffer.mc_addr,
863 : &smu8_smu->smu_buffer.kaddr);
864 0 : kfree(smu8_smu);
865 : }
866 :
867 : return 0;
868 : }
869 :
870 0 : static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
871 : unsigned long check_feature)
872 : {
873 : int result;
874 : uint32_t features;
875 :
876 0 : result = smum_send_msg_to_smc_with_parameter(hwmgr,
877 : PPSMC_MSG_GetFeatureStatus,
878 : 0,
879 : &features);
880 0 : if (result == 0) {
881 0 : if (features & check_feature)
882 : return true;
883 : }
884 :
885 0 : return false;
886 : }
887 :
888 0 : static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
889 : {
890 0 : if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
891 : return true;
892 0 : return false;
893 : }
894 :
/*
 * SMU manager entry points for SMU8 parts, wired into the powerplay core.
 * request_smu_load_fw is intentionally NULL here; firmware loading is
 * driven from smu8_start_smu() via smu8_request_smu_load_fw().
 */
const struct pp_smumgr_func smu8_smu_funcs = {
	.name = "smu8_smu",
	.smu_init = smu8_smu_init,
	.smu_fini = smu8_smu_fini,
	.start_smu = smu8_start_smu,
	.check_fw_load_finish = smu8_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = smu8_get_argument,
	.send_msg_to_smc = smu8_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
	.download_pptable_settings = smu8_download_pptable_settings,
	.upload_pptable_settings = smu8_upload_pptable_settings,
	.is_dpm_running = smu8_is_dpm_running,
};
910 :
|