Line data Source code
1 : /*
2 : * Copyright 2018 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #include <linux/pci.h>
25 : #include <linux/reboot.h>
26 :
27 : #include "hwmgr.h"
28 : #include "pp_debug.h"
29 : #include "ppatomctrl.h"
30 : #include "ppsmc.h"
31 : #include "atom.h"
32 : #include "ivsrcid/thm/irqsrcs_thm_9_0.h"
33 : #include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
34 : #include "ivsrcid/ivsrcid_vislands30.h"
35 :
36 0 : uint8_t convert_to_vid(uint16_t vddc)
37 : {
38 0 : return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
39 : }
40 :
41 0 : uint16_t convert_to_vddc(uint8_t vid)
42 : {
43 0 : return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
44 : }
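/*
 * Worked example (illustrative, assuming VOLTAGE_SCALE is 4 as defined in
 * the powerplay headers): the SVI2 VID code steps down in 6.25 mV units
 * from a 1.55 V ceiling, so for a 1000 mV request:
 *
 *   convert_to_vid(1000) = (6200 - 1000 * 4) / 25 = 88
 *   convert_to_vddc(88)  = (6200 - 88 * 25) / 4   = 1000
 *
 * i.e. the two helpers are inverses of each other for in-range values.
 */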
45 :
46 0 : int phm_copy_clock_limits_array(
47 : struct pp_hwmgr *hwmgr,
48 : uint32_t **pptable_info_array,
49 : const uint32_t *pptable_array,
50 : uint32_t power_saving_clock_count)
51 : {
52 : uint32_t array_size, i;
53 : uint32_t *table;
54 :
55 0 : array_size = sizeof(uint32_t) * power_saving_clock_count;
56 0 : table = kzalloc(array_size, GFP_KERNEL);
57 0 : if (NULL == table)
58 : return -ENOMEM;
59 :
60 0 : for (i = 0; i < power_saving_clock_count; i++)
61 0 : table[i] = le32_to_cpu(pptable_array[i]);
62 :
63 0 : *pptable_info_array = table;
64 :
65 0 : return 0;
66 : }
67 :
68 0 : int phm_copy_overdrive_settings_limits_array(
69 : struct pp_hwmgr *hwmgr,
70 : uint32_t **pptable_info_array,
71 : const uint32_t *pptable_array,
72 : uint32_t od_setting_count)
73 : {
74 : uint32_t array_size, i;
75 : uint32_t *table;
76 :
77 0 : array_size = sizeof(uint32_t) * od_setting_count;
78 0 : table = kzalloc(array_size, GFP_KERNEL);
79 0 : if (NULL == table)
80 : return -ENOMEM;
81 :
82 0 : for (i = 0; i < od_setting_count; i++)
83 0 : table[i] = le32_to_cpu(pptable_array[i]);
84 :
85 0 : *pptable_info_array = table;
86 :
87 0 : return 0;
88 : }
89 :
90 0 : uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
91 : {
92 0 : u32 mask = 0;
93 0 : u32 shift = 0;
94 :
95 0 : shift = (offset % 4) << 3;
96 0 : if (size == sizeof(uint8_t))
97 0 : mask = 0xFF << shift;
98 0 : else if (size == sizeof(uint16_t))
99 0 : mask = 0xFFFF << shift;
100 :
101 0 : original_data &= ~mask;
102 0 : original_data |= (field << shift);
103 0 : return original_data;
104 : }
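/*
 * Example (illustrative): phm_set_field_to_u32() patches an 8- or 16-bit
 * field into a 32-bit word based on the byte offset of the field inside
 * that word. With offset = 2 and size = sizeof(uint8_t):
 *
 *   shift = (2 % 4) << 3 = 16,  mask = 0xFF << 16
 *   phm_set_field_to_u32(2, 0x12345678, 0xAB, 1) == 0x12AB5678
 *
 * Note that for any other size no mask is built, so the field is OR-ed in
 * without first clearing the destination bits.
 */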
105 :
106 : /*
107 : * Returns once the part of the register indicated by the mask has
108 : * reached the given value.
109 : */
110 0 : int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
111 : uint32_t value, uint32_t mask)
112 : {
113 : uint32_t i;
114 : uint32_t cur_value;
115 :
116 0 : if (hwmgr == NULL || hwmgr->device == NULL) {
117 0 : pr_err("Invalid Hardware Manager!\n");
118 0 : return -EINVAL;
119 : }
120 :
121 0 : for (i = 0; i < hwmgr->usec_timeout; i++) {
122 0 : cur_value = cgs_read_register(hwmgr->device, index);
123 0 : if ((cur_value & mask) == (value & mask))
124 : break;
125 0 : udelay(1);
126 : }
127 :
128 : /* timeout means wrong logic */
129 0 : if (i == hwmgr->usec_timeout)
130 : return -ETIME;
131 0 : return 0;
132 : }
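/*
 * Usage sketch (illustrative only; mmSTATUS_REG is a placeholder register
 * index, not a real define): poll for up to hwmgr->usec_timeout
 * microseconds, one read per microsecond, until bit 0 reads back as 1.
 *
 *   if (phm_wait_on_register(hwmgr, mmSTATUS_REG, 0x1, 0x1))
 *       pr_err("status bit never asserted\n");
 */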
133 :
134 :
135 : /*
136 : * Returns once the part of the register indicated by the mask has
137 : * reached the given value. The indirect space is described by giving
138 : * the memory-mapped index of the indirect index register.
139 : */
140 0 : int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
141 : uint32_t indirect_port,
142 : uint32_t index,
143 : uint32_t value,
144 : uint32_t mask)
145 : {
146 0 : if (hwmgr == NULL || hwmgr->device == NULL) {
147 0 : pr_err("Invalid Hardware Manager!\n");
148 0 : return -EINVAL;
149 : }
150 :
151 0 : cgs_write_register(hwmgr->device, indirect_port, index);
152 0 : return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
153 : }
154 :
155 0 : int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
156 : uint32_t index,
157 : uint32_t value, uint32_t mask)
158 : {
159 : uint32_t i;
160 : uint32_t cur_value;
161 :
162 0 : if (hwmgr == NULL || hwmgr->device == NULL)
163 : return -EINVAL;
164 :
165 0 : for (i = 0; i < hwmgr->usec_timeout; i++) {
166 0 : cur_value = cgs_read_register(hwmgr->device,
167 : index);
168 0 : if ((cur_value & mask) != (value & mask))
169 : break;
170 0 : udelay(1);
171 : }
172 :
173 : /* timeout means wrong logic */
174 0 : if (i == hwmgr->usec_timeout)
175 : return -ETIME;
176 0 : return 0;
177 : }
178 :
179 0 : int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
180 : uint32_t indirect_port,
181 : uint32_t index,
182 : uint32_t value,
183 : uint32_t mask)
184 : {
185 0 : if (hwmgr == NULL || hwmgr->device == NULL)
186 : return -EINVAL;
187 :
188 0 : cgs_write_register(hwmgr->device, indirect_port, index);
189 0 : return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
190 : value, mask);
191 : }
192 :
193 0 : bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
194 : {
195 0 : return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
196 : }
197 :
198 0 : bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
199 : {
200 0 : return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
201 : }
202 :
203 :
204 0 : int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
205 : {
206 : uint32_t i, j;
207 : uint16_t vvalue;
208 0 : bool found = false;
209 : struct pp_atomctrl_voltage_table *table;
210 :
211 0 : PP_ASSERT_WITH_CODE((NULL != vol_table),
212 : "Voltage Table empty.", return -EINVAL);
213 :
214 0 : table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
215 : GFP_KERNEL);
216 :
217 0 : if (NULL == table)
218 : return -ENOMEM;
219 :
220 0 : table->mask_low = vol_table->mask_low;
221 0 : table->phase_delay = vol_table->phase_delay;
222 :
223 0 : for (i = 0; i < vol_table->count; i++) {
224 0 : vvalue = vol_table->entries[i].value;
225 0 : found = false;
226 :
227 0 : for (j = 0; j < table->count; j++) {
228 0 : if (vvalue == table->entries[j].value) {
229 : found = true;
230 : break;
231 : }
232 : }
233 :
234 0 : if (!found) {
235 0 : table->entries[table->count].value = vvalue;
236 0 : table->entries[table->count].smio_low =
237 0 : vol_table->entries[i].smio_low;
238 0 : table->count++;
239 : }
240 : }
241 :
242 0 : memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
243 0 : kfree(table);
244 0 : table = NULL;
245 0 : return 0;
246 : }
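/*
 * Example (illustrative): trimming removes duplicate voltage values while
 * preserving the order of first occurrence, so an input table of
 * { 800, 800, 900, 900, 1000 } becomes { 800, 900, 1000 } with count = 3.
 * mask_low and phase_delay are carried over unchanged.
 */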
247 :
248 0 : int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
249 : phm_ppt_v1_clock_voltage_dependency_table *dep_table)
250 : {
251 : uint32_t i;
252 : int result;
253 :
254 0 : PP_ASSERT_WITH_CODE((0 != dep_table->count),
255 : "Voltage Dependency Table empty.", return -EINVAL);
256 :
257 0 : PP_ASSERT_WITH_CODE((NULL != vol_table),
258 : "vol_table empty.", return -EINVAL);
259 :
260 0 : vol_table->mask_low = 0;
261 0 : vol_table->phase_delay = 0;
262 0 : vol_table->count = dep_table->count;
263 :
264 0 : for (i = 0; i < dep_table->count; i++) {
265 0 : vol_table->entries[i].value = dep_table->entries[i].mvdd;
266 0 : vol_table->entries[i].smio_low = 0;
267 : }
268 :
269 0 : result = phm_trim_voltage_table(vol_table);
270 0 : PP_ASSERT_WITH_CODE((0 == result),
271 : "Failed to trim MVDD table.", return result);
272 :
273 : return 0;
274 : }
275 :
276 0 : int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
277 : phm_ppt_v1_clock_voltage_dependency_table *dep_table)
278 : {
279 : uint32_t i;
280 : int result;
281 :
282 0 : PP_ASSERT_WITH_CODE((0 != dep_table->count),
283 : "Voltage Dependency Table empty.", return -EINVAL);
284 :
285 0 : PP_ASSERT_WITH_CODE((NULL != vol_table),
286 : "vol_table empty.", return -EINVAL);
287 :
288 0 : vol_table->mask_low = 0;
289 0 : vol_table->phase_delay = 0;
290 0 : vol_table->count = dep_table->count;
291 :
292 0 : for (i = 0; i < dep_table->count; i++) {
293 0 : vol_table->entries[i].value = dep_table->entries[i].vddci;
294 0 : vol_table->entries[i].smio_low = 0;
295 : }
296 :
297 0 : result = phm_trim_voltage_table(vol_table);
298 0 : PP_ASSERT_WITH_CODE((0 == result),
299 : "Failed to trim VDDCI table.", return result);
300 :
301 : return 0;
302 : }
303 :
304 0 : int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
305 : phm_ppt_v1_voltage_lookup_table *lookup_table)
306 : {
307 0 : int i = 0;
308 :
309 0 : PP_ASSERT_WITH_CODE((0 != lookup_table->count),
310 : "Voltage Lookup Table empty.", return -EINVAL);
311 :
312 0 : PP_ASSERT_WITH_CODE((NULL != vol_table),
313 : "vol_table empty.", return -EINVAL);
314 :
315 0 : vol_table->mask_low = 0;
316 0 : vol_table->phase_delay = 0;
317 :
318 0 : vol_table->count = lookup_table->count;
319 :
320 0 : for (i = 0; i < vol_table->count; i++) {
321 0 : vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
322 0 : vol_table->entries[i].smio_low = 0;
323 : }
324 :
325 : return 0;
326 : }
327 :
328 0 : void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
329 : struct pp_atomctrl_voltage_table *vol_table)
330 : {
331 : unsigned int i, diff;
332 :
333 0 : if (vol_table->count <= max_vol_steps)
334 : return;
335 :
336 0 : diff = vol_table->count - max_vol_steps;
337 :
338 0 : for (i = 0; i < max_vol_steps; i++)
339 0 : vol_table->entries[i] = vol_table->entries[i + diff];
340 :
341 0 : vol_table->count = max_vol_steps;
342 :
343 0 : return;
344 : }
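/*
 * Example (illustrative): if the voltage table holds 10 entries but the
 * state table only supports max_vol_steps = 8, the first two entries are
 * dropped and the remaining eight are shifted down. Assuming the table is
 * sorted in ascending order, this keeps the eight highest voltage levels.
 */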
345 :
346 0 : int phm_reset_single_dpm_table(void *table,
347 : uint32_t count, int max)
348 : {
349 : int i;
350 :
351 0 : struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
352 :
353 0 : dpm_table->count = count > max ? max : count;
354 :
355 0 : for (i = 0; i < dpm_table->count; i++)
356 0 : dpm_table->dpm_level[i].enabled = false;
357 :
358 0 : return 0;
359 : }
360 :
361 0 : void phm_setup_pcie_table_entry(
362 : void *table,
363 : uint32_t index, uint32_t pcie_gen,
364 : uint32_t pcie_lanes)
365 : {
366 0 : struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
367 0 : dpm_table->dpm_level[index].value = pcie_gen;
368 0 : dpm_table->dpm_level[index].param1 = pcie_lanes;
369 0 : dpm_table->dpm_level[index].enabled = 1;
370 0 : }
371 :
372 0 : int32_t phm_get_dpm_level_enable_mask_value(void *table)
373 : {
374 : int32_t i;
375 0 : int32_t mask = 0;
376 0 : struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
377 :
378 0 : for (i = dpm_table->count; i > 0; i--) {
379 0 : mask = mask << 1;
380 0 : if (dpm_table->dpm_level[i - 1].enabled)
381 0 : mask |= 0x1;
382 : else
383 : mask &= 0xFFFFFFFE;
384 : }
385 :
386 0 : return mask;
387 : }
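/*
 * Example (illustrative): the mask is built from the highest DPM level
 * down, so bit n of the result corresponds to dpm_level[n]. With three
 * levels where levels 0 and 2 are enabled and level 1 is disabled, the
 * returned mask is 0b101 = 0x5.
 */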
388 :
389 0 : uint8_t phm_get_voltage_index(
390 : struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
391 : {
392 : uint8_t count;
393 : uint8_t i;
394 :
395 0 : PP_ASSERT_WITH_CODE((NULL != lookup_table),
396 : "Lookup Table empty.", return 0);
397 0 : count = (uint8_t) (lookup_table->count);
398 0 : PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0);
399 :
400 0 : for (i = 0; i < lookup_table->count; i++) {
401 : /* find first voltage equal or bigger than requested */
402 0 : if (lookup_table->entries[i].us_vdd >= voltage)
403 : return i;
404 : }
405 : /* voltage is bigger than max voltage in the table */
406 0 : return i - 1;
407 : }
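/*
 * Example (illustrative): both phm_get_voltage_index() and
 * phm_get_voltage_id() below return the index of the first entry whose
 * voltage is >= the request, falling back to the last index when the
 * request exceeds every entry. For a lookup table of { 700, 800, 900 },
 * a request of 850 yields index 2, and a request of 1000 also yields 2.
 */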
408 :
409 0 : uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
410 : uint32_t voltage)
411 : {
412 : uint8_t count;
413 0 : uint8_t i = 0;
414 :
415 0 : PP_ASSERT_WITH_CODE((NULL != voltage_table),
416 : "Voltage Table empty.", return 0);
417 0 : count = (uint8_t) (voltage_table->count);
418 0 : PP_ASSERT_WITH_CODE((0 != count), "Voltage Table empty.", return 0);
419 :
420 0 : for (i = 0; i < count; i++) {
421 : /* find first voltage bigger than requested */
422 0 : if (voltage_table->entries[i].value >= voltage)
423 : return i;
424 : }
425 :
426 : /* voltage is bigger than max voltage in the table */
427 0 : return i - 1;
428 : }
429 :
430 0 : uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
431 : {
432 : uint32_t i;
433 :
434 0 : for (i = 0; i < vddci_table->count; i++) {
435 0 : if (vddci_table->entries[i].value >= vddci)
436 : return vddci_table->entries[i].value;
437 : }
438 :
439 : pr_debug("vddci is larger than max value in vddci_table\n");
440 0 : return vddci_table->entries[i-1].value;
441 : }
442 :
443 0 : int phm_find_boot_level(void *table,
444 : uint32_t value, uint32_t *boot_level)
445 : {
446 0 : int result = -EINVAL;
447 : uint32_t i;
448 0 : struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
449 :
450 0 : for (i = 0; i < dpm_table->count; i++) {
451 0 : if (value == dpm_table->dpm_level[i].value) {
452 0 : *boot_level = i;
453 0 : result = 0;
454 : }
455 : }
456 :
457 0 : return result;
458 : }
459 :
460 0 : int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
461 : phm_ppt_v1_voltage_lookup_table *lookup_table,
462 : uint16_t virtual_voltage_id, int32_t *sclk)
463 : {
464 : uint8_t entry_id;
465 : uint8_t voltage_id;
466 0 : struct phm_ppt_v1_information *table_info =
467 : (struct phm_ppt_v1_information *)(hwmgr->pptable);
468 :
469 0 : PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
470 :
471 : /* search for the leakage voltage ID (0xff01 ~ 0xff08) and its sclk */
472 0 : for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
473 0 : voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
474 0 : if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
475 : break;
476 : }
477 :
478 0 : if (entry_id >= table_info->vdd_dep_on_sclk->count) {
479 : pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
480 : return -EINVAL;
481 : }
482 :
483 0 : *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
484 :
485 0 : return 0;
486 : }
487 :
488 : /**
489 : * phm_initializa_dynamic_state_adjustment_rule_settings - Initialize Dynamic State Adjustment Rule Settings
490 : *
491 : * @hwmgr: the address of the powerplay hardware manager.
492 : */
493 0 : int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
494 : {
495 : struct phm_clock_voltage_dependency_table *table_clk_vlt;
496 0 : struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
497 :
498 : /* initialize vddc_dep_on_dal_pwrl table */
499 0 : table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 4),
500 : GFP_KERNEL);
501 :
502 0 : if (NULL == table_clk_vlt) {
503 0 : pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
504 0 : return -ENOMEM;
505 : } else {
506 0 : table_clk_vlt->count = 4;
507 0 : table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
508 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
509 : hwmgr->chip_id <= CHIP_VEGAM)
510 0 : table_clk_vlt->entries[0].v = 700;
511 : else
512 0 : table_clk_vlt->entries[0].v = 0;
513 0 : table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
514 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
515 : hwmgr->chip_id <= CHIP_VEGAM)
516 0 : table_clk_vlt->entries[1].v = 740;
517 : else
518 0 : table_clk_vlt->entries[1].v = 720;
519 0 : table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
520 0 : if (hwmgr->chip_id >= CHIP_POLARIS10 &&
521 : hwmgr->chip_id <= CHIP_VEGAM)
522 0 : table_clk_vlt->entries[2].v = 800;
523 : else
524 0 : table_clk_vlt->entries[2].v = 810;
525 0 : table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
526 0 : table_clk_vlt->entries[3].v = 900;
527 0 : if (pptable_info != NULL)
528 0 : pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
529 0 : hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
530 : }
531 :
532 0 : return 0;
533 : }
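/*
 * Summary of the table built above (values in mV, taken from the code):
 *
 *   DAL power level   Polaris10..VegaM   other ASICs
 *   ULTRALOW                 700               0
 *   LOW                      740             720
 *   NOMINAL                  800             810
 *   PERFORMANCE              900             900
 *
 * The function returns 0 on success and -ENOMEM if the allocation fails.
 */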
534 :
535 0 : uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
536 : {
537 0 : uint32_t level = 0;
538 :
539 0 : while (0 == (mask & (1 << level)))
540 0 : level++;
541 :
542 0 : return level;
543 : }
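/*
 * Example (illustrative): for mask = 0x0C (levels 2 and 3 enabled) the
 * lowest enabled level is 2. Callers must pass a non-zero mask; with
 * mask == 0 the loop above would never terminate.
 */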
544 :
545 0 : void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
546 : {
547 0 : struct phm_ppt_v1_information *table_info =
548 : (struct phm_ppt_v1_information *)hwmgr->pptable;
549 0 : struct phm_clock_voltage_dependency_table *table =
550 : table_info->vddc_dep_on_dal_pwrl;
551 : struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
552 0 : enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
553 0 : uint32_t req_vddc = 0, req_volt, i;
554 :
555 0 : if (!table || table->count <= 0
556 : || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
557 0 : || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
558 : return;
559 :
560 0 : for (i = 0; i < table->count; i++) {
561 0 : if (dal_power_level == table->entries[i].clk) {
562 0 : req_vddc = table->entries[i].v;
563 0 : break;
564 : }
565 : }
566 :
567 0 : vddc_table = table_info->vdd_dep_on_sclk;
568 0 : for (i = 0; i < vddc_table->count; i++) {
569 0 : if (req_vddc <= vddc_table->entries[i].vddc) {
570 0 : req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
571 0 : smum_send_msg_to_smc_with_parameter(hwmgr,
572 : PPSMC_MSG_VddC_Request,
573 : req_volt,
574 : NULL);
575 0 : return;
576 : }
577 : }
578 0 : pr_err("DAL requested level can not"
579 : " found a available voltage in VDDC DPM Table \n");
580 : }
581 :
582 0 : int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
583 : uint32_t sclk, uint16_t id, uint16_t *voltage)
584 : {
585 : uint32_t vol;
586 0 : int ret = 0;
587 :
588 0 : if (hwmgr->chip_id < CHIP_TONGA) {
589 0 : ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
590 0 : } else if (hwmgr->chip_id < CHIP_POLARIS10) {
591 0 : ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
592 0 : if (*voltage >= 2000 || *voltage == 0)
593 0 : *voltage = 1150;
594 : } else {
595 0 : ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
596 0 : *voltage = (uint16_t)(vol/100);
597 : }
598 0 : return ret;
599 : }
600 :
601 :
602 0 : int phm_irq_process(struct amdgpu_device *adev,
603 : struct amdgpu_irq_src *source,
604 : struct amdgpu_iv_entry *entry)
605 : {
606 0 : uint32_t client_id = entry->client_id;
607 0 : uint32_t src_id = entry->src_id;
608 :
609 0 : if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
610 0 : if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
611 0 : dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
612 : /*
613 : * SW CTF just occurred.
614 : * Try to do a graceful shutdown to prevent further damage.
615 : */
616 0 : dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
617 0 : orderly_poweroff(true);
618 0 : } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
619 0 : dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
620 0 : else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
621 0 : dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
622 : /*
623 : * HW CTF just occurred. Shutdown to prevent further damage.
624 : */
625 0 : dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
626 0 : orderly_poweroff(true);
627 : }
628 0 : } else if (client_id == SOC15_IH_CLIENTID_THM) {
629 0 : if (src_id == 0) {
630 0 : dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
631 : /*
632 : * SW CTF just occurred.
633 : * Try to do a graceful shutdown to prevent further damage.
634 : */
635 0 : dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
636 0 : orderly_poweroff(true);
637 : } else
638 0 : dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
639 0 : } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
640 0 : dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
641 : /*
642 : * HW CTF just occurred. Shutdown to prevent further damage.
643 : */
644 0 : dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
645 0 : orderly_poweroff(true);
646 : }
647 :
648 0 : return 0;
649 : }
650 :
651 : static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
652 : .process = phm_irq_process,
653 : };
654 :
655 0 : int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
656 : {
657 0 : struct amdgpu_irq_src *source =
658 : kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
659 :
660 0 : if (!source)
661 : return -ENOMEM;
662 :
663 0 : source->funcs = &smu9_irq_funcs;
664 :
665 0 : amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
666 : SOC15_IH_CLIENTID_THM,
667 : THM_9_0__SRCID__THM_DIG_THERM_L2H,
668 : source);
669 0 : amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
670 : SOC15_IH_CLIENTID_THM,
671 : THM_9_0__SRCID__THM_DIG_THERM_H2L,
672 : source);
673 :
674 : /* Register CTF(GPIO_19) interrupt */
675 0 : amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
676 : SOC15_IH_CLIENTID_ROM_SMUIO,
677 : SMUIO_9_0__SRCID__SMUIO_GPIO19,
678 : source);
679 :
680 0 : return 0;
681 : }
682 :
683 0 : void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
684 : uint8_t *frev, uint8_t *crev)
685 : {
686 0 : struct amdgpu_device *adev = dev;
687 : uint16_t data_start;
688 :
689 0 : if (amdgpu_atom_parse_data_header(
690 : adev->mode_info.atom_context, table, size,
691 : frev, crev, &data_start))
692 0 : return (uint8_t *)adev->mode_info.atom_context->bios +
693 : data_start;
694 :
695 : return NULL;
696 : }
697 :
698 0 : int smu_get_voltage_dependency_table_ppt_v1(
699 : const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
700 : struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
701 : {
702 0 : uint8_t i = 0;
703 0 : PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
704 : "Voltage Lookup Table empty",
705 : return -EINVAL);
706 :
707 0 : dep_table->count = allowed_dep_table->count;
708 0 : for (i = 0; i < dep_table->count; i++) {
709 0 : dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
710 0 : dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
711 0 : dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
712 0 : dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
713 0 : dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
714 0 : dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
715 0 : dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
716 0 : dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
717 0 : dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
718 0 : dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
719 : }
720 :
721 : return 0;
722 : }
723 :
724 0 : int smu_set_watermarks_for_clocks_ranges(void *wt_table,
725 : struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
726 : {
727 : uint32_t i;
728 0 : struct watermarks *table = wt_table;
729 :
730 0 : if (!table || !wm_with_clock_ranges)
731 : return -EINVAL;
732 :
733 0 : if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
734 : return -EINVAL;
735 :
736 0 : for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
737 0 : table->WatermarkRow[1][i].MinClock =
738 0 : cpu_to_le16((uint16_t)
739 : (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
740 : 1000));
741 0 : table->WatermarkRow[1][i].MaxClock =
742 0 : cpu_to_le16((uint16_t)
743 : (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
744 : 1000));
745 0 : table->WatermarkRow[1][i].MinUclk =
746 0 : cpu_to_le16((uint16_t)
747 : (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
748 : 1000));
749 0 : table->WatermarkRow[1][i].MaxUclk =
750 0 : cpu_to_le16((uint16_t)
751 : (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
752 : 1000));
753 0 : table->WatermarkRow[1][i].WmSetting = (uint8_t)
754 0 : wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
755 : }
756 :
757 0 : for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
758 0 : table->WatermarkRow[0][i].MinClock =
759 0 : cpu_to_le16((uint16_t)
760 : (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
761 : 1000));
762 0 : table->WatermarkRow[0][i].MaxClock =
763 0 : cpu_to_le16((uint16_t)
764 : (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
765 : 1000));
766 0 : table->WatermarkRow[0][i].MinUclk =
767 0 : cpu_to_le16((uint16_t)
768 : (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
769 : 1000));
770 0 : table->WatermarkRow[0][i].MaxUclk =
771 0 : cpu_to_le16((uint16_t)
772 : (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
773 : 1000));
774 0 : table->WatermarkRow[0][i].WmSetting = (uint8_t)
775 0 : wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
776 : }
777 : return 0;
778 : }
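/*
 * Note (illustrative summary): WatermarkRow[1] holds the DMIF sets keyed
 * by DCFCLK range and WatermarkRow[0] holds the MCIF sets keyed by SOCCLK
 * range; all clock bounds are converted from kHz to MHz and stored
 * little-endian, and at most four sets of each kind are accepted.
 */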