Line data Source code
1 : /*
2 : * Copyright 2021 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 : #include "amdgpu.h"
24 : #include "amdgpu_atombios.h"
25 : #include "hdp_v5_2.h"
26 :
27 : #include "hdp/hdp_5_2_1_offset.h"
28 : #include "hdp/hdp_5_2_1_sh_mask.h"
29 : #include <uapi/linux/kfd_ioctl.h>
30 :
31 0 : static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
32 : struct amdgpu_ring *ring)
33 : {
34 0 : if (!ring || !ring->funcs->emit_wreg)
35 0 : WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
36 : 0);
37 : else
38 0 : amdgpu_ring_emit_wreg(ring,
39 : (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
40 : 0);
41 0 : }
42 :
/**
 * hdp_v5_2_update_mem_power_gating - program HDP SRAM power gating
 * @adev: amdgpu device pointer
 * @enable: true to (re)enable the supported gating mode, false to leave all
 *          gating disabled
 *
 * The programming sequence is strictly ordered:
 *   1. force the ATOMIC/RC memory clocks on via the soft overrides,
 *   2. disable every clock/power gating control,
 *   3. if enabling, turn on exactly one mode (SD > LS > DS priority) plus the
 *      required *_MEM_POWER_CTRL_EN bits,
 *   4. release the clock overrides.
 * Do not reorder these register writes.
 */
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t hdp_clk_cntl;
	uint32_t hdp_mem_pwr_cntl;

	/* Nothing to do if no SRAM gating mode is supported on this ASIC */
	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch, forced on MEM clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);

	/* disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* Already disabled above. The actions below are for "enabled" only */
	if (enable) {
		/* only one clock gating mode (LS/DS/SD) can be enabled */
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_SD_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_SD_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_LS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_LS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_DS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_DS_EN, 1);
		}

		/* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
		if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
				      AMD_CG_SUPPORT_HDP_SD)) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_CTRL_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_CTRL_EN, 1);
			WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
		}
	}

	/* disable MEM clock override after clock/power mode changing */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}
127 :
128 0 : static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
129 : bool enable)
130 : {
131 : uint32_t hdp_clk_cntl;
132 :
133 0 : if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
134 : return;
135 :
136 0 : hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
137 :
138 0 : if (enable) {
139 0 : hdp_clk_cntl &=
140 : ~(uint32_t)
141 : (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
142 : HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
143 : HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
144 : HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
145 : HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
146 : HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
147 : } else {
148 0 : hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
149 : HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
150 : HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
151 : HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
152 : HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
153 : HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
154 : }
155 :
156 0 : WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
157 : }
158 :
159 0 : static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
160 : u64 *flags)
161 : {
162 : uint32_t tmp;
163 :
164 : /* AMD_CG_SUPPORT_HDP_MGCG */
165 0 : tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
166 0 : if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
167 : HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
168 : HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
169 : HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
170 : HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
171 : HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
172 0 : *flags |= AMD_CG_SUPPORT_HDP_MGCG;
173 :
174 : /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
175 0 : tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
176 0 : if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
177 0 : *flags |= AMD_CG_SUPPORT_HDP_LS;
178 0 : else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
179 0 : *flags |= AMD_CG_SUPPORT_HDP_DS;
180 0 : else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
181 0 : *flags |= AMD_CG_SUPPORT_HDP_SD;
182 0 : }
183 :
184 0 : static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
185 : bool enable)
186 : {
187 0 : hdp_v5_2_update_mem_power_gating(adev, enable);
188 0 : hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
189 0 : }
190 :
/* HDP 5.2.x callback table plugged into adev->hdp.funcs by the SoC setup code */
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
	.flush_hdp = hdp_v5_2_flush_hdp,
	.update_clock_gating = hdp_v5_2_update_clock_gating,
	.get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
|