Line data Source code
1 : /*
2 : * Copyright 2017 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : * Authors: Xiangliang.Yu@amd.com
23 : */
24 :
25 : #include "amdgpu.h"
26 : #include "vi.h"
27 : #include "bif/bif_5_0_d.h"
28 : #include "bif/bif_5_0_sh_mask.h"
29 : #include "vid.h"
30 : #include "gca/gfx_8_0_d.h"
31 : #include "gca/gfx_8_0_sh_mask.h"
32 : #include "gmc_v8_0.h"
33 : #include "gfx_v8_0.h"
34 : #include "sdma_v3_0.h"
35 : #include "tonga_ih.h"
36 : #include "gmc/gmc_8_2_d.h"
37 : #include "gmc/gmc_8_2_sh_mask.h"
38 : #include "oss/oss_3_0_d.h"
39 : #include "oss/oss_3_0_sh_mask.h"
40 : #include "dce/dce_10_0_d.h"
41 : #include "dce/dce_10_0_sh_mask.h"
42 : #include "smu/smu_7_1_3_d.h"
43 : #include "mxgpu_vi.h"
44 :
45 : #include "amdgpu_reset.h"
46 :
47 : /* VI golden setting */
/*
 * Fiji MGCG/CGCG clock-gating init sequence for SR-IOV VFs.
 * Triplets of {register offset, mask, value} consumed by
 * amdgpu_device_program_register_sequence().
 */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
97 :
/*
 * Fiji A10 golden register settings for SR-IOV VFs:
 * {register offset, mask, value} triplets.
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
125 :
/*
 * Fiji common golden settings (raster config, resource reservations):
 * {register offset, mask, value} triplets.
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
138 :
/*
 * Tonga MGCG/CGCG clock-gating init sequence for SR-IOV VFs
 * (includes per-CU CGTS programming): {register, mask, value} triplets.
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
228 :
/*
 * Tonga A11 golden register settings for SR-IOV VFs:
 * {register offset, mask, value} triplets.
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
268 :
/*
 * Tonga common golden settings (raster config, resource reservations):
 * {register offset, mask, value} triplets.
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
278 :
279 0 : void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
280 : {
281 0 : switch (adev->asic_type) {
282 : case CHIP_FIJI:
283 0 : amdgpu_device_program_register_sequence(adev,
284 : xgpu_fiji_mgcg_cgcg_init,
285 : ARRAY_SIZE(
286 : xgpu_fiji_mgcg_cgcg_init));
287 0 : amdgpu_device_program_register_sequence(adev,
288 : xgpu_fiji_golden_settings_a10,
289 : ARRAY_SIZE(
290 : xgpu_fiji_golden_settings_a10));
291 0 : amdgpu_device_program_register_sequence(adev,
292 : xgpu_fiji_golden_common_all,
293 : ARRAY_SIZE(
294 : xgpu_fiji_golden_common_all));
295 0 : break;
296 : case CHIP_TONGA:
297 0 : amdgpu_device_program_register_sequence(adev,
298 : xgpu_tonga_mgcg_cgcg_init,
299 : ARRAY_SIZE(
300 : xgpu_tonga_mgcg_cgcg_init));
301 0 : amdgpu_device_program_register_sequence(adev,
302 : xgpu_tonga_golden_settings_a11,
303 : ARRAY_SIZE(
304 : xgpu_tonga_golden_settings_a11));
305 0 : amdgpu_device_program_register_sequence(adev,
306 : xgpu_tonga_golden_common_all,
307 : ARRAY_SIZE(
308 : xgpu_tonga_golden_common_all));
309 0 : break;
310 : default:
311 0 : BUG_ON("Doesn't support chip type.\n");
312 : break;
313 : }
314 0 : }
315 :
316 : /*
317 : * Mailbox communication between GPU hypervisor and VFs
318 : */
319 0 : static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
320 : {
321 : u32 reg;
322 0 : int timeout = VI_MAILBOX_TIMEDOUT;
323 0 : u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
324 :
325 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
326 0 : reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
327 0 : WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
328 :
329 : /*Wait for RCV_MSG_VALID to be 0*/
330 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
331 0 : while (reg & mask) {
332 0 : if (timeout <= 0) {
333 0 : pr_err("RCV_MSG_VALID is not cleared\n");
334 0 : break;
335 : }
336 0 : mdelay(1);
337 0 : timeout -=1;
338 :
339 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
340 : }
341 0 : }
342 :
343 0 : static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
344 : {
345 : u32 reg;
346 :
347 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
348 0 : reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
349 : TRN_MSG_VALID, val ? 1 : 0);
350 0 : WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
351 0 : }
352 :
353 0 : static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
354 : enum idh_request req)
355 : {
356 : u32 reg;
357 :
358 0 : reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
359 0 : reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
360 : MSGBUF_DATA, req);
361 0 : WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
362 :
363 0 : xgpu_vi_mailbox_set_valid(adev, true);
364 0 : }
365 :
366 0 : static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
367 : enum idh_event event)
368 : {
369 : u32 reg;
370 0 : u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
371 :
372 : /* workaround: host driver doesn't set VALID for CMPL now */
373 0 : if (event != IDH_FLR_NOTIFICATION_CMPL) {
374 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
375 0 : if (!(reg & mask))
376 : return -ENOENT;
377 : }
378 :
379 0 : reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
380 0 : if (reg != event)
381 : return -ENOENT;
382 :
383 : /* send ack to PF */
384 0 : xgpu_vi_mailbox_send_ack(adev);
385 :
386 0 : return 0;
387 : }
388 :
389 0 : static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
390 : {
391 0 : int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
392 0 : u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
393 : u32 reg;
394 :
395 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
396 0 : while (!(reg & mask)) {
397 0 : if (timeout <= 0) {
398 0 : pr_err("Doesn't get ack from pf.\n");
399 0 : r = -ETIME;
400 0 : break;
401 : }
402 0 : mdelay(5);
403 0 : timeout -= 5;
404 :
405 0 : reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
406 : }
407 :
408 0 : return r;
409 : }
410 :
411 0 : static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
412 : {
413 0 : int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
414 :
415 0 : r = xgpu_vi_mailbox_rcv_msg(adev, event);
416 0 : while (r) {
417 0 : if (timeout <= 0) {
418 0 : pr_err("Doesn't get ack from pf.\n");
419 0 : r = -ETIME;
420 0 : break;
421 : }
422 0 : mdelay(5);
423 0 : timeout -= 5;
424 :
425 0 : r = xgpu_vi_mailbox_rcv_msg(adev, event);
426 : }
427 :
428 0 : return r;
429 : }
430 :
431 0 : static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
432 : enum idh_request request)
433 : {
434 : int r;
435 :
436 0 : xgpu_vi_mailbox_trans_msg(adev, request);
437 :
438 : /* start to poll ack */
439 0 : r = xgpu_vi_poll_ack(adev);
440 0 : if (r)
441 : return r;
442 :
443 0 : xgpu_vi_mailbox_set_valid(adev, false);
444 :
445 : /* start to check msg if request is idh_req_gpu_init_access */
446 0 : if (request == IDH_REQ_GPU_INIT_ACCESS ||
447 0 : request == IDH_REQ_GPU_FINI_ACCESS ||
448 : request == IDH_REQ_GPU_RESET_ACCESS) {
449 0 : r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
450 0 : if (r) {
451 0 : pr_err("Doesn't get ack from pf, give up\n");
452 0 : return r;
453 : }
454 : }
455 :
456 : return 0;
457 : }
458 :
459 0 : static int xgpu_vi_request_reset(struct amdgpu_device *adev)
460 : {
461 0 : return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
462 : }
463 :
464 0 : static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
465 : {
466 0 : return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
467 : }
468 :
469 0 : static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
470 : bool init)
471 : {
472 : enum idh_request req;
473 :
474 0 : req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
475 0 : return xgpu_vi_send_access_requests(adev, req);
476 : }
477 :
478 0 : static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
479 : bool init)
480 : {
481 : enum idh_request req;
482 0 : int r = 0;
483 :
484 0 : req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
485 0 : r = xgpu_vi_send_access_requests(adev, req);
486 :
487 0 : return r;
488 : }
489 :
490 : /* add support mailbox interrupts */
/* Ack-interrupt handler: nothing to do beyond logging for debug builds. */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");

	return 0;
}
498 :
499 0 : static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
500 : struct amdgpu_irq_src *src,
501 : unsigned type,
502 : enum amdgpu_interrupt_state state)
503 : {
504 0 : u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
505 :
506 0 : tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
507 : (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
508 0 : WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
509 :
510 0 : return 0;
511 : }
512 :
/*
 * Deferred FLR (function-level reset) handler, scheduled from the
 * receive-interrupt path.  Waits for the host's FLR-complete message,
 * then triggers full GPU recovery if recovery is enabled for this device.
 */
static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG become 3 */
	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	if (amdgpu_device_should_recover_gpu(adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		/* No specific reset method: let the recovery core choose. */
		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}
537 :
538 0 : static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
539 : struct amdgpu_irq_src *src,
540 : unsigned type,
541 : enum amdgpu_interrupt_state state)
542 : {
543 0 : u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
544 :
545 0 : tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
546 : (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
547 0 : WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
548 :
549 0 : return 0;
550 : }
551 :
552 0 : static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
553 : struct amdgpu_irq_src *source,
554 : struct amdgpu_iv_entry *entry)
555 : {
556 : int r;
557 :
558 : /* trigger gpu-reset by hypervisor only if TDR disabled */
559 0 : if (!amdgpu_gpu_recovery) {
560 : /* see what event we get */
561 0 : r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
562 :
563 : /* only handle FLR_NOTIFY now */
564 0 : if (!r && !amdgpu_in_reset(adev))
565 0 : WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
566 : &adev->virt.flr_work),
567 : "Failed to queue work! at %s",
568 : __func__);
569 : }
570 :
571 0 : return 0;
572 : }
573 :
/* IRQ source ops for the mailbox ack interrupt (src id 138). */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
578 :
/* IRQ source ops for the mailbox valid-message interrupt (src id 135). */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
583 :
584 0 : void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
585 : {
586 0 : adev->virt.ack_irq.num_types = 1;
587 0 : adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
588 0 : adev->virt.rcv_irq.num_types = 1;
589 0 : adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
590 0 : }
591 :
/*
 * Register the mailbox interrupt sources with the legacy interrupt
 * client: src id 135 for valid-message (rcv), src id 138 for ack.
 * Returns 0 on success or the amdgpu_irq_add_id() error code.
 */
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		/*
		 * NOTE(review): this puts an irq that was only added, not
		 * gotten — looks imbalanced; confirm against the
		 * amdgpu_irq_add_id()/amdgpu_irq_put() refcount contract.
		 */
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
608 :
609 0 : int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
610 : {
611 : int r;
612 :
613 0 : r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
614 0 : if (r)
615 : return r;
616 0 : r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
617 0 : if (r) {
618 0 : amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
619 0 : return r;
620 : }
621 :
622 0 : INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
623 :
624 0 : return 0;
625 : }
626 :
627 0 : void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
628 : {
629 0 : amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
630 0 : amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
631 0 : }
632 :
/* Virtualization ops exported to the amdgpu core for VI SR-IOV VFs. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu = xgpu_vi_request_full_gpu_access,
	.rel_full_gpu = xgpu_vi_release_full_gpu_access,
	.reset_gpu = xgpu_vi_request_reset,
	.wait_reset = xgpu_vi_wait_reset_cmpl,
	.trans_msg = NULL, /* Does not need to trans VF errors to host. */
};
|