Line data Source code
1 : /*
2 : * Copyright 2014 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #include <linux/pci.h>
25 : #include <linux/slab.h>
26 :
27 : #include <drm/amdgpu_drm.h>
28 :
29 : #include "amdgpu.h"
30 : #include "amdgpu_atombios.h"
31 : #include "amdgpu_ih.h"
32 : #include "amdgpu_uvd.h"
33 : #include "amdgpu_vce.h"
34 : #include "amdgpu_ucode.h"
35 : #include "atom.h"
36 : #include "amd_pcie.h"
37 :
38 : #include "gmc/gmc_8_1_d.h"
39 : #include "gmc/gmc_8_1_sh_mask.h"
40 :
41 : #include "oss/oss_3_0_d.h"
42 : #include "oss/oss_3_0_sh_mask.h"
43 :
44 : #include "bif/bif_5_0_d.h"
45 : #include "bif/bif_5_0_sh_mask.h"
46 :
47 : #include "gca/gfx_8_0_d.h"
48 : #include "gca/gfx_8_0_sh_mask.h"
49 :
50 : #include "smu/smu_7_1_1_d.h"
51 : #include "smu/smu_7_1_1_sh_mask.h"
52 :
53 : #include "uvd/uvd_5_0_d.h"
54 : #include "uvd/uvd_5_0_sh_mask.h"
55 :
56 : #include "vce/vce_3_0_d.h"
57 : #include "vce/vce_3_0_sh_mask.h"
58 :
59 : #include "dce/dce_10_0_d.h"
60 : #include "dce/dce_10_0_sh_mask.h"
61 :
62 : #include "vid.h"
63 : #include "vi.h"
64 : #include "gmc_v8_0.h"
65 : #include "gmc_v7_0.h"
66 : #include "gfx_v8_0.h"
67 : #include "sdma_v2_4.h"
68 : #include "sdma_v3_0.h"
69 : #include "dce_v10_0.h"
70 : #include "dce_v11_0.h"
71 : #include "iceland_ih.h"
72 : #include "tonga_ih.h"
73 : #include "cz_ih.h"
74 : #include "uvd_v5_0.h"
75 : #include "uvd_v6_0.h"
76 : #include "vce_v3_0.h"
77 : #if defined(CONFIG_DRM_AMD_ACP)
78 : #include "amdgpu_acp.h"
79 : #endif
80 : #include "amdgpu_vkms.h"
81 : #include "mxgpu_vi.h"
82 : #include "amdgpu_dm.h"
83 :
84 : #if IS_ENABLED(CONFIG_X86)
85 : #include <asm/intel-family.h>
86 : #endif
87 :
88 : #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
89 : #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
90 : #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
91 : #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
92 : #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
93 : #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
94 : #define ixPCIE_L1_PM_SUB_CNTL 0x378
95 : #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
96 : #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
97 : #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
98 : #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
99 : #define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
100 : #define LINK_CAP 0x64
101 : #define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
102 : #define ixCPM_CONTROL 0x1400118
103 : #define ixPCIE_LC_CNTL7 0x100100BC
104 : #define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L
105 : #define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
106 : #define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009
107 : #define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
108 : #define PCIE_L1_PM_SUB_CNTL 0x378
109 : #define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \
110 : (asic_type <= CHIP_POLARIS12) && \
111 : (rid >= 0x6E))
112 : /* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
	/* no encode codecs are advertised for Topaz */
	.codec_count = 0,
	.codec_array = NULL,
};
118 :
119 : /* Tonga, CZ, ST, Fiji */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
	{
		/* H.264 encode only, up to 4K; max_level 0 = unrestricted here */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};
130 :
/* encode caps reported for Tonga/CZ/ST/Fiji via vi_query_video_codecs() */
static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
	.codec_array = tonga_video_codecs_encode_array,
};
136 :
137 : /* Polaris */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
	{
		/* H.264 encode up to 4K */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		/* Polaris additionally advertises HEVC encode */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};
155 :
/* encode caps reported for the Polaris family via vi_query_video_codecs() */
static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
	.codec_array = polaris_video_codecs_encode_array,
};
161 :
162 : /* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
	/* no decode codecs are advertised for Topaz */
	.codec_count = 0,
	.codec_array = NULL,
};
168 :
169 : /* Tonga */
/* Tonga decode caps: MPEG2/MPEG4/AVC/VC1 only (no HEVC or JPEG) */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
};
201 :
/* decode caps reported for Tonga via vi_query_video_codecs() */
static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
	.codec_array = tonga_video_codecs_decode_array,
};
207 :
208 : /* CZ, ST, Fiji, Polaris */
/* CZ/ST/Fiji/Polaris decode caps: Tonga's set plus HEVC and JPEG */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};
254 :
/* decode caps reported for CZ/ST/Fiji/Polaris via vi_query_video_codecs() */
static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
	.codec_array = cz_video_codecs_decode_array,
};
260 :
261 0 : static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
262 : const struct amdgpu_video_codecs **codecs)
263 : {
264 0 : switch (adev->asic_type) {
265 : case CHIP_TOPAZ:
266 0 : if (encode)
267 0 : *codecs = &topaz_video_codecs_encode;
268 : else
269 0 : *codecs = &topaz_video_codecs_decode;
270 : return 0;
271 : case CHIP_TONGA:
272 0 : if (encode)
273 0 : *codecs = &tonga_video_codecs_encode;
274 : else
275 0 : *codecs = &tonga_video_codecs_decode;
276 : return 0;
277 : case CHIP_POLARIS10:
278 : case CHIP_POLARIS11:
279 : case CHIP_POLARIS12:
280 : case CHIP_VEGAM:
281 0 : if (encode)
282 0 : *codecs = &polaris_video_codecs_encode;
283 : else
284 0 : *codecs = &cz_video_codecs_decode;
285 : return 0;
286 : case CHIP_FIJI:
287 : case CHIP_CARRIZO:
288 : case CHIP_STONEY:
289 0 : if (encode)
290 0 : *codecs = &tonga_video_codecs_encode;
291 : else
292 0 : *codecs = &cz_video_codecs_decode;
293 : return 0;
294 : default:
295 : return -EINVAL;
296 : }
297 : }
298 :
299 : /*
300 : * Indirect registers accessor
301 : */
/*
 * Read a register behind the PCIE index/data pair.  The pair is shared
 * hardware state, hence the irqsave spinlock around the whole sequence.
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	/* read back to make sure the index write has landed */
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
314 :
/*
 * Write a register behind the PCIE index/data pair; both the index and
 * the data write are flushed with a read-back before/after.
 */
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	/* read back to make sure the index write has landed */
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	/* read back to make sure the data write has landed */
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
326 :
/* Read an SMC register through the SMC_IND_INDEX_11/DATA_11 pair. */
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}
338 :
/* Write an SMC register through the SMC_IND_INDEX_11/DATA_11 pair. */
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
348 :
349 : /* smu_8_0_d.h */
350 : #define mmMP0PUB_IND_INDEX 0x180
351 : #define mmMP0PUB_IND_DATA 0x181
352 :
/* Read an SMC register on the smu 8.0 (Carrizo-family) MP0PUB index/data pair. */
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}
364 :
/* Write an SMC register on the smu 8.0 (Carrizo-family) MP0PUB index/data pair. */
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
374 :
/* Read a UVD context register; the index field is 9 bits wide (0x1ff mask). */
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
386 :
/* Write a UVD context register; the index field is 9 bits wide (0x1ff mask). */
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
396 :
/* Read a DIDT register through the DIDT_IND index/data pair. */
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
408 :
/* Write a DIDT register through the DIDT_IND index/data pair. */
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
418 :
/* Read a GC CAC register through the GC_CAC_IND index/data pair. */
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
430 :
/* Write a GC CAC register through the GC_CAC_IND index/data pair. */
static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
440 :
441 :
/* { register, mask, value } triples applied by
 * amdgpu_device_program_register_sequence() at init time.
 */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
452 :
/* { register, mask, value } golden-register triples for Fiji */
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
463 :
/* { register, mask, value } golden-register triples for Iceland/Topaz */
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
472 :
/* { register, mask, value } golden-register triples for Carrizo */
static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
481 :
/* { register, mask, value } golden-register triples for Stoney */
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
488 :
489 0 : static void vi_init_golden_registers(struct amdgpu_device *adev)
490 : {
491 : /* Some of the registers might be dependent on GRBM_GFX_INDEX */
492 0 : mutex_lock(&adev->grbm_idx_mutex);
493 :
494 0 : if (amdgpu_sriov_vf(adev)) {
495 0 : xgpu_vi_init_golden_registers(adev);
496 0 : mutex_unlock(&adev->grbm_idx_mutex);
497 0 : return;
498 : }
499 :
500 0 : switch (adev->asic_type) {
501 : case CHIP_TOPAZ:
502 0 : amdgpu_device_program_register_sequence(adev,
503 : iceland_mgcg_cgcg_init,
504 : ARRAY_SIZE(iceland_mgcg_cgcg_init));
505 0 : break;
506 : case CHIP_FIJI:
507 0 : amdgpu_device_program_register_sequence(adev,
508 : fiji_mgcg_cgcg_init,
509 : ARRAY_SIZE(fiji_mgcg_cgcg_init));
510 0 : break;
511 : case CHIP_TONGA:
512 0 : amdgpu_device_program_register_sequence(adev,
513 : tonga_mgcg_cgcg_init,
514 : ARRAY_SIZE(tonga_mgcg_cgcg_init));
515 0 : break;
516 : case CHIP_CARRIZO:
517 0 : amdgpu_device_program_register_sequence(adev,
518 : cz_mgcg_cgcg_init,
519 : ARRAY_SIZE(cz_mgcg_cgcg_init));
520 0 : break;
521 : case CHIP_STONEY:
522 0 : amdgpu_device_program_register_sequence(adev,
523 : stoney_mgcg_cgcg_init,
524 : ARRAY_SIZE(stoney_mgcg_cgcg_init));
525 0 : break;
526 : case CHIP_POLARIS10:
527 : case CHIP_POLARIS11:
528 : case CHIP_POLARIS12:
529 : case CHIP_VEGAM:
530 : default:
531 : break;
532 : }
533 0 : mutex_unlock(&adev->grbm_idx_mutex);
534 : }
535 :
536 : /**
537 : * vi_get_xclk - get the xclk
538 : *
539 : * @adev: amdgpu_device pointer
540 : *
541 : * Returns the reference clock used by the gfx engine
542 : * (VI).
543 : */
544 0 : static u32 vi_get_xclk(struct amdgpu_device *adev)
545 : {
546 0 : u32 reference_clock = adev->clock.spll.reference_freq;
547 : u32 tmp;
548 :
549 0 : if (adev->flags & AMD_IS_APU)
550 : return reference_clock;
551 :
552 0 : tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
553 0 : if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
554 : return 1000;
555 :
556 0 : tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
557 0 : if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
558 0 : return reference_clock / 4;
559 :
560 : return reference_clock;
561 : }
562 :
563 : /**
564 : * vi_srbm_select - select specific register instances
565 : *
566 : * @adev: amdgpu_device pointer
567 : * @me: selected ME (micro engine)
568 : * @pipe: pipe
569 : * @queue: queue
570 : * @vmid: VMID
571 : *
572 : * Switches the currently active registers instances. Some
573 : * registers are instanced per VMID, others are instanced per
574 : * me/pipe/queue combination.
575 : */
576 0 : void vi_srbm_select(struct amdgpu_device *adev,
577 : u32 me, u32 pipe, u32 queue, u32 vmid)
578 : {
579 0 : u32 srbm_gfx_cntl = 0;
580 0 : srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
581 0 : srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
582 0 : srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
583 0 : srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
584 0 : WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
585 0 : }
586 :
/* Intentionally a no-op on VI; VGA state toggling is not implemented. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
591 :
/*
 * Read the VBIOS when the ROM is normally disabled: save the affected
 * registers, enable the ROM on the bus, turn VGA off (when display
 * hardware exists) and force the ROM clock, read the BIOS, then restore
 * everything.  The save/program/restore ordering must be preserved.
 */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* save the registers we are about to modify */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	/* force the ROM serial clock on while we read */
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
636 :
637 0 : static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
638 : u8 *bios, u32 length_bytes)
639 : {
640 : u32 *dw_ptr;
641 : unsigned long flags;
642 : u32 i, length_dw;
643 :
644 0 : if (bios == NULL)
645 : return false;
646 0 : if (length_bytes == 0)
647 : return false;
648 : /* APU vbios image is part of sbios image */
649 0 : if (adev->flags & AMD_IS_APU)
650 : return false;
651 :
652 0 : dw_ptr = (u32 *)bios;
653 0 : length_dw = ALIGN(length_bytes, 4) / 4;
654 : /* take the smc lock since we are using the smc index */
655 0 : spin_lock_irqsave(&adev->smc_idx_lock, flags);
656 : /* set rom index to 0 */
657 0 : WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
658 0 : WREG32(mmSMC_IND_DATA_11, 0);
659 : /* set index to data for continous read */
660 0 : WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
661 0 : for (i = 0; i < length_dw; i++)
662 0 : dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
663 0 : spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
664 :
665 0 : return true;
666 : }
667 :
/*
 * Allow-list consumed by vi_read_register().  Entries whose second field
 * is true are GRBM-indexed (per-SE/SH); vi_get_register_value() answers
 * several of them from the cached gfx config instead of raw MMIO.
 */
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
746 :
/*
 * Fetch the value of an allowed register.  Cached gfx-config values are
 * returned where available; GRBM-indexed registers otherwise need a
 * select/read/deselect sequence under the grbm_idx_mutex.
 *
 * @indexed: register is banked by GRBM_GFX_INDEX; se_num/sh_num pick the
 *           instance, 0xffffffff meaning "broadcast / don't care".
 */
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		/* map the broadcast value to instance 0 for the cache lookup */
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		/* these four are answered from the cached rb_config */
		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		/* select the requested SE/SH, read, then restore broadcast */
		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			/* tile modes captured at init; offsets are contiguous */
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			/* anything else on the allow-list is a plain MMIO read */
			return RREG32(reg_offset);
		}
	}
}
842 :
843 0 : static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
844 : u32 sh_num, u32 reg_offset, u32 *value)
845 : {
846 : uint32_t i;
847 :
848 0 : *value = 0;
849 0 : for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
850 0 : bool indexed = vi_allowed_read_registers[i].grbm_indexed;
851 :
852 0 : if (reg_offset != vi_allowed_read_registers[i].reg_offset)
853 0 : continue;
854 :
855 0 : *value = vi_get_register_value(adev, indexed, se_num, sh_num,
856 : reg_offset);
857 0 : return 0;
858 : }
859 : return -EINVAL;
860 : }
861 :
862 : /**
863 : * vi_asic_pci_config_reset - soft reset GPU
864 : *
865 : * @adev: amdgpu_device pointer
866 : *
867 : * Use PCI Config method to reset the GPU.
868 : *
869 : * Returns 0 for success.
870 : */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;
	int r = -EINVAL;

	/* mark the GPU hung in the atombios scratch regs around the reset */
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		/* CONFIG_MEMSIZE reads all-ones while still in reset */
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			r = 0;
			break;
		}
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
901 :
902 0 : static bool vi_asic_supports_baco(struct amdgpu_device *adev)
903 : {
904 0 : switch (adev->asic_type) {
905 : case CHIP_FIJI:
906 : case CHIP_TONGA:
907 : case CHIP_POLARIS10:
908 : case CHIP_POLARIS11:
909 : case CHIP_POLARIS12:
910 : case CHIP_TOPAZ:
911 0 : return amdgpu_dpm_is_baco_supported(adev);
912 : default:
913 : return false;
914 : }
915 : }
916 :
917 : static enum amd_reset_method
918 0 : vi_asic_reset_method(struct amdgpu_device *adev)
919 : {
920 : bool baco_reset;
921 :
922 0 : if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
923 : amdgpu_reset_method == AMD_RESET_METHOD_BACO)
924 : return amdgpu_reset_method;
925 :
926 0 : if (amdgpu_reset_method != -1)
927 0 : dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
928 : amdgpu_reset_method);
929 :
930 0 : switch (adev->asic_type) {
931 : case CHIP_FIJI:
932 : case CHIP_TONGA:
933 : case CHIP_POLARIS10:
934 : case CHIP_POLARIS11:
935 : case CHIP_POLARIS12:
936 : case CHIP_TOPAZ:
937 0 : baco_reset = amdgpu_dpm_is_baco_supported(adev);
938 0 : break;
939 : default:
940 : baco_reset = false;
941 : break;
942 : }
943 :
944 0 : if (baco_reset)
945 : return AMD_RESET_METHOD_BACO;
946 : else
947 0 : return AMD_RESET_METHOD_LEGACY;
948 : }
949 :
950 : /**
951 : * vi_asic_reset - soft reset GPU
952 : *
953 : * @adev: amdgpu_device pointer
954 : *
955 : * Look up which blocks are hung and attempt
956 : * to reset them.
957 : * Returns 0 for success.
958 : */
959 0 : static int vi_asic_reset(struct amdgpu_device *adev)
960 : {
961 : int r;
962 :
963 : /* APUs don't have full asic reset */
964 0 : if (adev->flags & AMD_IS_APU)
965 : return 0;
966 :
967 0 : if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
968 0 : dev_info(adev->dev, "BACO reset\n");
969 0 : r = amdgpu_dpm_baco_reset(adev);
970 : } else {
971 0 : dev_info(adev->dev, "PCI CONFIG reset\n");
972 0 : r = vi_asic_pci_config_reset(adev);
973 : }
974 :
975 : return r;
976 : }
977 :
/* Return the memory size the ASIC reports in mmCONFIG_MEMSIZE. */
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
982 :
/*
 * Program one UVD-related clock: ask atombios for the divider, write it
 * into @cntl_reg, then poll @status_reg (up to 100 * 10ms) until the
 * clock reports stable.  APUs use a different status bit (0x10000) and
 * only touch the divider field.
 */
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* wait for the new divider to take effect */
	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}
1021 :
1022 : #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
1023 : #define ixGNB_CLK1_STATUS 0xD822010C
1024 : #define ixGNB_CLK2_DFS_CNTL 0xD8220110
1025 : #define ixGNB_CLK2_STATUS 0xD822012C
1026 : #define ixGNB_CLK3_DFS_CNTL 0xD8220130
1027 : #define ixGNB_CLK3_STATUS 0xD822014C
1028 :
1029 0 : static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1030 : {
1031 : int r;
1032 :
1033 0 : if (adev->flags & AMD_IS_APU) {
1034 0 : r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
1035 0 : if (r)
1036 : return r;
1037 :
1038 0 : r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
1039 0 : if (r)
1040 : return r;
1041 : } else {
1042 0 : r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
1043 0 : if (r)
1044 : return r;
1045 :
1046 0 : r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
1047 0 : if (r)
1048 : return r;
1049 : }
1050 :
1051 : return 0;
1052 : }
1053 :
/**
 * vi_set_vce_clocks - program the VCE ECLK
 * @adev: amdgpu device pointer
 * @evclk: requested EVCLK (accepted for API symmetry but not used here;
 *         only the ECCLK divider is programmed)
 * @ecclk: requested ECCLK, used to compute the post divider
 *
 * Waits for the clock to be stable, writes the new divider, then waits
 * for the switch to complete.  APUs use the GNB CLK3 DFS registers,
 * dGPUs the SMC CG_ECLK registers.
 *
 * Returns 0 on success, the atombios error code, or -ETIMEDOUT.
 */
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
				       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
				       ecclk, false, &dividers);
	if (r)
		return r;

	/* wait for the clock status to assert before touching the divider */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	/* read-modify-write the divider field(s) only */
	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	/* wait for the clock switch to complete */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
1107 :
1108 : static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
1109 : {
1110 0 : if (pci_is_root_bus(adev->pdev->bus))
1111 : return;
1112 :
1113 : if (amdgpu_pcie_gen2 == 0)
1114 : return;
1115 :
1116 : if (adev->flags & AMD_IS_APU)
1117 : return;
1118 :
1119 : if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1120 : CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
1121 : return;
1122 :
1123 : /* todo */
1124 : }
1125 :
/* Enable ASPM entry: program the default L0s/L1 inactivity timers,
 * allow PMI-driven L1 entry and delay the L1 exit.
 */
static void vi_enable_aspm(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
		PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
		PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
	/* avoid a PCIE register write when nothing changed */
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
1140 :
/* Platform quirk: returns false on Intel Alder Lake hosts (family 6),
 * where ASPM is skipped for these GPUs.  NOTE(review): rationale
 * inferred from the quirk shape — confirm against the introducing
 * commit.  Non-x86 builds always return true.
 */
static bool aspm_support_quirk_check(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
	return true;
#endif
}
1151 :
1152 0 : static void vi_program_aspm(struct amdgpu_device *adev)
1153 : {
1154 : u32 data, data1, orig;
1155 0 : bool bL1SS = false;
1156 0 : bool bClkReqSupport = true;
1157 :
1158 0 : if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
1159 0 : return;
1160 :
1161 0 : if (adev->flags & AMD_IS_APU ||
1162 0 : adev->asic_type < CHIP_POLARIS10)
1163 : return;
1164 :
1165 0 : orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1166 0 : data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
1167 0 : data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1168 0 : data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1169 0 : if (orig != data)
1170 0 : WREG32_PCIE(ixPCIE_LC_CNTL, data);
1171 :
1172 0 : orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1173 0 : data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
1174 0 : data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
1175 0 : data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
1176 0 : if (orig != data)
1177 0 : WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
1178 :
1179 0 : orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
1180 0 : data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
1181 0 : if (orig != data)
1182 0 : WREG32_PCIE(ixPCIE_LC_CNTL3, data);
1183 :
1184 0 : orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
1185 0 : data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
1186 0 : if (orig != data)
1187 0 : WREG32_PCIE(ixPCIE_P_CNTL, data);
1188 :
1189 0 : data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
1190 0 : pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
1191 0 : if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
1192 : (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
1193 : PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
1194 : PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
1195 : PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
1196 : bL1SS = true;
1197 0 : } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
1198 : PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
1199 : PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
1200 : PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
1201 0 : bL1SS = true;
1202 : }
1203 :
1204 0 : orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
1205 0 : data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
1206 0 : if (orig != data)
1207 0 : WREG32_PCIE(ixPCIE_LC_CNTL6, data);
1208 :
1209 0 : orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
1210 0 : data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
1211 0 : if (orig != data)
1212 0 : WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
1213 :
1214 0 : pci_read_config_dword(adev->pdev, LINK_CAP, &data);
1215 0 : if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
1216 0 : bClkReqSupport = false;
1217 :
1218 0 : if (bClkReqSupport) {
1219 0 : orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
1220 0 : data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
1221 0 : data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
1222 : (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
1223 0 : if (orig != data)
1224 0 : WREG32_SMC(ixTHM_CLK_CNTL, data);
1225 :
1226 0 : orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
1227 0 : data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
1228 : MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
1229 0 : data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
1230 : (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
1231 0 : data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
1232 0 : if (orig != data)
1233 0 : WREG32_SMC(ixMISC_CLK_CTRL, data);
1234 :
1235 0 : orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
1236 0 : data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
1237 0 : if (orig != data)
1238 0 : WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1239 :
1240 0 : orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
1241 0 : data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
1242 0 : if (orig != data)
1243 0 : WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1244 :
1245 0 : orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
1246 0 : data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
1247 0 : data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
1248 0 : if (orig != data)
1249 0 : WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
1250 :
1251 0 : orig = data = RREG32_PCIE(ixCPM_CONTROL);
1252 0 : data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
1253 : CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
1254 0 : if (orig != data)
1255 0 : WREG32_PCIE(ixCPM_CONTROL, data);
1256 :
1257 0 : orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
1258 0 : data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
1259 0 : data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
1260 0 : if (orig != data)
1261 0 : WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
1262 :
1263 0 : orig = data = RREG32(mmBIF_CLK_CTRL);
1264 0 : data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
1265 0 : if (orig != data)
1266 0 : WREG32(mmBIF_CLK_CTRL, data);
1267 :
1268 0 : orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
1269 0 : data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
1270 0 : if (orig != data)
1271 0 : WREG32_PCIE(ixPCIE_LC_CNTL7, data);
1272 :
1273 0 : orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
1274 0 : data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
1275 0 : if (orig != data)
1276 0 : WREG32_PCIE(ixPCIE_HW_DEBUG, data);
1277 :
1278 0 : orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
1279 0 : data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
1280 0 : data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1281 0 : if (bL1SS)
1282 0 : data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1283 0 : if (orig != data)
1284 0 : WREG32_PCIE(ixPCIE_LC_CNTL2, data);
1285 :
1286 : }
1287 :
1288 0 : vi_enable_aspm(adev);
1289 :
1290 0 : data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1291 0 : data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
1292 0 : if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
1293 0 : data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
1294 : data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
1295 0 : orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1296 0 : data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1297 0 : if (orig != data)
1298 0 : WREG32_PCIE(ixPCIE_LC_CNTL, data);
1299 : }
1300 :
1301 0 : if ((adev->asic_type == CHIP_POLARIS12 &&
1302 0 : !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
1303 0 : ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
1304 0 : orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
1305 0 : data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
1306 0 : if (orig != data)
1307 0 : WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
1308 : }
1309 : }
1310 :
1311 0 : static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
1312 : bool enable)
1313 : {
1314 : u32 tmp;
1315 :
1316 : /* not necessary on CZ */
1317 0 : if (adev->flags & AMD_IS_APU)
1318 : return;
1319 :
1320 0 : tmp = RREG32(mmBIF_DOORBELL_APER_EN);
1321 0 : if (enable)
1322 0 : tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
1323 : else
1324 0 : tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
1325 :
1326 0 : WREG32(mmBIF_DOORBELL_APER_EN, tmp);
1327 : }
1328 :
/* SMC-indirect fuse location holding the ATI revision id on APUs. */
#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
1332 :
1333 0 : static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1334 : {
1335 0 : if (adev->flags & AMD_IS_APU)
1336 0 : return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1337 0 : >> ATI_REV_ID_FUSE_MACRO__SHIFT;
1338 : else
1339 0 : return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1340 0 : >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1341 : }
1342 :
1343 0 : static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1344 : {
1345 0 : if (!ring || !ring->funcs->emit_wreg) {
1346 0 : WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1347 0 : RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1348 : } else {
1349 0 : amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1350 : }
1351 0 : }
1352 :
1353 0 : static void vi_invalidate_hdp(struct amdgpu_device *adev,
1354 : struct amdgpu_ring *ring)
1355 : {
1356 0 : if (!ring || !ring->funcs->emit_wreg) {
1357 0 : WREG32(mmHDP_DEBUG0, 1);
1358 0 : RREG32(mmHDP_DEBUG0);
1359 : } else {
1360 0 : amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1361 : }
1362 0 : }
1363 :
1364 0 : static bool vi_need_full_reset(struct amdgpu_device *adev)
1365 : {
1366 0 : switch (adev->asic_type) {
1367 : case CHIP_CARRIZO:
1368 : case CHIP_STONEY:
1369 : /* CZ has hang issues with full reset at the moment */
1370 : return false;
1371 : case CHIP_FIJI:
1372 : case CHIP_TONGA:
1373 : /* XXX: soft reset should work on fiji and tonga */
1374 0 : return true;
1375 : case CHIP_POLARIS10:
1376 : case CHIP_POLARIS11:
1377 : case CHIP_POLARIS12:
1378 : case CHIP_TOPAZ:
1379 : default:
1380 : /* change this when we support soft reset */
1381 0 : return true;
1382 : }
1383 : }
1384 :
/**
 * vi_get_pcie_usage - sample PCIe traffic counters over one second
 * @adev: amdgpu device pointer
 * @count0: out; number of received messages (64-bit, overflow folded in)
 * @count1: out; number of posted requests sent (64-bit, overflow folded in)
 *
 * Programs two PCIE perf counters, lets them run for 1s, then reads the
 * shadowed values.  On APUs the outputs are left untouched (counters
 * read 0 there).
 */
static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	/* sample window: counters accumulate for one second */
	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
1430 :
1431 0 : static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
1432 : {
1433 : uint64_t nak_r, nak_g;
1434 :
1435 : /* Get the number of NAKs received and generated */
1436 0 : nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1437 0 : nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1438 :
1439 : /* Add the total number of NAKs, i.e the number of replays */
1440 0 : return (nak_r + nak_g);
1441 : }
1442 :
1443 0 : static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1444 : {
1445 : u32 clock_cntl, pc;
1446 :
1447 0 : if (adev->flags & AMD_IS_APU)
1448 : return false;
1449 :
1450 : /* check if the SMC is already running */
1451 0 : clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1452 0 : pc = RREG32_SMC(ixSMC_PC_C);
1453 0 : if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1454 : (0x20100 <= pc))
1455 : return true;
1456 :
1457 0 : return false;
1458 : }
1459 :
/* No pre-ASIC-init work is required on VI; this stub exists to satisfy
 * the amdgpu_asic_funcs interface.
 */
static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}
1463 :
/* ASIC-level callback table shared by all VI-family chips; installed
 * into adev->asic_funcs by vi_common_early_init().
 */
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
	.query_video_codecs = &vi_query_video_codecs,
};
1487 :
/* Carrizo PCI revision ids that are actually Bristol Ridge parts.
 * Note: the macro argument is parenthesized so that compound
 * expressions (e.g. ternaries) evaluate correctly.
 */
#define CZ_REV_BRISTOL(rev) \
	(((rev) >= 0xC8 && (rev) <= 0xCE) || ((rev) >= 0xE1 && (rev) <= 0xE6))
1490 :
/**
 * vi_common_early_init - early init for the VI "common" IP block
 * @handle: amdgpu_device pointer passed as void *
 *
 * Installs the indirect register accessors (SMC, PCIE, UVD context,
 * DIDT, GC CAC), registers the ASIC callback table, reads the hardware
 * revision id and derives the per-chip clock/power gating feature flags
 * and external revision id.  Applies SR-IOV settings when running as a
 * virtual function.
 *
 * Returns 0 on success, -EINVAL for unknown ASIC types.
 */
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* APUs (CZ/ST) route SMC access differently than dGPUs */
	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	/* Per-chip clock gating (cg) and power gating (pg) capabilities,
	 * plus the chip-family offset added to rev_id to form the
	 * external revision id.
	 */
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		/* clock gating intentionally disabled on VEGAM for now */
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
1714 :
/* Late init: under SR-IOV, acquire the host mailbox interrupt. */
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}
1724 :
/* SW init: under SR-IOV, register the mailbox interrupt source id. */
static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}
1734 :
static int vi_common_sw_fini(void *handle)
{
	/* Nothing to tear down; the SR-IOV mailbox IRQ taken in sw_init
	 * is released in vi_common_hw_fini().
	 */
	return 0;
}
1739 :
/* HW init for the common block: golden register settings, PCIe link
 * speed, ASPM, and the doorbell aperture.
 */
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
1755 :
/* HW teardown for the common block; also releases the SR-IOV mailbox
 * interrupt taken in late_init.
 */
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}
1768 :
/* Suspend is just the common-block HW teardown. */
static int vi_common_suspend(void *handle)
{
	return vi_common_hw_fini(handle);
}
1775 :
/* Resume is just the common-block HW bring-up. */
static int vi_common_resume(void *handle)
{
	return vi_common_hw_init(handle);
}
1782 :
static bool vi_common_is_idle(void *handle)
{
	/* the common block has no busy state to report */
	return true;
}
1787 :
static int vi_common_wait_for_idle(void *handle)
{
	/* nothing to wait on — see vi_common_is_idle() */
	return 0;
}
1792 :
static int vi_common_soft_reset(void *handle)
{
	/* no soft reset procedure for the common block */
	return 0;
}
1797 :
1798 0 : static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1799 : bool enable)
1800 : {
1801 : uint32_t temp, data;
1802 :
1803 0 : temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1804 :
1805 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1806 0 : data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1807 : PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1808 : PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1809 : else
1810 0 : data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1811 : PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1812 : PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1813 :
1814 0 : if (temp != data)
1815 0 : WREG32_PCIE(ixPCIE_CNTL2, data);
1816 0 : }
1817 :
1818 0 : static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1819 : bool enable)
1820 : {
1821 : uint32_t temp, data;
1822 :
1823 0 : temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1824 :
1825 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1826 0 : data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1827 : else
1828 0 : data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1829 :
1830 0 : if (temp != data)
1831 0 : WREG32(mmHDP_HOST_PATH_CNTL, data);
1832 0 : }
1833 :
1834 0 : static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1835 : bool enable)
1836 : {
1837 : uint32_t temp, data;
1838 :
1839 0 : temp = data = RREG32(mmHDP_MEM_POWER_LS);
1840 :
1841 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1842 0 : data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1843 : else
1844 0 : data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1845 :
1846 0 : if (temp != data)
1847 0 : WREG32(mmHDP_MEM_POWER_LS, data);
1848 0 : }
1849 :
1850 0 : static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1851 : bool enable)
1852 : {
1853 : uint32_t temp, data;
1854 :
1855 0 : temp = data = RREG32(0x157a);
1856 :
1857 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1858 0 : data |= 1;
1859 : else
1860 0 : data &= ~1;
1861 :
1862 0 : if (temp != data)
1863 0 : WREG32(0x157a, data);
1864 0 : }
1865 :
1866 :
1867 0 : static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1868 : bool enable)
1869 : {
1870 : uint32_t temp, data;
1871 :
1872 0 : temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1873 :
1874 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1875 0 : data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1876 : CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1877 : else
1878 0 : data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1879 : CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1880 :
1881 0 : if (temp != data)
1882 0 : WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1883 0 : }
1884 :
1885 0 : static int vi_common_set_clockgating_state_by_smu(void *handle,
1886 : enum amd_clockgating_state state)
1887 : {
1888 0 : uint32_t msg_id, pp_state = 0;
1889 0 : uint32_t pp_support_state = 0;
1890 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1891 :
1892 0 : if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1893 0 : if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1894 0 : pp_support_state = PP_STATE_SUPPORT_LS;
1895 0 : pp_state = PP_STATE_LS;
1896 : }
1897 0 : if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1898 0 : pp_support_state |= PP_STATE_SUPPORT_CG;
1899 0 : pp_state |= PP_STATE_CG;
1900 : }
1901 0 : if (state == AMD_CG_STATE_UNGATE)
1902 0 : pp_state = 0;
1903 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1904 : PP_BLOCK_SYS_MC,
1905 : pp_support_state,
1906 : pp_state);
1907 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1908 : }
1909 :
1910 0 : if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1911 0 : if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1912 0 : pp_support_state = PP_STATE_SUPPORT_LS;
1913 0 : pp_state = PP_STATE_LS;
1914 : }
1915 0 : if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1916 0 : pp_support_state |= PP_STATE_SUPPORT_CG;
1917 0 : pp_state |= PP_STATE_CG;
1918 : }
1919 0 : if (state == AMD_CG_STATE_UNGATE)
1920 0 : pp_state = 0;
1921 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1922 : PP_BLOCK_SYS_SDMA,
1923 : pp_support_state,
1924 : pp_state);
1925 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1926 : }
1927 :
1928 0 : if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1929 0 : if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1930 0 : pp_support_state = PP_STATE_SUPPORT_LS;
1931 0 : pp_state = PP_STATE_LS;
1932 : }
1933 0 : if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1934 0 : pp_support_state |= PP_STATE_SUPPORT_CG;
1935 0 : pp_state |= PP_STATE_CG;
1936 : }
1937 0 : if (state == AMD_CG_STATE_UNGATE)
1938 0 : pp_state = 0;
1939 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1940 : PP_BLOCK_SYS_HDP,
1941 : pp_support_state,
1942 : pp_state);
1943 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1944 : }
1945 :
1946 :
1947 0 : if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1948 0 : if (state == AMD_CG_STATE_UNGATE)
1949 : pp_state = 0;
1950 : else
1951 0 : pp_state = PP_STATE_LS;
1952 :
1953 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1954 : PP_BLOCK_SYS_BIF,
1955 : PP_STATE_SUPPORT_LS,
1956 : pp_state);
1957 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1958 : }
1959 0 : if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1960 0 : if (state == AMD_CG_STATE_UNGATE)
1961 : pp_state = 0;
1962 : else
1963 0 : pp_state = PP_STATE_CG;
1964 :
1965 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1966 : PP_BLOCK_SYS_BIF,
1967 : PP_STATE_SUPPORT_CG,
1968 : pp_state);
1969 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1970 : }
1971 :
1972 0 : if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1973 :
1974 0 : if (state == AMD_CG_STATE_UNGATE)
1975 : pp_state = 0;
1976 : else
1977 0 : pp_state = PP_STATE_LS;
1978 :
1979 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1980 : PP_BLOCK_SYS_DRM,
1981 : PP_STATE_SUPPORT_LS,
1982 : pp_state);
1983 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1984 : }
1985 :
1986 0 : if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1987 :
1988 0 : if (state == AMD_CG_STATE_UNGATE)
1989 : pp_state = 0;
1990 : else
1991 0 : pp_state = PP_STATE_CG;
1992 :
1993 0 : msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1994 : PP_BLOCK_SYS_ROM,
1995 : PP_STATE_SUPPORT_CG,
1996 : pp_state);
1997 0 : amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1998 : }
1999 0 : return 0;
2000 : }
2001 :
2002 0 : static int vi_common_set_clockgating_state(void *handle,
2003 : enum amd_clockgating_state state)
2004 : {
2005 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2006 :
2007 0 : if (amdgpu_sriov_vf(adev))
2008 : return 0;
2009 :
2010 0 : switch (adev->asic_type) {
2011 : case CHIP_FIJI:
2012 0 : vi_update_bif_medium_grain_light_sleep(adev,
2013 : state == AMD_CG_STATE_GATE);
2014 0 : vi_update_hdp_medium_grain_clock_gating(adev,
2015 : state == AMD_CG_STATE_GATE);
2016 0 : vi_update_hdp_light_sleep(adev,
2017 : state == AMD_CG_STATE_GATE);
2018 0 : vi_update_rom_medium_grain_clock_gating(adev,
2019 : state == AMD_CG_STATE_GATE);
2020 0 : break;
2021 : case CHIP_CARRIZO:
2022 : case CHIP_STONEY:
2023 0 : vi_update_bif_medium_grain_light_sleep(adev,
2024 : state == AMD_CG_STATE_GATE);
2025 0 : vi_update_hdp_medium_grain_clock_gating(adev,
2026 : state == AMD_CG_STATE_GATE);
2027 0 : vi_update_hdp_light_sleep(adev,
2028 : state == AMD_CG_STATE_GATE);
2029 0 : vi_update_drm_light_sleep(adev,
2030 : state == AMD_CG_STATE_GATE);
2031 0 : break;
2032 : case CHIP_TONGA:
2033 : case CHIP_POLARIS10:
2034 : case CHIP_POLARIS11:
2035 : case CHIP_POLARIS12:
2036 : case CHIP_VEGAM:
2037 0 : vi_common_set_clockgating_state_by_smu(adev, state);
2038 0 : break;
2039 : default:
2040 : break;
2041 : }
2042 : return 0;
2043 : }
2044 :
2045 0 : static int vi_common_set_powergating_state(void *handle,
2046 : enum amd_powergating_state state)
2047 : {
2048 0 : return 0;
2049 : }
2050 :
2051 0 : static void vi_common_get_clockgating_state(void *handle, u64 *flags)
2052 : {
2053 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2054 : int data;
2055 :
2056 0 : if (amdgpu_sriov_vf(adev))
2057 0 : *flags = 0;
2058 :
2059 : /* AMD_CG_SUPPORT_BIF_LS */
2060 0 : data = RREG32_PCIE(ixPCIE_CNTL2);
2061 0 : if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
2062 0 : *flags |= AMD_CG_SUPPORT_BIF_LS;
2063 :
2064 : /* AMD_CG_SUPPORT_HDP_LS */
2065 0 : data = RREG32(mmHDP_MEM_POWER_LS);
2066 0 : if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
2067 0 : *flags |= AMD_CG_SUPPORT_HDP_LS;
2068 :
2069 : /* AMD_CG_SUPPORT_HDP_MGCG */
2070 0 : data = RREG32(mmHDP_HOST_PATH_CNTL);
2071 0 : if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
2072 0 : *flags |= AMD_CG_SUPPORT_HDP_MGCG;
2073 :
2074 : /* AMD_CG_SUPPORT_ROM_MGCG */
2075 0 : data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
2076 0 : if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
2077 0 : *flags |= AMD_CG_SUPPORT_ROM_MGCG;
2078 0 : }
2079 :
/* Lifecycle and power-management hooks for the VI "common" IP block. */
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};
2097 :
/* Common IP block descriptor (v1.0.0) registered first for every VI ASIC. */
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
2106 :
/* Install the VI-specific virtualization (XGPU) ops on the device. */
void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}
2111 :
/*
 * vi_set_ip_blocks - register the per-ASIC IP blocks for VI parts
 * @adev: amdgpu device pointer
 *
 * Adds the common, GMC, IH, GFX, SDMA, SMU, display, UVD/VCE (and, where
 * configured, ACP) IP blocks appropriate for the detected ASIC.  Display
 * selection priority in each case is: virtual display (vkms), then DC
 * (when CONFIG_DRM_AMD_DC), then the legacy DCE block.
 *
 * Returns 0 on success, -EINVAL for an unhandled ASIC type.
 */
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		/* SR-IOV guests always use the virtual display path */
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		/* media blocks stay under host control for SR-IOV VFs */
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		/* SR-IOV guests always use the virtual display path */
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		/* media blocks stay under host control for SR-IOV VFs */
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
2236 :
2237 0 : void legacy_doorbell_index_init(struct amdgpu_device *adev)
2238 : {
2239 0 : adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
2240 0 : adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
2241 0 : adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
2242 0 : adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
2243 0 : adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
2244 0 : adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
2245 0 : adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
2246 0 : adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
2247 0 : adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
2248 0 : adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
2249 0 : adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
2250 0 : adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
2251 0 : adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
2252 0 : adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
2253 0 : }
|