Line data Source code
1 : /*
2 : * Copyright 2019 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 : #include <linux/firmware.h>
24 : #include <linux/slab.h>
25 : #include <linux/module.h>
26 : #include <linux/pci.h>
27 :
28 : #include <drm/amdgpu_drm.h>
29 :
30 : #include "amdgpu.h"
31 : #include "amdgpu_atombios.h"
32 : #include "amdgpu_ih.h"
33 : #include "amdgpu_uvd.h"
34 : #include "amdgpu_vce.h"
35 : #include "amdgpu_ucode.h"
36 : #include "amdgpu_psp.h"
37 : #include "atom.h"
38 : #include "amd_pcie.h"
39 :
40 : #include "gc/gc_10_1_0_offset.h"
41 : #include "gc/gc_10_1_0_sh_mask.h"
42 : #include "mp/mp_11_0_offset.h"
43 :
44 : #include "soc15.h"
45 : #include "soc15_common.h"
46 : #include "gmc_v10_0.h"
47 : #include "gfxhub_v2_0.h"
48 : #include "mmhub_v2_0.h"
49 : #include "nbio_v2_3.h"
50 : #include "nbio_v7_2.h"
51 : #include "hdp_v5_0.h"
52 : #include "nv.h"
53 : #include "navi10_ih.h"
54 : #include "gfx_v10_0.h"
55 : #include "sdma_v5_0.h"
56 : #include "sdma_v5_2.h"
57 : #include "vcn_v2_0.h"
58 : #include "jpeg_v2_0.h"
59 : #include "vcn_v3_0.h"
60 : #include "jpeg_v3_0.h"
61 : #include "amdgpu_vkms.h"
62 : #include "mes_v10_1.h"
63 : #include "mxgpu_nv.h"
64 : #include "smuio_v11_0.h"
65 : #include "smuio_v11_0_6.h"
66 :
67 : static const struct amd_ip_funcs nv_common_ip_funcs;
68 :
69 : /* Navi */
70 : static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
71 : {
72 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
73 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
74 : };
75 :
76 : static const struct amdgpu_video_codecs nv_video_codecs_encode =
77 : {
78 : .codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
79 : .codec_array = nv_video_codecs_encode_array,
80 : };
81 :
82 : /* Navi1x */
83 : static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
84 : {
85 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
86 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
87 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
88 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
89 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
90 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
91 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
92 : };
93 :
94 : static const struct amdgpu_video_codecs nv_video_codecs_decode =
95 : {
96 : .codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
97 : .codec_array = nv_video_codecs_decode_array,
98 : };
99 :
100 : /* Sienna Cichlid */
101 : static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
102 : {
103 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
104 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
105 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
106 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
107 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
108 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
109 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
110 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
111 : };
112 :
113 : static const struct amdgpu_video_codecs sc_video_codecs_decode =
114 : {
115 : .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
116 : .codec_array = sc_video_codecs_decode_array,
117 : };
118 :
119 : /* SR-IOV Sienna Cichlid, not const since the data is controlled by the host */
120 : static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
121 : {
122 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
123 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
124 : };
125 :
126 : static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
127 : {
128 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
129 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
130 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
131 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
132 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
133 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
134 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
135 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
136 : };
137 :
138 : static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
139 : {
140 : .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
141 : .codec_array = sriov_sc_video_codecs_encode_array,
142 : };
143 :
144 : static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
145 : {
146 : .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
147 : .codec_array = sriov_sc_video_codecs_decode_array,
148 : };
149 :
150 : /* Beige Goby */
151 : static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
152 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
153 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
154 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
155 : };
156 :
157 : static const struct amdgpu_video_codecs bg_video_codecs_decode = {
158 : .codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
159 : .codec_array = bg_video_codecs_decode_array,
160 : };
161 :
162 : static const struct amdgpu_video_codecs bg_video_codecs_encode = {
163 : .codec_count = 0,
164 : .codec_array = NULL,
165 : };
166 :
167 : /* Yellow Carp */
168 : static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
169 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
170 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
171 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
172 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
173 : {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
174 : };
175 :
176 : static const struct amdgpu_video_codecs yc_video_codecs_decode = {
177 : .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
178 : .codec_array = yc_video_codecs_decode_array,
179 : };
180 :
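 : /* Return the encode or decode codec table matching the detected VCN IP version. */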
181 0 : static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
182 : const struct amdgpu_video_codecs **codecs)
183 : {
184 0 : switch (adev->ip_versions[UVD_HWIP][0]) {
185 : case IP_VERSION(3, 0, 0):
186 : case IP_VERSION(3, 0, 64):
187 : case IP_VERSION(3, 0, 192):
188 0 : if (amdgpu_sriov_vf(adev)) {
189 0 : if (encode)
190 0 : *codecs = &sriov_sc_video_codecs_encode;
191 : else
192 0 : *codecs = &sriov_sc_video_codecs_decode;
193 : } else {
194 0 : if (encode)
195 0 : *codecs = &nv_video_codecs_encode;
196 : else
197 0 : *codecs = &sc_video_codecs_decode;
198 : }
199 : return 0;
200 : case IP_VERSION(3, 0, 16):
201 : case IP_VERSION(3, 0, 2):
202 0 : if (encode)
203 0 : *codecs = &nv_video_codecs_encode;
204 : else
205 0 : *codecs = &sc_video_codecs_decode;
206 : return 0;
207 : case IP_VERSION(3, 1, 1):
208 : case IP_VERSION(3, 1, 2):
209 0 : if (encode)
210 0 : *codecs = &nv_video_codecs_encode;
211 : else
212 0 : *codecs = &yc_video_codecs_decode;
213 : return 0;
214 : case IP_VERSION(3, 0, 33):
215 0 : if (encode)
216 0 : *codecs = &bg_video_codecs_encode;
217 : else
218 0 : *codecs = &bg_video_codecs_decode;
219 : return 0;
220 : case IP_VERSION(2, 0, 0):
221 : case IP_VERSION(2, 0, 2):
222 0 : if (encode)
223 0 : *codecs = &nv_video_codecs_encode;
224 : else
225 0 : *codecs = &nv_video_codecs_decode;
226 : return 0;
227 : default:
228 : return -EINVAL;
229 : }
230 : }
231 :
232 : /*
233 : * Indirect register accessors
234 : */
235 0 : static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
236 : {
237 : unsigned long address, data;
238 0 : address = adev->nbio.funcs->get_pcie_index_offset(adev);
239 0 : data = adev->nbio.funcs->get_pcie_data_offset(adev);
240 :
241 0 : return amdgpu_device_indirect_rreg(adev, address, data, reg);
242 : }
243 :
244 0 : static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
245 : {
246 : unsigned long address, data;
247 :
248 0 : address = adev->nbio.funcs->get_pcie_index_offset(adev);
249 0 : data = adev->nbio.funcs->get_pcie_data_offset(adev);
250 :
251 0 : amdgpu_device_indirect_wreg(adev, address, data, reg, v);
252 0 : }
253 :
254 0 : static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
255 : {
256 : unsigned long address, data;
257 0 : address = adev->nbio.funcs->get_pcie_index_offset(adev);
258 0 : data = adev->nbio.funcs->get_pcie_data_offset(adev);
259 :
260 0 : return amdgpu_device_indirect_rreg64(adev, address, data, reg);
261 : }
262 :
263 0 : static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
264 : {
265 : unsigned long address, data;
266 :
267 0 : address = adev->nbio.funcs->get_pcie_index_offset(adev);
268 0 : data = adev->nbio.funcs->get_pcie_data_offset(adev);
269 :
270 0 : amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
271 0 : }
272 :
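 : /* DIDT registers are accessed indirectly via DIDT_IND_INDEX/DIDT_IND_DATA under didt_idx_lock. */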
273 0 : static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
274 : {
275 : unsigned long flags, address, data;
276 : u32 r;
277 :
278 0 : address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
279 0 : data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
280 :
281 0 : spin_lock_irqsave(&adev->didt_idx_lock, flags);
282 0 : WREG32(address, (reg));
283 0 : r = RREG32(data);
284 0 : spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
285 0 : return r;
286 : }
287 :
288 0 : static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
289 : {
290 : unsigned long flags, address, data;
291 :
292 0 : address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
293 0 : data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
294 :
295 0 : spin_lock_irqsave(&adev->didt_idx_lock, flags);
296 0 : WREG32(address, (reg));
297 0 : WREG32(data, (v));
298 0 : spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
299 0 : }
300 :
301 0 : static u32 nv_get_config_memsize(struct amdgpu_device *adev)
302 : {
303 0 : return adev->nbio.funcs->get_memsize(adev);
304 : }
305 :
306 0 : static u32 nv_get_xclk(struct amdgpu_device *adev)
307 : {
308 0 : return adev->clock.spll.reference_freq;
309 : }
310 :
311 :
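 : /* Select which ME/pipe/queue/VMID subsequent GRBM-indexed register accesses target. */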
312 0 : void nv_grbm_select(struct amdgpu_device *adev,
313 : u32 me, u32 pipe, u32 queue, u32 vmid)
314 : {
315 0 : u32 grbm_gfx_cntl = 0;
316 0 : grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
317 0 : grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
318 0 : grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
319 0 : grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
320 :
321 0 : WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
322 0 : }
323 :
324 0 : static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
325 : {
326 : /* todo */
327 0 : }
328 :
329 0 : static bool nv_read_disabled_bios(struct amdgpu_device *adev)
330 : {
331 : /* todo */
332 0 : return false;
333 : }
334 :
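 : /* Registers that nv_read_register() allows userspace to query. */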
335 : static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
336 : { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
337 : { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
338 : { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
339 : { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
340 : { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
341 : { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
342 : { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
343 : { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
344 : { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
345 : { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
346 : { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
347 : { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
348 : { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
349 : { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
350 : { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
351 : { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
352 : { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
353 : { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
354 : { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
355 : };
356 :
357 0 : static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
358 : u32 sh_num, u32 reg_offset)
359 : {
360 : uint32_t val;
361 :
362 0 : mutex_lock(&adev->grbm_idx_mutex);
363 0 : if (se_num != 0xffffffff || sh_num != 0xffffffff)
364 0 : amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
365 :
366 0 : val = RREG32(reg_offset);
367 :
368 0 : if (se_num != 0xffffffff || sh_num != 0xffffffff)
369 0 : amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
370 0 : mutex_unlock(&adev->grbm_idx_mutex);
371 0 : return val;
372 : }
373 :
374 0 : static uint32_t nv_get_register_value(struct amdgpu_device *adev,
375 : bool indexed, u32 se_num,
376 : u32 sh_num, u32 reg_offset)
377 : {
378 0 : if (indexed) {
379 0 : return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
380 : } else {
381 0 : if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
382 0 : return adev->gfx.config.gb_addr_config;
383 0 : return RREG32(reg_offset);
384 : }
385 : }
386 :
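 : /* Read a register on behalf of userspace; only offsets in the allow-list above are accepted. */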
387 0 : static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
388 : u32 sh_num, u32 reg_offset, u32 *value)
389 : {
390 : uint32_t i;
391 : struct soc15_allowed_register_entry *en;
392 :
393 0 : *value = 0;
394 0 : for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
395 0 : en = &nv_allowed_read_registers[i];
396 0 : if (adev->reg_offset[en->hwip][en->inst] &&
397 0 : reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
398 0 : + en->reg_offset))
399 0 : continue;
400 :
401 0 : *value = nv_get_register_value(adev,
402 0 : nv_allowed_read_registers[i].grbm_indexed,
403 : se_num, sh_num, reg_offset);
404 0 : return 0;
405 : }
406 : return -EINVAL;
407 : }
408 :
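 : /* Mode2 reset: disable bus mastering, cache the PCI config state, reset via DPM,
 :  * then wait for the memory controller to report a valid size again.
 :  */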
409 0 : static int nv_asic_mode2_reset(struct amdgpu_device *adev)
410 : {
411 : u32 i;
412 0 : int ret = 0;
413 :
414 0 : amdgpu_atombios_scratch_regs_engine_hung(adev, true);
415 :
416 : /* disable BM */
417 0 : pci_clear_master(adev->pdev);
418 :
419 0 : amdgpu_device_cache_pci_state(adev->pdev);
420 :
421 0 : ret = amdgpu_dpm_mode2_reset(adev);
422 0 : if (ret)
423 0 : dev_err(adev->dev, "GPU mode2 reset failed\n");
424 :
425 0 : amdgpu_device_load_pci_state(adev->pdev);
426 :
427 : /* wait for the ASIC to come out of reset */
428 0 : for (i = 0; i < adev->usec_timeout; i++) {
429 0 : u32 memsize = adev->nbio.funcs->get_memsize(adev);
430 :
431 0 : if (memsize != 0xffffffff)
432 : break;
433 0 : udelay(1);
434 : }
435 :
436 0 : amdgpu_atombios_scratch_regs_engine_hung(adev, false);
437 :
438 0 : return ret;
439 : }
440 :
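 : /* Pick the reset method: honor a supported amdgpu_reset_method override,
 :  * otherwise choose MODE2, MODE1 or BACO based on the MP1 IP version.
 :  */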
441 : static enum amd_reset_method
442 0 : nv_asic_reset_method(struct amdgpu_device *adev)
443 : {
444 0 : if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
445 : amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
446 0 : amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
447 : amdgpu_reset_method == AMD_RESET_METHOD_PCI)
448 : return amdgpu_reset_method;
449 :
450 0 : if (amdgpu_reset_method != -1)
451 0 : dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
452 : amdgpu_reset_method);
453 :
454 0 : switch (adev->ip_versions[MP1_HWIP][0]) {
455 : case IP_VERSION(11, 5, 0):
456 : case IP_VERSION(13, 0, 1):
457 : case IP_VERSION(13, 0, 3):
458 : case IP_VERSION(13, 0, 5):
459 : case IP_VERSION(13, 0, 8):
460 : return AMD_RESET_METHOD_MODE2;
461 : case IP_VERSION(11, 0, 7):
462 : case IP_VERSION(11, 0, 11):
463 : case IP_VERSION(11, 0, 12):
464 : case IP_VERSION(11, 0, 13):
465 0 : return AMD_RESET_METHOD_MODE1;
466 : default:
467 0 : if (amdgpu_dpm_is_baco_supported(adev))
468 : return AMD_RESET_METHOD_BACO;
469 : else
470 0 : return AMD_RESET_METHOD_MODE1;
471 : }
472 : }
473 :
474 0 : static int nv_asic_reset(struct amdgpu_device *adev)
475 : {
476 0 : int ret = 0;
477 :
478 0 : switch (nv_asic_reset_method(adev)) {
479 : case AMD_RESET_METHOD_PCI:
480 0 : dev_info(adev->dev, "PCI reset\n");
481 0 : ret = amdgpu_device_pci_reset(adev);
482 0 : break;
483 : case AMD_RESET_METHOD_BACO:
484 0 : dev_info(adev->dev, "BACO reset\n");
485 0 : ret = amdgpu_dpm_baco_reset(adev);
486 0 : break;
487 : case AMD_RESET_METHOD_MODE2:
488 0 : dev_info(adev->dev, "MODE2 reset\n");
489 0 : ret = nv_asic_mode2_reset(adev);
490 0 : break;
491 : default:
492 0 : dev_info(adev->dev, "MODE1 reset\n");
493 0 : ret = amdgpu_device_mode1_reset(adev);
494 0 : break;
495 : }
496 :
497 0 : return ret;
498 : }
499 :
500 0 : static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
501 : {
502 : /* todo */
503 0 : return 0;
504 : }
505 :
506 0 : static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
507 : {
508 : /* todo */
509 0 : return 0;
510 : }
511 :
512 : static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
513 : {
514 0 : if (pci_is_root_bus(adev->pdev->bus))
515 : return;
516 :
517 : if (amdgpu_pcie_gen2 == 0)
518 : return;
519 :
520 : if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
521 : CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
522 : return;
523 :
524 : /* todo */
525 : }
526 :
527 0 : static void nv_program_aspm(struct amdgpu_device *adev)
528 : {
529 0 : if (!amdgpu_device_should_use_aspm(adev))
530 : return;
531 :
532 0 : if (!(adev->flags & AMD_IS_APU) &&
533 0 : (adev->nbio.funcs->program_aspm))
534 0 : adev->nbio.funcs->program_aspm(adev);
535 :
536 : }
537 :
538 : static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
539 : bool enable)
540 : {
541 0 : adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
542 0 : adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
543 : }
544 :
545 : const struct amdgpu_ip_block_version nv_common_ip_block =
546 : {
547 : .type = AMD_IP_BLOCK_TYPE_COMMON,
548 : .major = 1,
549 : .minor = 0,
550 : .rev = 0,
551 : .funcs = &nv_common_ip_funcs,
552 : };
553 :
554 0 : void nv_set_virt_ops(struct amdgpu_device *adev)
555 : {
556 0 : adev->virt.ops = &xgpu_nv_virt_ops;
557 0 : }
558 :
559 : static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
560 : {
561 0 : return adev->nbio.funcs->get_rev_id(adev);
562 : }
563 :
564 0 : static bool nv_need_full_reset(struct amdgpu_device *adev)
565 : {
566 0 : return true;
567 : }
568 :
569 0 : static bool nv_need_reset_on_init(struct amdgpu_device *adev)
570 : {
571 : u32 sol_reg;
572 :
573 0 : if (adev->flags & AMD_IS_APU)
574 : return false;
575 :
576 : /* Check the sOS sign-of-life register to confirm that the sys driver
577 : * and sOS have already been loaded.
578 : */
579 0 : sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
580 0 : if (sol_reg)
581 : return true;
582 :
583 0 : return false;
584 : }
585 :
586 0 : static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
587 : {
588 :
589 : /* TODO
590 : * dummy implementation for the pcie_replay_count sysfs interface
591 : */
592 :
593 0 : return 0;
594 : }
595 :
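 : /* Lay out the Navi1x doorbell indices for the KIQ, MEC, GFX, MES, SDMA, IH and VCN rings. */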
596 0 : static void nv_init_doorbell_index(struct amdgpu_device *adev)
597 : {
598 0 : adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
599 0 : adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
600 0 : adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
601 0 : adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
602 0 : adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
603 0 : adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
604 0 : adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
605 0 : adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
606 0 : adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
607 0 : adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
608 0 : adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
609 0 : adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
610 0 : adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
611 0 : adev->doorbell_index.gfx_userqueue_start =
612 : AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
613 0 : adev->doorbell_index.gfx_userqueue_end =
614 : AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
615 0 : adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
616 0 : adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
617 0 : adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
618 0 : adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
619 0 : adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
620 0 : adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
621 0 : adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
622 0 : adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
623 0 : adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
624 0 : adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
625 0 : adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
626 0 : adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
627 0 : adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;
628 :
629 0 : adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
630 0 : adev->doorbell_index.sdma_doorbell_range = 20;
631 0 : }
632 :
633 0 : static void nv_pre_asic_init(struct amdgpu_device *adev)
634 : {
635 0 : }
636 :
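 : /* Entering the UMD stable pstate parks the RLC in safe mode and disables perfmon
 :  * MGCG and ASPM so performance counters read consistently; leaving it restores them.
 :  */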
637 0 : static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
638 : bool enter)
639 : {
640 0 : if (enter)
641 0 : amdgpu_gfx_rlc_enter_safe_mode(adev);
642 : else
643 0 : amdgpu_gfx_rlc_exit_safe_mode(adev);
644 :
645 0 : if (adev->gfx.funcs->update_perfmon_mgcg)
646 0 : adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
647 :
648 0 : if (!(adev->flags & AMD_IS_APU) &&
649 0 : (adev->nbio.funcs->enable_aspm) &&
650 0 : amdgpu_device_should_use_aspm(adev))
651 0 : adev->nbio.funcs->enable_aspm(adev, !enter);
652 :
653 0 : return 0;
654 : }
655 :
656 : static const struct amdgpu_asic_funcs nv_asic_funcs =
657 : {
658 : .read_disabled_bios = &nv_read_disabled_bios,
659 : .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
660 : .read_register = &nv_read_register,
661 : .reset = &nv_asic_reset,
662 : .reset_method = &nv_asic_reset_method,
663 : .set_vga_state = &nv_vga_set_state,
664 : .get_xclk = &nv_get_xclk,
665 : .set_uvd_clocks = &nv_set_uvd_clocks,
666 : .set_vce_clocks = &nv_set_vce_clocks,
667 : .get_config_memsize = &nv_get_config_memsize,
668 : .init_doorbell_index = &nv_init_doorbell_index,
669 : .need_full_reset = &nv_need_full_reset,
670 : .need_reset_on_init = &nv_need_reset_on_init,
671 : .get_pcie_replay_count = &nv_get_pcie_replay_count,
672 : .supports_baco = &amdgpu_dpm_is_baco_supported,
673 : .pre_asic_init = &nv_pre_asic_init,
674 : .update_umd_stable_pstate = &nv_update_umd_stable_pstate,
675 : .query_video_codecs = &nv_query_video_codecs,
676 : };
677 :
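 : /* Set up register accessors, ASIC callbacks and the per-IP CG/PG flags based on
 :  * the GC IP version.
 :  */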
678 0 : static int nv_common_early_init(void *handle)
679 : {
680 : #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
681 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
682 :
683 0 : if (!amdgpu_sriov_vf(adev)) {
684 0 : adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
685 0 : adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
686 : }
687 0 : adev->smc_rreg = NULL;
688 0 : adev->smc_wreg = NULL;
689 0 : adev->pcie_rreg = &nv_pcie_rreg;
690 0 : adev->pcie_wreg = &nv_pcie_wreg;
691 0 : adev->pcie_rreg64 = &nv_pcie_rreg64;
692 0 : adev->pcie_wreg64 = &nv_pcie_wreg64;
693 0 : adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
694 0 : adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
695 :
696 : /* TODO: these will be added during the VCN v2 implementation */
697 0 : adev->uvd_ctx_rreg = NULL;
698 0 : adev->uvd_ctx_wreg = NULL;
699 :
700 0 : adev->didt_rreg = &nv_didt_rreg;
701 0 : adev->didt_wreg = &nv_didt_wreg;
702 :
703 0 : adev->asic_funcs = &nv_asic_funcs;
704 :
705 0 : adev->rev_id = nv_get_rev_id(adev);
706 0 : adev->external_rev_id = 0xff;
707 : /* TODO: split the CG and PG flags based on the IP version to which
708 : * they apply.
709 : */
710 0 : switch (adev->ip_versions[GC_HWIP][0]) {
711 : case IP_VERSION(10, 1, 10):
712 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
713 : AMD_CG_SUPPORT_GFX_CGCG |
714 : AMD_CG_SUPPORT_IH_CG |
715 : AMD_CG_SUPPORT_HDP_MGCG |
716 : AMD_CG_SUPPORT_HDP_LS |
717 : AMD_CG_SUPPORT_SDMA_MGCG |
718 : AMD_CG_SUPPORT_SDMA_LS |
719 : AMD_CG_SUPPORT_MC_MGCG |
720 : AMD_CG_SUPPORT_MC_LS |
721 : AMD_CG_SUPPORT_ATHUB_MGCG |
722 : AMD_CG_SUPPORT_ATHUB_LS |
723 : AMD_CG_SUPPORT_VCN_MGCG |
724 : AMD_CG_SUPPORT_JPEG_MGCG |
725 : AMD_CG_SUPPORT_BIF_MGCG |
726 : AMD_CG_SUPPORT_BIF_LS;
727 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
728 : AMD_PG_SUPPORT_VCN_DPG |
729 : AMD_PG_SUPPORT_JPEG |
730 : AMD_PG_SUPPORT_ATHUB;
731 0 : adev->external_rev_id = adev->rev_id + 0x1;
732 0 : break;
733 : case IP_VERSION(10, 1, 1):
734 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
735 : AMD_CG_SUPPORT_GFX_CGCG |
736 : AMD_CG_SUPPORT_IH_CG |
737 : AMD_CG_SUPPORT_HDP_MGCG |
738 : AMD_CG_SUPPORT_HDP_LS |
739 : AMD_CG_SUPPORT_SDMA_MGCG |
740 : AMD_CG_SUPPORT_SDMA_LS |
741 : AMD_CG_SUPPORT_MC_MGCG |
742 : AMD_CG_SUPPORT_MC_LS |
743 : AMD_CG_SUPPORT_ATHUB_MGCG |
744 : AMD_CG_SUPPORT_ATHUB_LS |
745 : AMD_CG_SUPPORT_VCN_MGCG |
746 : AMD_CG_SUPPORT_JPEG_MGCG |
747 : AMD_CG_SUPPORT_BIF_MGCG |
748 : AMD_CG_SUPPORT_BIF_LS;
749 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
750 : AMD_PG_SUPPORT_JPEG |
751 : AMD_PG_SUPPORT_VCN_DPG;
752 0 : adev->external_rev_id = adev->rev_id + 20;
753 0 : break;
754 : case IP_VERSION(10, 1, 2):
755 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
756 : AMD_CG_SUPPORT_GFX_MGLS |
757 : AMD_CG_SUPPORT_GFX_CGCG |
758 : AMD_CG_SUPPORT_GFX_CP_LS |
759 : AMD_CG_SUPPORT_GFX_RLC_LS |
760 : AMD_CG_SUPPORT_IH_CG |
761 : AMD_CG_SUPPORT_HDP_MGCG |
762 : AMD_CG_SUPPORT_HDP_LS |
763 : AMD_CG_SUPPORT_SDMA_MGCG |
764 : AMD_CG_SUPPORT_SDMA_LS |
765 : AMD_CG_SUPPORT_MC_MGCG |
766 : AMD_CG_SUPPORT_MC_LS |
767 : AMD_CG_SUPPORT_ATHUB_MGCG |
768 : AMD_CG_SUPPORT_ATHUB_LS |
769 : AMD_CG_SUPPORT_VCN_MGCG |
770 : AMD_CG_SUPPORT_JPEG_MGCG;
771 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
772 : AMD_PG_SUPPORT_VCN_DPG |
773 : AMD_PG_SUPPORT_JPEG |
774 : AMD_PG_SUPPORT_ATHUB;
775 : /* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
776 : * as a consequence, the rev_id and external_rev_id are wrong.
777 : * Work around this by hardcoding rev_id to 0 (the default value).
778 : */
779 0 : if (amdgpu_sriov_vf(adev))
780 0 : adev->rev_id = 0;
781 0 : adev->external_rev_id = adev->rev_id + 0xa;
782 0 : break;
783 : case IP_VERSION(10, 3, 0):
784 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
785 : AMD_CG_SUPPORT_GFX_CGCG |
786 : AMD_CG_SUPPORT_GFX_CGLS |
787 : AMD_CG_SUPPORT_GFX_3D_CGCG |
788 : AMD_CG_SUPPORT_MC_MGCG |
789 : AMD_CG_SUPPORT_VCN_MGCG |
790 : AMD_CG_SUPPORT_JPEG_MGCG |
791 : AMD_CG_SUPPORT_HDP_MGCG |
792 : AMD_CG_SUPPORT_HDP_LS |
793 : AMD_CG_SUPPORT_IH_CG |
794 : AMD_CG_SUPPORT_MC_LS;
795 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
796 : AMD_PG_SUPPORT_VCN_DPG |
797 : AMD_PG_SUPPORT_JPEG |
798 : AMD_PG_SUPPORT_ATHUB |
799 : AMD_PG_SUPPORT_MMHUB;
800 0 : if (amdgpu_sriov_vf(adev)) {
801 : /* the hypervisor controls CG and PG enablement */
802 0 : adev->cg_flags = 0;
803 0 : adev->pg_flags = 0;
804 : }
805 0 : adev->external_rev_id = adev->rev_id + 0x28;
806 0 : break;
807 : case IP_VERSION(10, 3, 2):
808 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
809 : AMD_CG_SUPPORT_GFX_CGCG |
810 : AMD_CG_SUPPORT_GFX_CGLS |
811 : AMD_CG_SUPPORT_GFX_3D_CGCG |
812 : AMD_CG_SUPPORT_VCN_MGCG |
813 : AMD_CG_SUPPORT_JPEG_MGCG |
814 : AMD_CG_SUPPORT_MC_MGCG |
815 : AMD_CG_SUPPORT_MC_LS |
816 : AMD_CG_SUPPORT_HDP_MGCG |
817 : AMD_CG_SUPPORT_HDP_LS |
818 : AMD_CG_SUPPORT_IH_CG;
819 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
820 : AMD_PG_SUPPORT_VCN_DPG |
821 : AMD_PG_SUPPORT_JPEG |
822 : AMD_PG_SUPPORT_ATHUB |
823 : AMD_PG_SUPPORT_MMHUB;
824 0 : adev->external_rev_id = adev->rev_id + 0x32;
825 0 : break;
826 : case IP_VERSION(10, 3, 1):
827 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
828 : AMD_CG_SUPPORT_GFX_MGLS |
829 : AMD_CG_SUPPORT_GFX_CP_LS |
830 : AMD_CG_SUPPORT_GFX_RLC_LS |
831 : AMD_CG_SUPPORT_GFX_CGCG |
832 : AMD_CG_SUPPORT_GFX_CGLS |
833 : AMD_CG_SUPPORT_GFX_3D_CGCG |
834 : AMD_CG_SUPPORT_GFX_3D_CGLS |
835 : AMD_CG_SUPPORT_MC_MGCG |
836 : AMD_CG_SUPPORT_MC_LS |
837 : AMD_CG_SUPPORT_GFX_FGCG |
838 : AMD_CG_SUPPORT_VCN_MGCG |
839 : AMD_CG_SUPPORT_SDMA_MGCG |
840 : AMD_CG_SUPPORT_SDMA_LS |
841 : AMD_CG_SUPPORT_JPEG_MGCG;
842 0 : adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
843 : AMD_PG_SUPPORT_VCN |
844 : AMD_PG_SUPPORT_VCN_DPG |
845 : AMD_PG_SUPPORT_JPEG;
846 0 : if (adev->apu_flags & AMD_APU_IS_VANGOGH)
847 0 : adev->external_rev_id = adev->rev_id + 0x01;
848 : break;
849 : case IP_VERSION(10, 3, 4):
850 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
851 : AMD_CG_SUPPORT_GFX_CGCG |
852 : AMD_CG_SUPPORT_GFX_CGLS |
853 : AMD_CG_SUPPORT_GFX_3D_CGCG |
854 : AMD_CG_SUPPORT_VCN_MGCG |
855 : AMD_CG_SUPPORT_JPEG_MGCG |
856 : AMD_CG_SUPPORT_MC_MGCG |
857 : AMD_CG_SUPPORT_MC_LS |
858 : AMD_CG_SUPPORT_HDP_MGCG |
859 : AMD_CG_SUPPORT_HDP_LS |
860 : AMD_CG_SUPPORT_IH_CG;
861 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
862 : AMD_PG_SUPPORT_VCN_DPG |
863 : AMD_PG_SUPPORT_JPEG |
864 : AMD_PG_SUPPORT_ATHUB |
865 : AMD_PG_SUPPORT_MMHUB;
866 0 : adev->external_rev_id = adev->rev_id + 0x3c;
867 0 : break;
868 : case IP_VERSION(10, 3, 5):
869 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
870 : AMD_CG_SUPPORT_GFX_CGCG |
871 : AMD_CG_SUPPORT_GFX_CGLS |
872 : AMD_CG_SUPPORT_GFX_3D_CGCG |
873 : AMD_CG_SUPPORT_MC_MGCG |
874 : AMD_CG_SUPPORT_MC_LS |
875 : AMD_CG_SUPPORT_HDP_MGCG |
876 : AMD_CG_SUPPORT_HDP_LS |
877 : AMD_CG_SUPPORT_IH_CG |
878 : AMD_CG_SUPPORT_VCN_MGCG;
879 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
880 : AMD_PG_SUPPORT_VCN_DPG |
881 : AMD_PG_SUPPORT_ATHUB |
882 : AMD_PG_SUPPORT_MMHUB;
883 0 : adev->external_rev_id = adev->rev_id + 0x46;
884 0 : break;
885 : case IP_VERSION(10, 3, 3):
886 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
887 : AMD_CG_SUPPORT_GFX_MGLS |
888 : AMD_CG_SUPPORT_GFX_CGCG |
889 : AMD_CG_SUPPORT_GFX_CGLS |
890 : AMD_CG_SUPPORT_GFX_3D_CGCG |
891 : AMD_CG_SUPPORT_GFX_3D_CGLS |
892 : AMD_CG_SUPPORT_GFX_RLC_LS |
893 : AMD_CG_SUPPORT_GFX_CP_LS |
894 : AMD_CG_SUPPORT_GFX_FGCG |
895 : AMD_CG_SUPPORT_MC_MGCG |
896 : AMD_CG_SUPPORT_MC_LS |
897 : AMD_CG_SUPPORT_SDMA_LS |
898 : AMD_CG_SUPPORT_HDP_MGCG |
899 : AMD_CG_SUPPORT_HDP_LS |
900 : AMD_CG_SUPPORT_ATHUB_MGCG |
901 : AMD_CG_SUPPORT_ATHUB_LS |
902 : AMD_CG_SUPPORT_IH_CG |
903 : AMD_CG_SUPPORT_VCN_MGCG |
904 : AMD_CG_SUPPORT_JPEG_MGCG;
905 0 : adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
906 : AMD_PG_SUPPORT_VCN |
907 : AMD_PG_SUPPORT_VCN_DPG |
908 : AMD_PG_SUPPORT_JPEG;
909 0 : if (adev->pdev->device == 0x1681)
910 0 : adev->external_rev_id = 0x20;
911 : else
912 0 : adev->external_rev_id = adev->rev_id + 0x01;
913 : break;
914 : case IP_VERSION(10, 1, 3):
915 : case IP_VERSION(10, 1, 4):
916 0 : adev->cg_flags = 0;
917 0 : adev->pg_flags = 0;
918 0 : adev->external_rev_id = adev->rev_id + 0x82;
919 0 : break;
920 : case IP_VERSION(10, 3, 6):
921 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
922 : AMD_CG_SUPPORT_GFX_MGLS |
923 : AMD_CG_SUPPORT_GFX_CGCG |
924 : AMD_CG_SUPPORT_GFX_CGLS |
925 : AMD_CG_SUPPORT_GFX_3D_CGCG |
926 : AMD_CG_SUPPORT_GFX_3D_CGLS |
927 : AMD_CG_SUPPORT_GFX_RLC_LS |
928 : AMD_CG_SUPPORT_GFX_CP_LS |
929 : AMD_CG_SUPPORT_GFX_FGCG |
930 : AMD_CG_SUPPORT_MC_MGCG |
931 : AMD_CG_SUPPORT_MC_LS |
932 : AMD_CG_SUPPORT_SDMA_LS |
933 : AMD_CG_SUPPORT_HDP_MGCG |
934 : AMD_CG_SUPPORT_HDP_LS |
935 : AMD_CG_SUPPORT_ATHUB_MGCG |
936 : AMD_CG_SUPPORT_ATHUB_LS |
937 : AMD_CG_SUPPORT_IH_CG |
938 : AMD_CG_SUPPORT_VCN_MGCG |
939 : AMD_CG_SUPPORT_JPEG_MGCG;
940 0 : adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
941 : AMD_PG_SUPPORT_VCN |
942 : AMD_PG_SUPPORT_VCN_DPG |
943 : AMD_PG_SUPPORT_JPEG;
944 0 : adev->external_rev_id = adev->rev_id + 0x01;
945 0 : break;
946 : case IP_VERSION(10, 3, 7):
947 0 : adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
948 : AMD_CG_SUPPORT_GFX_MGLS |
949 : AMD_CG_SUPPORT_GFX_CGCG |
950 : AMD_CG_SUPPORT_GFX_CGLS |
951 : AMD_CG_SUPPORT_GFX_3D_CGCG |
952 : AMD_CG_SUPPORT_GFX_3D_CGLS |
953 : AMD_CG_SUPPORT_GFX_RLC_LS |
954 : AMD_CG_SUPPORT_GFX_CP_LS |
955 : AMD_CG_SUPPORT_GFX_FGCG |
956 : AMD_CG_SUPPORT_MC_MGCG |
957 : AMD_CG_SUPPORT_MC_LS |
958 : AMD_CG_SUPPORT_SDMA_LS |
959 : AMD_CG_SUPPORT_HDP_MGCG |
960 : AMD_CG_SUPPORT_HDP_LS |
961 : AMD_CG_SUPPORT_ATHUB_MGCG |
962 : AMD_CG_SUPPORT_ATHUB_LS |
963 : AMD_CG_SUPPORT_IH_CG |
964 : AMD_CG_SUPPORT_VCN_MGCG |
965 : AMD_CG_SUPPORT_JPEG_MGCG;
966 0 : adev->pg_flags = AMD_PG_SUPPORT_VCN |
967 : AMD_PG_SUPPORT_VCN_DPG |
968 : AMD_PG_SUPPORT_JPEG |
969 : AMD_PG_SUPPORT_GFX_PG;
970 0 : adev->external_rev_id = adev->rev_id + 0x01;
971 0 : break;
972 : default:
973 : /* FIXME: not supported yet */
974 : return -EINVAL;
975 : }
976 :
977 0 : if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
978 0 : adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
979 : AMD_PG_SUPPORT_VCN_DPG |
980 : AMD_PG_SUPPORT_JPEG);
981 :
982 0 : if (amdgpu_sriov_vf(adev)) {
983 0 : amdgpu_virt_init_setting(adev);
984 0 : xgpu_nv_mailbox_set_irq_funcs(adev);
985 : }
986 :
987 : return 0;
988 : }
989 :
990 0 : static int nv_common_late_init(void *handle)
991 : {
992 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
993 :
994 0 : if (amdgpu_sriov_vf(adev)) {
995 0 : xgpu_nv_mailbox_get_irq(adev);
996 0 : amdgpu_virt_update_sriov_video_codec(adev,
997 : sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
998 : sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
999 : }
1000 :
1001 0 : return 0;
1002 : }
1003 :
1004 0 : static int nv_common_sw_init(void *handle)
1005 : {
1006 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1007 :
1008 0 : if (amdgpu_sriov_vf(adev))
1009 0 : xgpu_nv_mailbox_add_irq_id(adev);
1010 :
1011 0 : return 0;
1012 : }
1013 :
1014 0 : static int nv_common_sw_fini(void *handle)
1015 : {
1016 0 : return 0;
1017 : }
1018 :
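 : /* Apply NBIO workarounds, program link/ASPM settings, remap HDP registers and
 :  * enable the doorbell aperture.
 :  */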
1019 0 : static int nv_common_hw_init(void *handle)
1020 : {
1021 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1022 :
1023 0 : if (adev->nbio.funcs->apply_lc_spc_mode_wa)
1024 0 : adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
1025 :
1026 0 : if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
1027 0 : adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
1028 :
1029 : /* enable pcie gen2/3 link */
1030 0 : nv_pcie_gen3_enable(adev);
1031 : /* enable aspm */
1032 0 : nv_program_aspm(adev);
1033 : /* setup nbio registers */
1034 0 : adev->nbio.funcs->init_registers(adev);
1035 : /* remap HDP registers to a hole in MMIO space so that
1036 : * those registers can be exposed to process space
1037 : */
1039 0 : if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
1040 0 : adev->nbio.funcs->remap_hdp_registers(adev);
1041 : /* enable the doorbell aperture */
1042 0 : nv_enable_doorbell_aperture(adev, true);
1043 :
1044 0 : return 0;
1045 : }
1046 :
1047 0 : static int nv_common_hw_fini(void *handle)
1048 : {
1049 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1050 :
1051 : /* disable the doorbell aperture */
1052 0 : nv_enable_doorbell_aperture(adev, false);
1053 :
1054 0 : return 0;
1055 : }
1056 :
1057 0 : static int nv_common_suspend(void *handle)
1058 : {
1059 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1060 :
1061 0 : return nv_common_hw_fini(adev);
1062 : }
1063 :
1064 0 : static int nv_common_resume(void *handle)
1065 : {
1066 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1067 :
1068 0 : return nv_common_hw_init(adev);
1069 : }
1070 :
1071 0 : static bool nv_common_is_idle(void *handle)
1072 : {
1073 0 : return true;
1074 : }
1075 :
1076 0 : static int nv_common_wait_for_idle(void *handle)
1077 : {
1078 0 : return 0;
1079 : }
1080 :
1081 0 : static int nv_common_soft_reset(void *handle)
1082 : {
1083 0 : return 0;
1084 : }
1085 :
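 : /* Toggle NBIO, HDP and SMUIO clock gating; under SR-IOV the host owns CG, so
 :  * this is a no-op.
 :  */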
1086 0 : static int nv_common_set_clockgating_state(void *handle,
1087 : enum amd_clockgating_state state)
1088 : {
1089 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1090 :
1091 0 : if (amdgpu_sriov_vf(adev))
1092 : return 0;
1093 :
1094 0 : switch (adev->ip_versions[NBIO_HWIP][0]) {
1095 : case IP_VERSION(2, 3, 0):
1096 : case IP_VERSION(2, 3, 1):
1097 : case IP_VERSION(2, 3, 2):
1098 : case IP_VERSION(3, 3, 0):
1099 : case IP_VERSION(3, 3, 1):
1100 : case IP_VERSION(3, 3, 2):
1101 : case IP_VERSION(3, 3, 3):
1102 0 : adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1103 : state == AMD_CG_STATE_GATE);
1104 0 : adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1105 : state == AMD_CG_STATE_GATE);
1106 0 : adev->hdp.funcs->update_clock_gating(adev,
1107 : state == AMD_CG_STATE_GATE);
1108 0 : adev->smuio.funcs->update_rom_clock_gating(adev,
1109 : state == AMD_CG_STATE_GATE);
1110 0 : break;
1111 : default:
1112 : break;
1113 : }
1114 : return 0;
1115 : }
1116 :
1117 0 : static int nv_common_set_powergating_state(void *handle,
1118 : enum amd_powergating_state state)
1119 : {
1120 : /* TODO */
1121 0 : return 0;
1122 : }
1123 :
1124 0 : static void nv_common_get_clockgating_state(void *handle, u64 *flags)
1125 : {
1126 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1127 :
1128 0 : if (amdgpu_sriov_vf(adev))
1129 0 : *flags = 0;
1130 :
1131 0 : adev->nbio.funcs->get_clockgating_state(adev, flags);
1132 :
1133 0 : adev->hdp.funcs->get_clock_gating_state(adev, flags);
1134 :
1135 0 : adev->smuio.funcs->get_clock_gating_state(adev, flags);
1136 :
1137 0 : return;
1138 : }
1139 :
1140 : static const struct amd_ip_funcs nv_common_ip_funcs = {
1141 : .name = "nv_common",
1142 : .early_init = nv_common_early_init,
1143 : .late_init = nv_common_late_init,
1144 : .sw_init = nv_common_sw_init,
1145 : .sw_fini = nv_common_sw_fini,
1146 : .hw_init = nv_common_hw_init,
1147 : .hw_fini = nv_common_hw_fini,
1148 : .suspend = nv_common_suspend,
1149 : .resume = nv_common_resume,
1150 : .is_idle = nv_common_is_idle,
1151 : .wait_for_idle = nv_common_wait_for_idle,
1152 : .soft_reset = nv_common_soft_reset,
1153 : .set_clockgating_state = nv_common_set_clockgating_state,
1154 : .set_powergating_state = nv_common_set_powergating_state,
1155 : .get_clockgating_state = nv_common_get_clockgating_state,
1156 : };