Line data Source code
1 : /*
2 : * Copyright 2018 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #include <linux/firmware.h>
25 :
26 : #include "amdgpu.h"
27 : #include "amdgpu_discovery.h"
28 : #include "soc15_hw_ip.h"
29 : #include "discovery.h"
30 :
31 : #include "soc15.h"
32 : #include "gfx_v9_0.h"
33 : #include "gmc_v9_0.h"
34 : #include "df_v1_7.h"
35 : #include "df_v3_6.h"
36 : #include "nbio_v6_1.h"
37 : #include "nbio_v7_0.h"
38 : #include "nbio_v7_4.h"
39 : #include "hdp_v4_0.h"
40 : #include "vega10_ih.h"
41 : #include "vega20_ih.h"
42 : #include "sdma_v4_0.h"
43 : #include "uvd_v7_0.h"
44 : #include "vce_v4_0.h"
45 : #include "vcn_v1_0.h"
46 : #include "vcn_v2_5.h"
47 : #include "jpeg_v2_5.h"
48 : #include "smuio_v9_0.h"
49 : #include "gmc_v10_0.h"
50 : #include "gmc_v11_0.h"
51 : #include "gfxhub_v2_0.h"
52 : #include "mmhub_v2_0.h"
53 : #include "nbio_v2_3.h"
54 : #include "nbio_v4_3.h"
55 : #include "nbio_v7_2.h"
56 : #include "nbio_v7_7.h"
57 : #include "hdp_v5_0.h"
58 : #include "hdp_v5_2.h"
59 : #include "hdp_v6_0.h"
60 : #include "nv.h"
61 : #include "soc21.h"
62 : #include "navi10_ih.h"
63 : #include "ih_v6_0.h"
64 : #include "gfx_v10_0.h"
65 : #include "gfx_v11_0.h"
66 : #include "sdma_v5_0.h"
67 : #include "sdma_v5_2.h"
68 : #include "sdma_v6_0.h"
69 : #include "lsdma_v6_0.h"
70 : #include "vcn_v2_0.h"
71 : #include "jpeg_v2_0.h"
72 : #include "vcn_v3_0.h"
73 : #include "jpeg_v3_0.h"
74 : #include "vcn_v4_0.h"
75 : #include "jpeg_v4_0.h"
76 : #include "amdgpu_vkms.h"
77 : #include "mes_v10_1.h"
78 : #include "mes_v11_0.h"
79 : #include "smuio_v11_0.h"
80 : #include "smuio_v11_0_6.h"
81 : #include "smuio_v13_0.h"
82 : #include "smuio_v13_0_6.h"
83 :
84 : #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
85 : MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
86 :
87 : #define mmRCC_CONFIG_MEMSIZE 0xde3
88 : #define mmMM_INDEX 0x0
89 : #define mmMM_INDEX_HI 0x6
90 : #define mmMM_DATA 0x1
91 :
/* Human-readable names for the discovery-table HW IDs, indexed by *_HWID.
 * Used to create named symlinks under ip_discovery/die/#die/ in sysfs;
 * IDs without an entry here are NULL and get no named link.
 */
static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
};
170 :
/* Map from the driver's logical HWIP indices to the HW IDs used by the IP
 * discovery table.  Note that both NBIO_HWIP and NBIF_HWIP map to
 * NBIF_HWID.
 */
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]    = SDMA2_HWID,
	[SDMA3_HWIP]    = SDMA3_HWID,
	[LSDMA_HWIP]    = LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
};
199 :
200 0 : static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
201 : {
202 0 : uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
203 0 : uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
204 :
205 0 : amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
206 0 : adev->mman.discovery_tmr_size, false);
207 0 : return 0;
208 : }
209 :
210 0 : static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
211 : {
212 : const struct firmware *fw;
213 : const char *fw_name;
214 : int r;
215 :
216 0 : switch (amdgpu_discovery) {
217 : case 2:
218 0 : fw_name = FIRMWARE_IP_DISCOVERY;
219 : break;
220 : default:
221 0 : dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
222 : return -EINVAL;
223 : }
224 :
225 0 : r = request_firmware(&fw, fw_name, adev->dev);
226 0 : if (r) {
227 0 : dev_err(adev->dev, "can't load firmware \"%s\"\n",
228 : fw_name);
229 : return r;
230 : }
231 :
232 0 : memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
233 0 : release_firmware(fw);
234 :
235 : return 0;
236 : }
237 :
/* Sum the bytes of @data modulo 2^16 — the checksum convention used by
 * the IP discovery tables.
 *
 * The index is uint32_t (not int as before): a signed index both triggers
 * a signed/unsigned comparison against @size and cannot cover sizes above
 * INT_MAX.
 */
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	uint32_t i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}
248 :
249 : static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
250 : uint16_t expected)
251 : {
252 0 : return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
253 : }
254 :
255 : static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
256 : {
257 : struct binary_header *bhdr;
258 0 : bhdr = (struct binary_header *)binary;
259 :
260 0 : return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
261 : }
262 :
263 0 : static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
264 : {
265 : /*
266 : * So far, apply this quirk only on those Navy Flounder boards which
267 : * have a bad harvest table of VCN config.
268 : */
269 0 : if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
270 0 : (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
271 0 : switch (adev->pdev->revision) {
272 : case 0xC1:
273 : case 0xC2:
274 : case 0xC3:
275 : case 0xC5:
276 : case 0xC7:
277 : case 0xCF:
278 : case 0xDF:
279 0 : adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
280 0 : break;
281 : default:
282 : break;
283 : }
284 : }
285 0 : }
286 :
287 0 : static int amdgpu_discovery_init(struct amdgpu_device *adev)
288 : {
289 : struct table_info *info;
290 : struct binary_header *bhdr;
291 : uint16_t offset;
292 : uint16_t size;
293 : uint16_t checksum;
294 : int r;
295 :
296 0 : adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
297 0 : adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
298 0 : if (!adev->mman.discovery_bin)
299 : return -ENOMEM;
300 :
301 0 : r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
302 0 : if (r) {
303 0 : dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
304 0 : r = -EINVAL;
305 0 : goto out;
306 : }
307 :
308 0 : if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
309 0 : dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
310 : /* retry read ip discovery binary from file */
311 0 : r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
312 0 : if (r) {
313 0 : dev_err(adev->dev, "failed to read ip discovery binary from file\n");
314 0 : r = -EINVAL;
315 0 : goto out;
316 : }
317 : /* check the ip discovery binary signature */
318 0 : if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
319 0 : dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
320 0 : r = -EINVAL;
321 0 : goto out;
322 : }
323 : }
324 :
325 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
326 :
327 0 : offset = offsetof(struct binary_header, binary_checksum) +
328 : sizeof(bhdr->binary_checksum);
329 0 : size = le16_to_cpu(bhdr->binary_size) - offset;
330 0 : checksum = le16_to_cpu(bhdr->binary_checksum);
331 :
332 0 : if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
333 : size, checksum)) {
334 0 : dev_err(adev->dev, "invalid ip discovery binary checksum\n");
335 0 : r = -EINVAL;
336 0 : goto out;
337 : }
338 :
339 0 : info = &bhdr->table_list[IP_DISCOVERY];
340 0 : offset = le16_to_cpu(info->offset);
341 0 : checksum = le16_to_cpu(info->checksum);
342 :
343 0 : if (offset) {
344 0 : struct ip_discovery_header *ihdr =
345 0 : (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
346 0 : if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
347 0 : dev_err(adev->dev, "invalid ip discovery data table signature\n");
348 0 : r = -EINVAL;
349 0 : goto out;
350 : }
351 :
352 0 : if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
353 0 : le16_to_cpu(ihdr->size), checksum)) {
354 0 : dev_err(adev->dev, "invalid ip discovery data table checksum\n");
355 0 : r = -EINVAL;
356 0 : goto out;
357 : }
358 : }
359 :
360 0 : info = &bhdr->table_list[GC];
361 0 : offset = le16_to_cpu(info->offset);
362 0 : checksum = le16_to_cpu(info->checksum);
363 :
364 0 : if (offset) {
365 0 : struct gpu_info_header *ghdr =
366 0 : (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
367 :
368 0 : if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
369 0 : dev_err(adev->dev, "invalid ip discovery gc table id\n");
370 0 : r = -EINVAL;
371 0 : goto out;
372 : }
373 :
374 0 : if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
375 0 : le32_to_cpu(ghdr->size), checksum)) {
376 0 : dev_err(adev->dev, "invalid gc data table checksum\n");
377 0 : r = -EINVAL;
378 0 : goto out;
379 : }
380 : }
381 :
382 0 : info = &bhdr->table_list[HARVEST_INFO];
383 0 : offset = le16_to_cpu(info->offset);
384 0 : checksum = le16_to_cpu(info->checksum);
385 :
386 0 : if (offset) {
387 0 : struct harvest_info_header *hhdr =
388 0 : (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
389 :
390 0 : if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
391 0 : dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
392 0 : r = -EINVAL;
393 0 : goto out;
394 : }
395 :
396 0 : if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
397 : sizeof(struct harvest_table), checksum)) {
398 0 : dev_err(adev->dev, "invalid harvest data table checksum\n");
399 0 : r = -EINVAL;
400 0 : goto out;
401 : }
402 : }
403 :
404 0 : info = &bhdr->table_list[VCN_INFO];
405 0 : offset = le16_to_cpu(info->offset);
406 0 : checksum = le16_to_cpu(info->checksum);
407 :
408 0 : if (offset) {
409 0 : struct vcn_info_header *vhdr =
410 0 : (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
411 :
412 0 : if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
413 0 : dev_err(adev->dev, "invalid ip discovery vcn table id\n");
414 0 : r = -EINVAL;
415 0 : goto out;
416 : }
417 :
418 0 : if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
419 0 : le32_to_cpu(vhdr->size_bytes), checksum)) {
420 0 : dev_err(adev->dev, "invalid vcn data table checksum\n");
421 0 : r = -EINVAL;
422 0 : goto out;
423 : }
424 : }
425 :
426 : info = &bhdr->table_list[MALL_INFO];
427 : offset = le16_to_cpu(info->offset);
428 : checksum = le16_to_cpu(info->checksum);
429 :
430 : if (0 && offset) {
431 : struct mall_info_header *mhdr =
432 : (struct mall_info_header *)(adev->mman.discovery_bin + offset);
433 :
434 : if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
435 : dev_err(adev->dev, "invalid ip discovery mall table id\n");
436 : r = -EINVAL;
437 : goto out;
438 : }
439 :
440 : if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
441 : le32_to_cpu(mhdr->size_bytes), checksum)) {
442 : dev_err(adev->dev, "invalid mall data table checksum\n");
443 : r = -EINVAL;
444 : goto out;
445 : }
446 : }
447 :
448 : return 0;
449 :
450 : out:
451 0 : kfree(adev->mman.discovery_bin);
452 0 : adev->mman.discovery_bin = NULL;
453 :
454 0 : return r;
455 : }
456 :
457 : static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
458 :
/* Tear down the discovery sysfs hierarchy and release the cached binary. */
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}
465 :
466 0 : static int amdgpu_discovery_validate_ip(const struct ip *ip)
467 : {
468 0 : if (ip->number_instance >= HWIP_MAX_INSTANCE) {
469 0 : DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
470 : ip->number_instance);
471 0 : return -EINVAL;
472 : }
473 0 : if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
474 0 : DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
475 : le16_to_cpu(ip->hw_id));
476 0 : return -EINVAL;
477 : }
478 :
479 : return 0;
480 : }
481 :
/* Walk every IP record on every die of the discovery table and record
 * harvested VCN and DMU blocks in adev.  *vcn_harvest_count is
 * incremented once per harvested VCN instance.
 */
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		/* the IP records follow the die header back to back */
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			/* skip malformed records but keep walking the list */
			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->harvest) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->number_instance == 0)
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
					else
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			/* records are variable length: account for the
			 * trailing base-address array as well
			 */
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}
531 :
/* Read harvest information from the dedicated harvest table (rather than
 * the per-IP harvest bits) and update the VCN/DMU/UMC harvest state.
 */
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	/* iterate up to 32 entries (NOTE(review): presumably the capacity of
	 * harvest_table::list — confirm against the header); a zero hw_id
	 * terminates the list early
	 */
	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			if (harvest_info->list[i].number_instance == 0)
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
			else
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			(*umc_harvest_count)++;
			break;
		default:
			break;
		}
	}
}
574 :
575 : /* ================================================== */
576 :
/* Cached copy of one IP record from the discovery binary, exposed as a
 * sysfs kobject.
 */
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[]; /* flexible array; num_base_addresses entries */
};

/* One directory per HW ID, grouping its instances in a kset. */
struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

/* One directory per die, grouping its HW IDs in a kset. */
struct ip_die_entry {
	struct kset ip_kset;            /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};
598 :
599 : /* -------------------------------------------------- */
600 :
/* sysfs attribute with a typed show() taking the owning ip_hw_instance. */
struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};
605 :
/* sysfs show callbacks for the scalar attributes under
 * ip_discovery/die/#die/#hw_id/#instance/; each prints one field of the
 * ip_hw_instance cached at discovery time.
 */
static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}
640 :
/* Print all base addresses, one "0x%%08X\n" (11 chars) per line, stopping
 * early when the next line plus NUL would no longer fit in PAGE_SIZE.
 */
static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}
660 :
/* Attribute descriptors for an ip_hw_instance directory. */
static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

/* Filled from ip_hw_attr (and NULL-terminated) in amdgpu_discovery_sysfs_init(). */
static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);
674 :
675 : #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
676 : #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
677 :
678 0 : static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
679 : struct attribute *attr,
680 : char *buf)
681 : {
682 0 : struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
683 0 : struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
684 :
685 0 : if (!ip_hw_attr->show)
686 : return -EIO;
687 :
688 0 : return ip_hw_attr->show(ip_hw_instance, buf);
689 : }
690 :
static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

/* kobject release: the kobject is embedded in ip_hw_instance, so the
 * final kobject_put() frees the whole instance here.
 */
static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};
707 :
708 : /* -------------------------------------------------- */
709 :
#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

/* Release for the per-HW-ID kset kobject; all instance kobjects should
 * already have been removed (see amdgpu_discovery_sysfs_ip_hw_free()),
 * hence the sanity check before freeing.
 */
static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};
725 :
726 : /* -------------------------------------------------- */
727 :
728 : static void die_kobj_release(struct kobject *kobj);
729 : static void ip_disc_release(struct kobject *kobj);
730 :
731 : struct ip_die_entry_attribute {
732 : struct attribute attr;
733 : ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
734 : };
735 :
736 : #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
737 :
/* sysfs: number of IP instances recorded for this die. */
static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}
742 :
743 : /* If there are more ip_die_entry attrs, other than the number of IPs,
744 : * we can make this intro an array of attrs, and then initialize
745 : * ip_die_entry_attrs in a loop.
746 : */
747 : static struct ip_die_entry_attribute num_ips_attr =
748 : __ATTR_RO(num_ips);
749 :
750 : static struct attribute *ip_die_entry_attrs[] = {
751 : &num_ips_attr.attr,
752 : NULL,
753 : };
754 : ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
755 :
756 : #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
757 :
758 0 : static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
759 : struct attribute *attr,
760 : char *buf)
761 : {
762 0 : struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
763 0 : struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
764 :
765 0 : if (!ip_die_entry_attr->show)
766 : return -EIO;
767 :
768 0 : return ip_die_entry_attr->show(ip_die_entry, buf);
769 : }
770 :
/* Release for a die's ip_kset kobject; its child hw_id ksets should
 * already have been removed by the sysfs teardown path, hence the check.
 */
static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}
779 :
780 : static const struct sysfs_ops ip_die_entry_sysfs_ops = {
781 : .show = ip_die_entry_attr_show,
782 : };
783 :
784 : static struct kobj_type ip_die_entry_ktype = {
785 : .release = ip_die_entry_release,
786 : .sysfs_ops = &ip_die_entry_sysfs_ops,
787 : .default_groups = ip_die_entry_groups,
788 : };
789 :
790 : static struct kobj_type die_kobj_ktype = {
791 : .release = die_kobj_release,
792 : .sysfs_ops = &kobj_sysfs_ops,
793 : };
794 :
795 : static struct kobj_type ip_discovery_ktype = {
796 : .release = ip_disc_release,
797 : .sysfs_ops = &kobj_sysfs_ops,
798 : };
799 :
800 : struct ip_discovery_top {
801 : struct kobject kobj; /* ip_discovery/ */
802 : struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
803 : struct amdgpu_device *adev;
804 : };
805 :
/* Release for the "die" kset kobject.  The kset is embedded in
 * ip_discovery_top, which is freed by ip_disc_release(), so nothing is
 * freed here — only a sanity check that all dies are gone.
 */
static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}
814 :
/* Final release of the ip_discovery/ root: clear the device's back
 * pointer and free ip_discovery_top (which embeds the die kset).
 */
static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}
824 :
825 0 : static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
826 : struct ip_die_entry *ip_die_entry,
827 : const size_t _ip_offset, const int num_ips)
828 : {
829 : int ii, jj, kk, res;
830 :
831 0 : DRM_DEBUG("num_ips:%d", num_ips);
832 :
833 : /* Find all IPs of a given HW ID, and add their instance to
834 : * #die/#hw_id/#instance/<attributes>
835 : */
836 0 : for (ii = 0; ii < HW_ID_MAX; ii++) {
837 : struct ip_hw_id *ip_hw_id = NULL;
838 : size_t ip_offset = _ip_offset;
839 :
840 0 : for (jj = 0; jj < num_ips; jj++) {
841 : struct ip *ip;
842 : struct ip_hw_instance *ip_hw_instance;
843 :
844 0 : ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
845 0 : if (amdgpu_discovery_validate_ip(ip) ||
846 0 : le16_to_cpu(ip->hw_id) != ii)
847 : goto next_ip;
848 :
849 0 : DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
850 :
851 : /* We have a hw_id match; register the hw
852 : * block if not yet registered.
853 : */
854 0 : if (!ip_hw_id) {
855 0 : ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
856 0 : if (!ip_hw_id)
857 : return -ENOMEM;
858 0 : ip_hw_id->hw_id = ii;
859 :
860 0 : kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
861 0 : ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
862 0 : ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
863 0 : res = kset_register(&ip_hw_id->hw_id_kset);
864 0 : if (res) {
865 0 : DRM_ERROR("Couldn't register ip_hw_id kset");
866 0 : kfree(ip_hw_id);
867 : return res;
868 : }
869 0 : if (hw_id_names[ii]) {
870 0 : res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
871 : &ip_hw_id->hw_id_kset.kobj,
872 : hw_id_names[ii]);
873 0 : if (res) {
874 0 : DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
875 : hw_id_names[ii],
876 : kobject_name(&ip_die_entry->ip_kset.kobj));
877 : }
878 : }
879 : }
880 :
881 : /* Now register its instance.
882 : */
883 0 : ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
884 : base_addr,
885 : ip->num_base_address),
886 : GFP_KERNEL);
887 0 : if (!ip_hw_instance) {
888 0 : DRM_ERROR("no memory for ip_hw_instance");
889 : return -ENOMEM;
890 : }
891 0 : ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
892 0 : ip_hw_instance->num_instance = ip->number_instance;
893 0 : ip_hw_instance->major = ip->major;
894 0 : ip_hw_instance->minor = ip->minor;
895 0 : ip_hw_instance->revision = ip->revision;
896 0 : ip_hw_instance->harvest = ip->harvest;
897 0 : ip_hw_instance->num_base_addresses = ip->num_base_address;
898 :
899 0 : for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
900 0 : ip_hw_instance->base_addr[kk] = ip->base_address[kk];
901 :
902 0 : kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
903 0 : ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
904 0 : res = kobject_add(&ip_hw_instance->kobj, NULL,
905 0 : "%d", ip_hw_instance->num_instance);
906 : next_ip:
907 0 : ip_offset += struct_size(ip, base_address, ip->num_base_address);
908 : }
909 : }
910 :
911 : return 0;
912 : }
913 :
914 0 : static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
915 : {
916 : struct binary_header *bhdr;
917 : struct ip_discovery_header *ihdr;
918 : struct die_header *dhdr;
919 0 : struct kset *die_kset = &adev->ip_top->die_kset;
920 : u16 num_dies, die_offset, num_ips;
921 : size_t ip_offset;
922 : int ii, res;
923 :
924 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
925 0 : ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
926 0 : le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
927 0 : num_dies = le16_to_cpu(ihdr->num_dies);
928 :
929 0 : DRM_DEBUG("number of dies: %d\n", num_dies);
930 :
931 0 : for (ii = 0; ii < num_dies; ii++) {
932 : struct ip_die_entry *ip_die_entry;
933 :
934 0 : die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
935 0 : dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
936 0 : num_ips = le16_to_cpu(dhdr->num_ips);
937 0 : ip_offset = die_offset + sizeof(*dhdr);
938 :
939 : /* Add the die to the kset.
940 : *
941 : * dhdr->die_id == ii, which was checked in
942 : * amdgpu_discovery_reg_base_init().
943 : */
944 :
945 0 : ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
946 0 : if (!ip_die_entry)
947 : return -ENOMEM;
948 :
949 0 : ip_die_entry->num_ips = num_ips;
950 :
951 0 : kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
952 0 : ip_die_entry->ip_kset.kobj.kset = die_kset;
953 0 : ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
954 0 : res = kset_register(&ip_die_entry->ip_kset);
955 0 : if (res) {
956 0 : DRM_ERROR("Couldn't register ip_die_entry kset");
957 0 : kfree(ip_die_entry);
958 0 : return res;
959 : }
960 :
961 0 : amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
962 : }
963 :
964 : return 0;
965 : }
966 :
/* Create the ip_discovery/ root kobject and the die/ kset under the
 * device, fill the shared attribute array, then build the per-die
 * hierarchy.  On error the kobject_put() of the root triggers
 * ip_disc_release(), which frees adev->ip_top.
 */
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	/* NULL-terminate the attribute array used by ip_hw_instance_ktype. */
	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}
1006 :
1007 : /* -------------------------------------------------- */
1008 :
1009 : #define list_to_kobj(el) container_of(el, struct kobject, entry)
1010 :
/* Tear down one <hw_id>/ sysfs directory: drop every <instance>/ kobject
 * queued in the hw_id kset, then drop the kset's own kobject.
 *
 * The kset list lock cannot be held across kobject_put() (the release
 * callback may free/sleep), so the lock is dropped around each put and
 * re-taken; list_del_init() detaches the entry first so re-scanning the
 * list after re-locking is safe.
 */
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	/* Final ref on the hw_id kset itself. */
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}
1028 :
/* Tear down one die/<die_id>/ directory: free every <hw_id>/ subtree
 * registered in the die's ip kset, then drop the kset's kobject.
 *
 * Same lock-drop-around-put pattern as amdgpu_discovery_sysfs_ip_hw_free():
 * the kset list lock must not be held while a kobject release can run.
 */
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		/* each entry is an ip_hw_id with its own kset of instances */
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}
1045 :
/* Tear down the whole ip_discovery sysfs tree built by
 * amdgpu_discovery_sysfs_init(): free each die subtree, then drop the
 * die kset and finally the root ip_discovery kobject (whose release
 * presumably frees adev->ip_top via ip_discovery_ktype — defined
 * elsewhere in this file).
 */
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		/* drop lock while the die subtree (and its kobjects) is freed */
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}
1063 :
1064 : /* ================================================== */
1065 :
1066 0 : static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1067 : {
1068 : struct binary_header *bhdr;
1069 : struct ip_discovery_header *ihdr;
1070 : struct die_header *dhdr;
1071 : struct ip *ip;
1072 : uint16_t die_offset;
1073 : uint16_t ip_offset;
1074 : uint16_t num_dies;
1075 : uint16_t num_ips;
1076 : uint8_t num_base_address;
1077 : int hw_ip;
1078 : int i, j, k;
1079 : int r;
1080 :
1081 0 : r = amdgpu_discovery_init(adev);
1082 0 : if (r) {
1083 0 : DRM_ERROR("amdgpu_discovery_init failed\n");
1084 0 : return r;
1085 : }
1086 :
1087 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
1088 0 : ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1089 0 : le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1090 0 : num_dies = le16_to_cpu(ihdr->num_dies);
1091 :
1092 0 : DRM_DEBUG("number of dies: %d\n", num_dies);
1093 :
1094 0 : for (i = 0; i < num_dies; i++) {
1095 0 : die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1096 0 : dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1097 0 : num_ips = le16_to_cpu(dhdr->num_ips);
1098 0 : ip_offset = die_offset + sizeof(*dhdr);
1099 :
1100 0 : if (le16_to_cpu(dhdr->die_id) != i) {
1101 0 : DRM_ERROR("invalid die id %d, expected %d\n",
1102 : le16_to_cpu(dhdr->die_id), i);
1103 0 : return -EINVAL;
1104 : }
1105 :
1106 0 : DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1107 : le16_to_cpu(dhdr->die_id), num_ips);
1108 :
1109 0 : for (j = 0; j < num_ips; j++) {
1110 0 : ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
1111 :
1112 0 : if (amdgpu_discovery_validate_ip(ip))
1113 : goto next_ip;
1114 :
1115 0 : num_base_address = ip->num_base_address;
1116 :
1117 0 : DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1118 : hw_id_names[le16_to_cpu(ip->hw_id)],
1119 : le16_to_cpu(ip->hw_id),
1120 : ip->number_instance,
1121 : ip->major, ip->minor,
1122 : ip->revision);
1123 :
1124 0 : if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1125 : /* Bit [5:0]: original revision value
1126 : * Bit [7:6]: en/decode capability:
1127 : * 0b00 : VCN function normally
1128 : * 0b10 : encode is disabled
1129 : * 0b01 : decode is disabled
1130 : */
1131 0 : adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1132 0 : ip->revision & 0xc0;
1133 0 : ip->revision &= ~0xc0;
1134 0 : if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
1135 0 : adev->vcn.num_vcn_inst++;
1136 : else
1137 0 : dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1138 : adev->vcn.num_vcn_inst + 1,
1139 : AMDGPU_MAX_VCN_INSTANCES);
1140 : }
1141 0 : if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1142 0 : le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1143 0 : le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1144 : le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1145 0 : if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
1146 0 : adev->sdma.num_instances++;
1147 : else
1148 0 : dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1149 : adev->sdma.num_instances + 1,
1150 : AMDGPU_MAX_SDMA_INSTANCES);
1151 : }
1152 :
1153 0 : if (le16_to_cpu(ip->hw_id) == UMC_HWID)
1154 0 : adev->gmc.num_umc++;
1155 :
1156 0 : for (k = 0; k < num_base_address; k++) {
1157 : /*
1158 : * convert the endianness of base addresses in place,
1159 : * so that we don't need to convert them when accessing adev->reg_offset.
1160 : */
1161 0 : ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1162 0 : DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1163 : }
1164 :
1165 0 : for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1166 0 : if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
1167 0 : DRM_DEBUG("set register base offset for %s\n",
1168 : hw_id_names[le16_to_cpu(ip->hw_id)]);
1169 0 : adev->reg_offset[hw_ip][ip->number_instance] =
1170 0 : ip->base_address;
1171 : /* Instance support is somewhat inconsistent.
1172 : * SDMA is a good example. Sienna cichlid has 4 total
1173 : * SDMA instances, each enumerated separately (HWIDs
1174 : * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1175 : * but they are enumerated as multiple instances of the
1176 : * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1177 : * example. On most chips there are multiple instances
1178 : * with the same HWID.
1179 : */
1180 0 : adev->ip_versions[hw_ip][ip->number_instance] =
1181 0 : IP_VERSION(ip->major, ip->minor, ip->revision);
1182 : }
1183 : }
1184 :
1185 : next_ip:
1186 0 : ip_offset += struct_size(ip, base_address, ip->num_base_address);
1187 : }
1188 : }
1189 :
1190 0 : amdgpu_discovery_sysfs_init(adev);
1191 :
1192 0 : return 0;
1193 : }
1194 :
1195 0 : int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
1196 : int *major, int *minor, int *revision)
1197 : {
1198 : struct binary_header *bhdr;
1199 : struct ip_discovery_header *ihdr;
1200 : struct die_header *dhdr;
1201 : struct ip *ip;
1202 : uint16_t die_offset;
1203 : uint16_t ip_offset;
1204 : uint16_t num_dies;
1205 : uint16_t num_ips;
1206 : int i, j;
1207 :
1208 0 : if (!adev->mman.discovery_bin) {
1209 0 : DRM_ERROR("ip discovery uninitialized\n");
1210 0 : return -EINVAL;
1211 : }
1212 :
1213 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
1214 0 : ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1215 0 : le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1216 0 : num_dies = le16_to_cpu(ihdr->num_dies);
1217 :
1218 0 : for (i = 0; i < num_dies; i++) {
1219 0 : die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1220 0 : dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1221 0 : num_ips = le16_to_cpu(dhdr->num_ips);
1222 0 : ip_offset = die_offset + sizeof(*dhdr);
1223 :
1224 0 : for (j = 0; j < num_ips; j++) {
1225 0 : ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
1226 :
1227 0 : if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
1228 0 : if (major)
1229 0 : *major = ip->major;
1230 0 : if (minor)
1231 0 : *minor = ip->minor;
1232 0 : if (revision)
1233 0 : *revision = ip->revision;
1234 : return 0;
1235 : }
1236 0 : ip_offset += struct_size(ip, base_address, ip->num_base_address);
1237 : }
1238 : }
1239 :
1240 : return -EINVAL;
1241 : }
1242 :
1243 0 : static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1244 : {
1245 0 : int vcn_harvest_count = 0;
1246 0 : int umc_harvest_count = 0;
1247 :
1248 : /*
1249 : * Harvest table does not fit Navi1x and legacy GPUs,
1250 : * so read harvest bit per IP data structure to set
1251 : * harvest configuration.
1252 : */
1253 0 : if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
1254 0 : if ((adev->pdev->device == 0x731E &&
1255 0 : (adev->pdev->revision == 0xC6 ||
1256 0 : adev->pdev->revision == 0xC7)) ||
1257 0 : (adev->pdev->device == 0x7340 &&
1258 0 : adev->pdev->revision == 0xC9) ||
1259 0 : (adev->pdev->device == 0x7360 &&
1260 0 : adev->pdev->revision == 0xC7))
1261 0 : amdgpu_discovery_read_harvest_bit_per_ip(adev,
1262 : &vcn_harvest_count);
1263 : } else {
1264 0 : amdgpu_discovery_read_from_harvest_table(adev,
1265 : &vcn_harvest_count,
1266 : &umc_harvest_count);
1267 : }
1268 :
1269 0 : amdgpu_discovery_harvest_config_quirk(adev);
1270 :
1271 0 : if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1272 0 : adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1273 0 : adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1274 : }
1275 :
1276 0 : if (umc_harvest_count < adev->gmc.num_umc) {
1277 0 : adev->gmc.num_umc -= umc_harvest_count;
1278 : }
1279 0 : }
1280 :
/* GC info table overlay: the discovery binary may carry any of these
 * revisions; the v1 header is read first to pick the right layout.
 */
union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};
1287 :
1288 0 : static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1289 : {
1290 : struct binary_header *bhdr;
1291 : union gc_info *gc_info;
1292 : u16 offset;
1293 :
1294 0 : if (!adev->mman.discovery_bin) {
1295 0 : DRM_ERROR("ip discovery uninitialized\n");
1296 0 : return -EINVAL;
1297 : }
1298 :
1299 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
1300 0 : offset = le16_to_cpu(bhdr->table_list[GC].offset);
1301 :
1302 0 : if (!offset)
1303 : return 0;
1304 :
1305 0 : gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1306 :
1307 0 : switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1308 : case 1:
1309 0 : adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1310 0 : adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1311 0 : le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1312 0 : adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1313 0 : adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1314 0 : adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1315 0 : adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1316 0 : adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1317 0 : adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1318 0 : adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1319 0 : adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1320 0 : adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1321 0 : adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1322 0 : adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1323 0 : adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1324 0 : adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1325 0 : le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1326 0 : adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1327 0 : if (gc_info->v1.header.version_minor >= 1) {
1328 0 : adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1329 0 : adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1330 0 : adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1331 : }
1332 0 : if (gc_info->v1.header.version_minor >= 2) {
1333 0 : adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1334 0 : adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1335 0 : adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1336 0 : adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1337 0 : adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1338 0 : adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1339 0 : adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1340 0 : adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1341 : }
1342 : break;
1343 : case 2:
1344 0 : adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1345 0 : adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1346 0 : adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1347 0 : adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1348 0 : adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1349 0 : adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1350 0 : adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1351 0 : adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1352 0 : adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1353 0 : adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1354 0 : adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1355 0 : adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1356 0 : adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1357 0 : adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1358 0 : adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1359 0 : le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1360 0 : adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1361 0 : break;
1362 : default:
1363 0 : dev_err(adev->dev,
1364 : "Unhandled GC info table %d.%d\n",
1365 : le16_to_cpu(gc_info->v1.header.version_major),
1366 : le16_to_cpu(gc_info->v1.header.version_minor));
1367 0 : return -EINVAL;
1368 : }
1369 : return 0;
1370 : }
1371 :
/* MALL (infinity cache) info table overlay; only v1 is defined so far. */
union mall_info {
	struct mall_info_v1_0 v1;
};
1375 :
1376 0 : static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1377 : {
1378 : struct binary_header *bhdr;
1379 : union mall_info *mall_info;
1380 : u32 u, mall_size_per_umc, m_s_present, half_use;
1381 : u64 mall_size;
1382 : u16 offset;
1383 :
1384 0 : if (!adev->mman.discovery_bin) {
1385 0 : DRM_ERROR("ip discovery uninitialized\n");
1386 0 : return -EINVAL;
1387 : }
1388 :
1389 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
1390 0 : offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1391 :
1392 0 : if (!offset)
1393 : return 0;
1394 :
1395 0 : mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1396 :
1397 0 : switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1398 : case 1:
1399 0 : mall_size = 0;
1400 0 : mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1401 0 : m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1402 0 : half_use = le32_to_cpu(mall_info->v1.m_half_use);
1403 0 : for (u = 0; u < adev->gmc.num_umc; u++) {
1404 0 : if (m_s_present & (1 << u))
1405 0 : mall_size += mall_size_per_umc * 2;
1406 0 : else if (half_use & (1 << u))
1407 0 : mall_size += mall_size_per_umc / 2;
1408 : else
1409 0 : mall_size += mall_size_per_umc;
1410 : }
1411 0 : adev->gmc.mall_size = mall_size;
1412 : break;
1413 : default:
1414 0 : dev_err(adev->dev,
1415 : "Unhandled MALL info table %d.%d\n",
1416 : le16_to_cpu(mall_info->v1.header.version_major),
1417 : le16_to_cpu(mall_info->v1.header.version_minor));
1418 0 : return -EINVAL;
1419 : }
1420 0 : return 0;
1421 : }
1422 :
/* VCN info table overlay; only v1 is defined so far. */
union vcn_info {
	struct vcn_info_v1_0 v1;
};
1426 :
1427 0 : static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1428 : {
1429 : struct binary_header *bhdr;
1430 : union vcn_info *vcn_info;
1431 : u16 offset;
1432 : int v;
1433 :
1434 0 : if (!adev->mman.discovery_bin) {
1435 0 : DRM_ERROR("ip discovery uninitialized\n");
1436 0 : return -EINVAL;
1437 : }
1438 :
1439 : /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1440 : * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1441 : * but that may change in the future with new GPUs so keep this
1442 : * check for defensive purposes.
1443 : */
1444 0 : if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1445 0 : dev_err(adev->dev, "invalid vcn instances\n");
1446 0 : return -EINVAL;
1447 : }
1448 :
1449 0 : bhdr = (struct binary_header *)adev->mman.discovery_bin;
1450 0 : offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1451 :
1452 0 : if (!offset)
1453 : return 0;
1454 :
1455 0 : vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1456 :
1457 0 : switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1458 : case 1:
1459 : /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1460 : * so this won't overflow.
1461 : */
1462 0 : for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1463 0 : adev->vcn.vcn_codec_disable_mask[v] =
1464 0 : le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1465 : }
1466 : break;
1467 : default:
1468 0 : dev_err(adev->dev,
1469 : "Unhandled VCN info table %d.%d\n",
1470 : le16_to_cpu(vcn_info->v1.header.version_major),
1471 : le16_to_cpu(vcn_info->v1.header.version_minor));
1472 0 : return -EINVAL;
1473 : }
1474 : return 0;
1475 : }
1476 :
1477 0 : static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1478 : {
1479 : /* what IP to use for this? */
1480 0 : switch (adev->ip_versions[GC_HWIP][0]) {
1481 : case IP_VERSION(9, 0, 1):
1482 : case IP_VERSION(9, 1, 0):
1483 : case IP_VERSION(9, 2, 1):
1484 : case IP_VERSION(9, 2, 2):
1485 : case IP_VERSION(9, 3, 0):
1486 : case IP_VERSION(9, 4, 0):
1487 : case IP_VERSION(9, 4, 1):
1488 : case IP_VERSION(9, 4, 2):
1489 0 : amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1490 0 : break;
1491 : case IP_VERSION(10, 1, 10):
1492 : case IP_VERSION(10, 1, 1):
1493 : case IP_VERSION(10, 1, 2):
1494 : case IP_VERSION(10, 1, 3):
1495 : case IP_VERSION(10, 1, 4):
1496 : case IP_VERSION(10, 3, 0):
1497 : case IP_VERSION(10, 3, 1):
1498 : case IP_VERSION(10, 3, 2):
1499 : case IP_VERSION(10, 3, 3):
1500 : case IP_VERSION(10, 3, 4):
1501 : case IP_VERSION(10, 3, 5):
1502 : case IP_VERSION(10, 3, 6):
1503 : case IP_VERSION(10, 3, 7):
1504 0 : amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1505 0 : break;
1506 : case IP_VERSION(11, 0, 0):
1507 : case IP_VERSION(11, 0, 1):
1508 : case IP_VERSION(11, 0, 2):
1509 : case IP_VERSION(11, 0, 3):
1510 0 : amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1511 0 : break;
1512 : default:
1513 0 : dev_err(adev->dev,
1514 : "Failed to add common ip block(GC_HWIP:0x%x)\n",
1515 : adev->ip_versions[GC_HWIP][0]);
1516 0 : return -EINVAL;
1517 : }
1518 : return 0;
1519 : }
1520 :
1521 0 : static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1522 : {
1523 : /* use GC or MMHUB IP version */
1524 0 : switch (adev->ip_versions[GC_HWIP][0]) {
1525 : case IP_VERSION(9, 0, 1):
1526 : case IP_VERSION(9, 1, 0):
1527 : case IP_VERSION(9, 2, 1):
1528 : case IP_VERSION(9, 2, 2):
1529 : case IP_VERSION(9, 3, 0):
1530 : case IP_VERSION(9, 4, 0):
1531 : case IP_VERSION(9, 4, 1):
1532 : case IP_VERSION(9, 4, 2):
1533 0 : amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1534 0 : break;
1535 : case IP_VERSION(10, 1, 10):
1536 : case IP_VERSION(10, 1, 1):
1537 : case IP_VERSION(10, 1, 2):
1538 : case IP_VERSION(10, 1, 3):
1539 : case IP_VERSION(10, 1, 4):
1540 : case IP_VERSION(10, 3, 0):
1541 : case IP_VERSION(10, 3, 1):
1542 : case IP_VERSION(10, 3, 2):
1543 : case IP_VERSION(10, 3, 3):
1544 : case IP_VERSION(10, 3, 4):
1545 : case IP_VERSION(10, 3, 5):
1546 : case IP_VERSION(10, 3, 6):
1547 : case IP_VERSION(10, 3, 7):
1548 0 : amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1549 0 : break;
1550 : case IP_VERSION(11, 0, 0):
1551 : case IP_VERSION(11, 0, 1):
1552 : case IP_VERSION(11, 0, 2):
1553 : case IP_VERSION(11, 0, 3):
1554 0 : amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1555 0 : break;
1556 : default:
1557 0 : dev_err(adev->dev,
1558 : "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1559 : adev->ip_versions[GC_HWIP][0]);
1560 0 : return -EINVAL;
1561 : }
1562 : return 0;
1563 : }
1564 :
1565 0 : static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1566 : {
1567 0 : switch (adev->ip_versions[OSSSYS_HWIP][0]) {
1568 : case IP_VERSION(4, 0, 0):
1569 : case IP_VERSION(4, 0, 1):
1570 : case IP_VERSION(4, 1, 0):
1571 : case IP_VERSION(4, 1, 1):
1572 : case IP_VERSION(4, 3, 0):
1573 0 : amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1574 0 : break;
1575 : case IP_VERSION(4, 2, 0):
1576 : case IP_VERSION(4, 2, 1):
1577 : case IP_VERSION(4, 4, 0):
1578 0 : amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1579 0 : break;
1580 : case IP_VERSION(5, 0, 0):
1581 : case IP_VERSION(5, 0, 1):
1582 : case IP_VERSION(5, 0, 2):
1583 : case IP_VERSION(5, 0, 3):
1584 : case IP_VERSION(5, 2, 0):
1585 : case IP_VERSION(5, 2, 1):
1586 0 : amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1587 0 : break;
1588 : case IP_VERSION(6, 0, 0):
1589 : case IP_VERSION(6, 0, 1):
1590 : case IP_VERSION(6, 0, 2):
1591 0 : amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1592 0 : break;
1593 : default:
1594 0 : dev_err(adev->dev,
1595 : "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1596 : adev->ip_versions[OSSSYS_HWIP][0]);
1597 0 : return -EINVAL;
1598 : }
1599 : return 0;
1600 : }
1601 :
1602 0 : static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1603 : {
1604 0 : switch (adev->ip_versions[MP0_HWIP][0]) {
1605 : case IP_VERSION(9, 0, 0):
1606 0 : amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1607 0 : break;
1608 : case IP_VERSION(10, 0, 0):
1609 : case IP_VERSION(10, 0, 1):
1610 0 : amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1611 0 : break;
1612 : case IP_VERSION(11, 0, 0):
1613 : case IP_VERSION(11, 0, 2):
1614 : case IP_VERSION(11, 0, 4):
1615 : case IP_VERSION(11, 0, 5):
1616 : case IP_VERSION(11, 0, 9):
1617 : case IP_VERSION(11, 0, 7):
1618 : case IP_VERSION(11, 0, 11):
1619 : case IP_VERSION(11, 0, 12):
1620 : case IP_VERSION(11, 0, 13):
1621 : case IP_VERSION(11, 5, 0):
1622 0 : amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1623 0 : break;
1624 : case IP_VERSION(11, 0, 8):
1625 0 : amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1626 0 : break;
1627 : case IP_VERSION(11, 0, 3):
1628 : case IP_VERSION(12, 0, 1):
1629 0 : amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1630 0 : break;
1631 : case IP_VERSION(13, 0, 0):
1632 : case IP_VERSION(13, 0, 1):
1633 : case IP_VERSION(13, 0, 2):
1634 : case IP_VERSION(13, 0, 3):
1635 : case IP_VERSION(13, 0, 5):
1636 : case IP_VERSION(13, 0, 7):
1637 : case IP_VERSION(13, 0, 8):
1638 : case IP_VERSION(13, 0, 10):
1639 0 : amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1640 0 : break;
1641 : case IP_VERSION(13, 0, 4):
1642 0 : amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1643 0 : break;
1644 : default:
1645 0 : dev_err(adev->dev,
1646 : "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1647 : adev->ip_versions[MP0_HWIP][0]);
1648 0 : return -EINVAL;
1649 : }
1650 : return 0;
1651 : }
1652 :
1653 0 : static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1654 : {
1655 0 : switch (adev->ip_versions[MP1_HWIP][0]) {
1656 : case IP_VERSION(9, 0, 0):
1657 : case IP_VERSION(10, 0, 0):
1658 : case IP_VERSION(10, 0, 1):
1659 : case IP_VERSION(11, 0, 2):
1660 0 : if (adev->asic_type == CHIP_ARCTURUS)
1661 0 : amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1662 : else
1663 0 : amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1664 : break;
1665 : case IP_VERSION(11, 0, 0):
1666 : case IP_VERSION(11, 0, 5):
1667 : case IP_VERSION(11, 0, 9):
1668 : case IP_VERSION(11, 0, 7):
1669 : case IP_VERSION(11, 0, 8):
1670 : case IP_VERSION(11, 0, 11):
1671 : case IP_VERSION(11, 0, 12):
1672 : case IP_VERSION(11, 0, 13):
1673 : case IP_VERSION(11, 5, 0):
1674 0 : amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1675 0 : break;
1676 : case IP_VERSION(12, 0, 0):
1677 : case IP_VERSION(12, 0, 1):
1678 0 : amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1679 0 : break;
1680 : case IP_VERSION(13, 0, 0):
1681 : case IP_VERSION(13, 0, 1):
1682 : case IP_VERSION(13, 0, 2):
1683 : case IP_VERSION(13, 0, 3):
1684 : case IP_VERSION(13, 0, 4):
1685 : case IP_VERSION(13, 0, 5):
1686 : case IP_VERSION(13, 0, 7):
1687 : case IP_VERSION(13, 0, 8):
1688 : case IP_VERSION(13, 0, 10):
1689 0 : amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1690 0 : break;
1691 : default:
1692 0 : dev_err(adev->dev,
1693 : "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
1694 : adev->ip_versions[MP1_HWIP][0]);
1695 0 : return -EINVAL;
1696 : }
1697 : return 0;
1698 : }
1699 :
1700 0 : static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
1701 : {
1702 0 : if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
1703 0 : amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1704 0 : return 0;
1705 : }
1706 :
1707 0 : if (!amdgpu_device_has_dc_support(adev))
1708 : return 0;
1709 :
1710 : #if defined(CONFIG_DRM_AMD_DC)
1711 0 : if (adev->ip_versions[DCE_HWIP][0]) {
1712 0 : switch (adev->ip_versions[DCE_HWIP][0]) {
1713 : case IP_VERSION(1, 0, 0):
1714 : case IP_VERSION(1, 0, 1):
1715 : case IP_VERSION(2, 0, 2):
1716 : case IP_VERSION(2, 0, 0):
1717 : case IP_VERSION(2, 0, 3):
1718 : case IP_VERSION(2, 1, 0):
1719 : case IP_VERSION(3, 0, 0):
1720 : case IP_VERSION(3, 0, 2):
1721 : case IP_VERSION(3, 0, 3):
1722 : case IP_VERSION(3, 0, 1):
1723 : case IP_VERSION(3, 1, 2):
1724 : case IP_VERSION(3, 1, 3):
1725 : case IP_VERSION(3, 1, 4):
1726 : case IP_VERSION(3, 1, 5):
1727 : case IP_VERSION(3, 1, 6):
1728 : case IP_VERSION(3, 2, 0):
1729 : case IP_VERSION(3, 2, 1):
1730 0 : amdgpu_device_ip_block_add(adev, &dm_ip_block);
1731 : break;
1732 : default:
1733 0 : dev_err(adev->dev,
1734 : "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
1735 : adev->ip_versions[DCE_HWIP][0]);
1736 0 : return -EINVAL;
1737 : }
1738 0 : } else if (adev->ip_versions[DCI_HWIP][0]) {
1739 0 : switch (adev->ip_versions[DCI_HWIP][0]) {
1740 : case IP_VERSION(12, 0, 0):
1741 : case IP_VERSION(12, 0, 1):
1742 : case IP_VERSION(12, 1, 0):
1743 0 : amdgpu_device_ip_block_add(adev, &dm_ip_block);
1744 0 : break;
1745 : default:
1746 0 : dev_err(adev->dev,
1747 : "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
1748 : adev->ip_versions[DCI_HWIP][0]);
1749 0 : return -EINVAL;
1750 : }
1751 : }
1752 : #endif
1753 : return 0;
1754 : }
1755 :
1756 0 : static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1757 : {
1758 0 : switch (adev->ip_versions[GC_HWIP][0]) {
1759 : case IP_VERSION(9, 0, 1):
1760 : case IP_VERSION(9, 1, 0):
1761 : case IP_VERSION(9, 2, 1):
1762 : case IP_VERSION(9, 2, 2):
1763 : case IP_VERSION(9, 3, 0):
1764 : case IP_VERSION(9, 4, 0):
1765 : case IP_VERSION(9, 4, 1):
1766 : case IP_VERSION(9, 4, 2):
1767 0 : amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1768 0 : break;
1769 : case IP_VERSION(10, 1, 10):
1770 : case IP_VERSION(10, 1, 2):
1771 : case IP_VERSION(10, 1, 1):
1772 : case IP_VERSION(10, 1, 3):
1773 : case IP_VERSION(10, 1, 4):
1774 : case IP_VERSION(10, 3, 0):
1775 : case IP_VERSION(10, 3, 2):
1776 : case IP_VERSION(10, 3, 1):
1777 : case IP_VERSION(10, 3, 4):
1778 : case IP_VERSION(10, 3, 5):
1779 : case IP_VERSION(10, 3, 6):
1780 : case IP_VERSION(10, 3, 3):
1781 : case IP_VERSION(10, 3, 7):
1782 0 : amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
1783 0 : break;
1784 : case IP_VERSION(11, 0, 0):
1785 : case IP_VERSION(11, 0, 1):
1786 : case IP_VERSION(11, 0, 2):
1787 : case IP_VERSION(11, 0, 3):
1788 0 : amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
1789 0 : break;
1790 : default:
1791 0 : dev_err(adev->dev,
1792 : "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
1793 : adev->ip_versions[GC_HWIP][0]);
1794 0 : return -EINVAL;
1795 : }
1796 : return 0;
1797 : }
1798 :
1799 0 : static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
1800 : {
1801 0 : switch (adev->ip_versions[SDMA0_HWIP][0]) {
1802 : case IP_VERSION(4, 0, 0):
1803 : case IP_VERSION(4, 0, 1):
1804 : case IP_VERSION(4, 1, 0):
1805 : case IP_VERSION(4, 1, 1):
1806 : case IP_VERSION(4, 1, 2):
1807 : case IP_VERSION(4, 2, 0):
1808 : case IP_VERSION(4, 2, 2):
1809 : case IP_VERSION(4, 4, 0):
1810 0 : amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
1811 0 : break;
1812 : case IP_VERSION(5, 0, 0):
1813 : case IP_VERSION(5, 0, 1):
1814 : case IP_VERSION(5, 0, 2):
1815 : case IP_VERSION(5, 0, 5):
1816 0 : amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
1817 0 : break;
1818 : case IP_VERSION(5, 2, 0):
1819 : case IP_VERSION(5, 2, 2):
1820 : case IP_VERSION(5, 2, 4):
1821 : case IP_VERSION(5, 2, 5):
1822 : case IP_VERSION(5, 2, 6):
1823 : case IP_VERSION(5, 2, 3):
1824 : case IP_VERSION(5, 2, 1):
1825 : case IP_VERSION(5, 2, 7):
1826 0 : amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
1827 0 : break;
1828 : case IP_VERSION(6, 0, 0):
1829 : case IP_VERSION(6, 0, 1):
1830 : case IP_VERSION(6, 0, 2):
1831 : case IP_VERSION(6, 0, 3):
1832 0 : amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
1833 0 : break;
1834 : default:
1835 0 : dev_err(adev->dev,
1836 : "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
1837 : adev->ip_versions[SDMA0_HWIP][0]);
1838 0 : return -EINVAL;
1839 : }
1840 : return 0;
1841 : }
1842 :
1843 0 : static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
1844 : {
1845 0 : if (adev->ip_versions[VCE_HWIP][0]) {
1846 0 : switch (adev->ip_versions[UVD_HWIP][0]) {
1847 : case IP_VERSION(7, 0, 0):
1848 : case IP_VERSION(7, 2, 0):
1849 : /* UVD is not supported on vega20 SR-IOV */
1850 0 : if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
1851 0 : amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
1852 : break;
1853 : default:
1854 0 : dev_err(adev->dev,
1855 : "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
1856 : adev->ip_versions[UVD_HWIP][0]);
1857 0 : return -EINVAL;
1858 : }
1859 0 : switch (adev->ip_versions[VCE_HWIP][0]) {
1860 : case IP_VERSION(4, 0, 0):
1861 : case IP_VERSION(4, 1, 0):
1862 : /* VCE is not supported on vega20 SR-IOV */
1863 0 : if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
1864 0 : amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
1865 : break;
1866 : default:
1867 0 : dev_err(adev->dev,
1868 : "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
1869 : adev->ip_versions[VCE_HWIP][0]);
1870 0 : return -EINVAL;
1871 : }
1872 : } else {
1873 0 : switch (adev->ip_versions[UVD_HWIP][0]) {
1874 : case IP_VERSION(1, 0, 0):
1875 : case IP_VERSION(1, 0, 1):
1876 0 : amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
1877 0 : break;
1878 : case IP_VERSION(2, 0, 0):
1879 : case IP_VERSION(2, 0, 2):
1880 : case IP_VERSION(2, 2, 0):
1881 0 : amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
1882 0 : if (!amdgpu_sriov_vf(adev))
1883 0 : amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
1884 : break;
1885 : case IP_VERSION(2, 0, 3):
1886 : break;
1887 : case IP_VERSION(2, 5, 0):
1888 0 : amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
1889 0 : amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
1890 0 : break;
1891 : case IP_VERSION(2, 6, 0):
1892 0 : amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
1893 0 : amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
1894 0 : break;
1895 : case IP_VERSION(3, 0, 0):
1896 : case IP_VERSION(3, 0, 16):
1897 : case IP_VERSION(3, 1, 1):
1898 : case IP_VERSION(3, 1, 2):
1899 : case IP_VERSION(3, 0, 2):
1900 : case IP_VERSION(3, 0, 192):
1901 0 : amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
1902 0 : if (!amdgpu_sriov_vf(adev))
1903 0 : amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
1904 : break;
1905 : case IP_VERSION(3, 0, 33):
1906 0 : amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
1907 0 : break;
1908 : case IP_VERSION(4, 0, 0):
1909 : case IP_VERSION(4, 0, 2):
1910 : case IP_VERSION(4, 0, 4):
1911 0 : amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
1912 0 : if (!amdgpu_sriov_vf(adev))
1913 0 : amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
1914 : break;
1915 : default:
1916 0 : dev_err(adev->dev,
1917 : "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
1918 : adev->ip_versions[UVD_HWIP][0]);
1919 0 : return -EINVAL;
1920 : }
1921 : }
1922 : return 0;
1923 : }
1924 :
1925 0 : static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
1926 : {
1927 0 : switch (adev->ip_versions[GC_HWIP][0]) {
1928 : case IP_VERSION(10, 1, 10):
1929 : case IP_VERSION(10, 1, 1):
1930 : case IP_VERSION(10, 1, 2):
1931 : case IP_VERSION(10, 1, 3):
1932 : case IP_VERSION(10, 1, 4):
1933 : case IP_VERSION(10, 3, 0):
1934 : case IP_VERSION(10, 3, 1):
1935 : case IP_VERSION(10, 3, 2):
1936 : case IP_VERSION(10, 3, 3):
1937 : case IP_VERSION(10, 3, 4):
1938 : case IP_VERSION(10, 3, 5):
1939 : case IP_VERSION(10, 3, 6):
1940 0 : if (amdgpu_mes) {
1941 0 : amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
1942 0 : adev->enable_mes = true;
1943 0 : if (amdgpu_mes_kiq)
1944 0 : adev->enable_mes_kiq = true;
1945 : }
1946 : break;
1947 : case IP_VERSION(11, 0, 0):
1948 : case IP_VERSION(11, 0, 1):
1949 : case IP_VERSION(11, 0, 2):
1950 : case IP_VERSION(11, 0, 3):
1951 0 : amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
1952 0 : adev->enable_mes = true;
1953 0 : adev->enable_mes_kiq = true;
1954 0 : break;
1955 : default:
1956 : break;
1957 : }
1958 0 : return 0;
1959 : }
1960 :
/*
 * amdgpu_discovery_set_ip_blocks - build the device's full IP block list.
 *
 * For ASICs that predate the IP discovery table (Vega10 through Aldebaran)
 * the per-HWIP version table is hardcoded here; for everything else it is
 * read from the discovery binary. From those versions the function then
 * derives the chip family, the APU flag, the NBIO/HDP/DF/SMUIO/LSDMA
 * callback sets, and finally registers every IP block via the
 * amdgpu_discovery_set_*_ip_blocks() helpers in hardware init order.
 *
 * Returns 0 on success or a negative errno (propagated from the helpers,
 * or -EINVAL for an unknown GC version / failed discovery init).
 */
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Step 1: populate adev->ip_versions (hardcoded or from discovery). */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		/* Raven2 (and Picasso refresh) carries newer IP revisions. */
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		/* Vega20 has two UVD instances. */
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		/* Instance 0 lives in SDMA0_HWIP; the other seven in SDMA1_HWIP. */
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		/* Discovery-capable ASIC: read everything from the IP discovery table. */
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	/* Step 2: derive the chip family from the GC IP version. */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		/* Unknown GC version: refuse to bring the device up. */
		return -EINVAL;
	}

	/* Step 3: mark integrated (APU) parts. */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		/* NBIO 4.3 has a dedicated callback set under SR-IOV. */
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

	/* Select HDP callbacks. */
	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

	/* Select Data Fabric callbacks. */
	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	default:
		break;
	}

	/* Select SMUIO callbacks. */
	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

	/* Select LSDMA callbacks (present on SDMA 6.x parts). */
	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	/* Step 4: register the IP blocks in hardware init order. */
	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	/* Non-PSP firmware loading registers SMU after SDMA instead. */
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}
2418 :
|