Line data Source code
1 : /*
2 : * Copyright 2016 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 : #include "amdgpu.h"
24 : #include "amdgpu_ras.h"
25 : #include "mmhub_v1_0.h"
26 :
27 : #include "mmhub/mmhub_1_0_offset.h"
28 : #include "mmhub/mmhub_1_0_sh_mask.h"
29 : #include "mmhub/mmhub_1_0_default.h"
30 : #include "vega10_enum.h"
31 : #include "soc15.h"
32 : #include "soc15_common.h"
33 :
34 : #define mmDAGB0_CNTL_MISC2_RV 0x008f
35 : #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
36 :
37 0 : static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
38 : {
39 0 : u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
40 0 : u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
41 :
42 0 : base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
43 0 : base <<= 24;
44 :
45 0 : top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
46 0 : top <<= 24;
47 :
48 0 : adev->gmc.fb_start = base;
49 0 : adev->gmc.fb_end = top;
50 :
51 0 : return base;
52 : }
53 :
54 0 : static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
55 : uint64_t page_table_base)
56 : {
57 0 : struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
58 :
59 0 : WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
60 : hub->ctx_addr_distance * vmid,
61 : lower_32_bits(page_table_base));
62 :
63 0 : WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
64 : hub->ctx_addr_distance * vmid,
65 : upper_32_bits(page_table_base));
66 0 : }
67 :
/* Program the context-0 (GART) aperture: page directory base plus the
 * start/end addresses of the GART range, expressed as 4K page numbers
 * split across LO32 (>> 12) and HI32 (>> 44) register halves.
 */
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	/* Encoded PDE address of the GART page table BO. */
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	/* Context 0 is reserved for the kernel's GART mapping. */
	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}
84 :
/* Program the AGP and system apertures, the default page address and the
 * protection-fault default page.  For SR-IOV VFs only the aperture bounds
 * are written; the rest is owned by the host.
 */
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR (base register plus 16MB-granular bounds). */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number (256KB units). */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* The registers below are host-owned under SR-IOV. */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Set default page address (MC address of the scratch page). */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault": faults are redirected to the dummy page. */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	/* Allow PTE reads to retry while a page is being migrated. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}
134 :
/* Configure the L1 TLB: enable it, set system access mode and enable the
 * advanced driver model, with unmapped system-aperture accesses disabled.
 */
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control (read-modify-write of MC_VM_MX_L1_TLB_CNTL). */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}
154 :
/* Configure the VM L2 cache (VM_L2_CNTL..CNTL4).  Skipped entirely on
 * SR-IOV VFs where these registers are host-owned.
 */
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache: enable cache and fragment processing. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

	/* Invalidate all L1 TLBs and the L2 cache. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

	/* Bank select / big-K fragment size depend on whether the page
	 * table walker translates one level further (larger fragments).
	 */
	tmp = mmVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

	/* VMC taps fetch PDE/PTE requests through translation (not physical). */
	tmp = mmVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}
196 :
/* Enable VM context 0 (the system/kernel domain) with page-table depth 0
 * and no retry on permission/invalid-page faults.
 */
static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
208 :
/* Disable the context-1 identity aperture by programming its low bound
 * (0xF_FFFFFFFF) above its high bound (0), so no address falls inside it,
 * and zeroing the physical offset.  Host-owned under SR-IOV.
 */
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0XFFFFFFFF);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}
229 :
/* Program VM contexts 1..15 (the user VMIDs): enable each context with the
 * configured page-table depth and block size, turn on all protection-fault
 * defaults, and open the page-table address range up to max_pfn.
 */
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	/* With translate_further one level is handled below the walker, so
	 * the programmed depth drops by one; otherwise 9 bits are taken out
	 * of the block size instead.
	 */
	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	/* Contexts 1..15; register strides come from the hub descriptor. */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
285 :
286 0 : static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
287 : {
288 0 : struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
289 : unsigned i;
290 :
291 0 : for (i = 0; i < 18; ++i) {
292 0 : WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
293 : i * hub->eng_addr_distance, 0xffffffff);
294 0 : WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
295 : i * hub->eng_addr_distance, 0x1f);
296 : }
297 0 : }
298 :
299 0 : static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
300 : bool enable)
301 : {
302 0 : if (amdgpu_sriov_vf(adev))
303 : return;
304 :
305 0 : if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
306 0 : amdgpu_dpm_set_powergating_by_smu(adev,
307 : AMD_IP_BLOCK_TYPE_GMC,
308 : enable);
309 : }
310 :
/* Bring up the MMHUB GART: program the apertures, TLB, L2 cache, system
 * domain, VMID contexts and invalidation engines.  Always returns 0.
 */
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so vbios post doesn't program them, for
		 * SRIOV driver need to program them
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_0_init_gart_aperture_regs(adev);
	mmhub_v1_0_init_system_aperture_regs(adev);
	mmhub_v1_0_init_tlb_regs(adev);
	mmhub_v1_0_init_cache_regs(adev);

	mmhub_v1_0_enable_system_domain(adev);
	mmhub_v1_0_disable_identity_aperture(adev);
	mmhub_v1_0_setup_vmid_config(adev);
	mmhub_v1_0_program_invalidation(adev);

	return 0;
}
338 :
/* Tear down the MMHUB GART: disable every VM context, the L1 TLB, and
 * (on bare metal only) the L2 cache.
 */
static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables (contexts 0..AMDGPU_NUM_VMID-1). */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control: turn off the L1 TLB and advanced driver model. */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

	/* L2 registers are host-owned under SR-IOV. */
	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
	}
}
367 :
/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page;
 *         false additionally sets the crash-on-fault bits so retry and
 *         no-retry faults halt the hardware instead of being absorbed
 *
 * No-op under SR-IOV where the fault control register is host-owned.
 */
static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Apply @value uniformly to every fault class. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		/* When faults aren't redirected, make them fatal for debug. */
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
415 :
/* Fill in the MMHUB entry of adev->vmhub[]: cache the register offsets and
 * per-context/per-engine register strides the generic VM-hub code uses.
 */
static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	/* Distances between consecutive context/engine register banks. */
	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
446 :
/* Enable/disable medium-grain clock gating via ATC_L2_MISC_CG and the DAGB
 * CG-disable bits.  Raven has a single DAGB0 instance at the _RV register
 * offset; other ASICs have DAGB0 and DAGB1 at the regular offsets.  Each
 * register is only written when its value actually changes.
 */
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		/* Gating on: set the master enable, clear all DAGB
		 * CG-disable bits.
		 */
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		/* Gating off: clear the master enable, set all DAGB
		 * CG-disable bits.
		 */
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}
509 :
510 0 : static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
511 : bool enable)
512 : {
513 : uint32_t def, data;
514 :
515 0 : def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
516 :
517 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
518 0 : data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
519 : else
520 0 : data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
521 :
522 0 : if (def != data)
523 0 : WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
524 0 : }
525 :
526 0 : static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
527 : enum amd_clockgating_state state)
528 : {
529 0 : if (amdgpu_sriov_vf(adev))
530 : return 0;
531 :
532 0 : switch (adev->asic_type) {
533 : case CHIP_VEGA10:
534 : case CHIP_VEGA12:
535 : case CHIP_VEGA20:
536 : case CHIP_RAVEN:
537 : case CHIP_RENOIR:
538 0 : mmhub_v1_0_update_medium_grain_clock_gating(adev,
539 : state == AMD_CG_STATE_GATE);
540 0 : mmhub_v1_0_update_medium_grain_light_sleep(adev,
541 : state == AMD_CG_STATE_GATE);
542 0 : break;
543 : default:
544 : break;
545 : }
546 :
547 : return 0;
548 : }
549 :
/* Report the currently active MMHUB clock-gating features by OR-ing
 * AMD_CG_SUPPORT_MC_* bits into @flags based on the live register state.
 */
static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1;

	/* Under SR-IOV the register state is host-owned; report nothing.
	 * Note that execution still falls through to the register reads.
	 */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	/* NOTE(review): mmhub_v1_0_update_medium_grain_clock_gating uses the
	 * mmDAGB0_CNTL_MISC2_RV offset on CHIP_RAVEN, but this read does not
	 * make that distinction — confirm whether RAVEN reporting is correct.
	 */
	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG: CG enabled and no DAGB CG-disable bit set. */
	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
575 :
/* RAS field map for the Vega20 MMEA0/MMEA1 EDC counter registers.  Each
 * entry names one memory sub-block and gives the mask/shift pair of its
 * SEC (correctable) and DED (uncorrectable) counts inside that register.
 * Sub-blocks that only expose a SED (single-error-detect) count carry it
 * in the SEC slot and have zero DED mask/shift.
 */
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	}
};
698 :
/* The EDC counter registers scanned when querying/resetting RAS errors. */
static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};
705 :
706 0 : static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
707 : const struct soc15_reg_entry *reg,
708 : uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
709 : {
710 : uint32_t i;
711 : uint32_t sec_cnt, ded_cnt;
712 :
713 0 : for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
714 0 : if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
715 0 : continue;
716 :
717 0 : sec_cnt = (value &
718 0 : mmhub_v1_0_ras_fields[i].sec_count_mask) >>
719 0 : mmhub_v1_0_ras_fields[i].sec_count_shift;
720 0 : if (sec_cnt) {
721 0 : dev_info(adev->dev,
722 : "MMHUB SubBlock %s, SEC %d\n",
723 : mmhub_v1_0_ras_fields[i].name,
724 : sec_cnt);
725 0 : *sec_count += sec_cnt;
726 : }
727 :
728 0 : ded_cnt = (value &
729 0 : mmhub_v1_0_ras_fields[i].ded_count_mask) >>
730 0 : mmhub_v1_0_ras_fields[i].ded_count_shift;
731 0 : if (ded_cnt) {
732 0 : dev_info(adev->dev,
733 : "MMHUB SubBlock %s, DED %d\n",
734 : mmhub_v1_0_ras_fields[i].name,
735 : ded_cnt);
736 0 : *ded_count += ded_cnt;
737 : }
738 : }
739 :
740 0 : return 0;
741 : }
742 :
/* RAS hw_ops callback: scan all MMHUB EDC counter registers and fill in
 * the correctable (ce) and uncorrectable (ue) totals in @ras_error_status
 * (a struct ras_err_data *).
 */
static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0, ded_count = 0;
	uint32_t i;
	uint32_t reg_value;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
		reg_value =
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
		/* Only decode registers that report a non-zero count. */
		if (reg_value)
			mmhub_v1_0_get_ras_error_count(adev,
				&mmhub_v1_0_edc_cnt_regs[i],
				reg_value, &sec_count, &ded_count);
	}

	/* SEC errors are correctable, DED errors are uncorrectable. */
	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;
}
766 :
767 0 : static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
768 : {
769 : uint32_t i;
770 :
771 : /* read back edc counter registers to reset the counters to 0 */
772 0 : if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
773 0 : for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
774 0 : RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
775 : }
776 0 : }
777 :
/* RAS hardware hooks registered with the amdgpu RAS core. */
struct amdgpu_ras_block_hw_ops mmhub_v1_0_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};

/* MMHUB 1.0 RAS block descriptor. */
struct amdgpu_mmhub_ras mmhub_v1_0_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_0_ras_hw_ops,
	},
};
788 :
/* MMHUB 1.0 callback table used by the generic GMC code. */
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
	.get_fb_location = mmhub_v1_0_get_fb_location,
	.init = mmhub_v1_0_init,
	.gart_enable = mmhub_v1_0_gart_enable,
	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
	.gart_disable = mmhub_v1_0_gart_disable,
	.set_clockgating = mmhub_v1_0_set_clockgating,
	.get_clockgating = mmhub_v1_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
	.update_power_gating = mmhub_v1_0_update_power_gating,
};
|