Line data Source code
1 : /*
2 : * Copyright 2019 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #include <linux/pci.h>
25 :
26 : #include "amdgpu.h"
27 : #include "amdgpu_ih.h"
28 :
29 : #include "oss/osssys_5_0_0_offset.h"
30 : #include "oss/osssys_5_0_0_sh_mask.h"
31 :
32 : #include "soc15_common.h"
33 : #include "navi10_ih.h"
34 :
35 : #define MAX_REARM_RETRY 10
36 :
37 : #define mmIH_CHICKEN_Sienna_Cichlid 0x018d
38 : #define mmIH_CHICKEN_Sienna_Cichlid_BASE_IDX 0
39 :
40 : static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
41 :
/**
 * navi10_ih_init_register_offset - Initialize register offset for ih rings
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize register offset ih rings (NAVI10).
 *
 * Fills in the per-ring amdgpu_ih_regs with the OSSSYS register offsets
 * for each IH ring that has been allocated (ring_size != 0), plus the
 * PSP register id used when IH_RB_CNTL must be programmed indirectly
 * through PSP under SR-IOV.
 */
static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
{
	struct amdgpu_ih_regs *ih_regs;

	/* ring 0: the only ring with wptr writeback address registers */
	if (adev->irq.ih.ring_size) {
		ih_regs = &adev->irq.ih.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
	}

	/* ring 1: no writeback registers */
	if (adev->irq.ih1.ring_size) {
		ih_regs = &adev->irq.ih1.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
	}

	/* ring 2: no writeback registers */
	if (adev->irq.ih2.ring_size) {
		ih_regs = &adev->irq.ih2.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
	}
}
88 :
/**
 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
 *
 * @adev: amdgpu_device pointer
 * @threshold: threshold to trigger the wptr reporting
 * @timeout: timeout to trigger the wptr reporting
 * @enabled: Enable/disable timeout flush mechanism
 *
 * threshold input range: 0 ~ 15, default 0,
 * real_threshold = 2^threshold
 * timeout input range: 0 ~ 20, default 8,
 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
 *
 * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
 */
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
			       u32 threshold, u32 timeout, bool enabled)
{
	u32 ih_cntl, ih_rb_cntl;

	/* feature only exists on OSSSYS 5.0.3 (Sienna Cichlid) and newer */
	if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3))
		return;

	ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);

	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
				   RB_USED_INT_THRESHOLD, threshold);

	/* under SR-IOV with indirect IH register access, the RB_CNTL
	 * registers must be programmed through PSP; bail out without
	 * touching IH_CNTL2 if that fails
	 */
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
	}

	/* same threshold for ring 2 */
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
				   RB_USED_INT_THRESHOLD, threshold);
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
	}

	/* commit the timeout/enable bits last */
	WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
}
142 :
/**
 * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 * @enable: true - enable the interrupts, false - disable the interrupts
 *
 * Toggle the interrupt ring buffer (NAVI10)
 *
 * Returns 0 on success, -ETIMEDOUT if the PSP-indirect register
 * programming path fails under SR-IOV.
 */
static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
					    struct amdgpu_ih_ring *ih,
					    bool enable)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
	/* enable_intr field is only valid in ring0 */
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));

	/* SR-IOV with indirect IH access must program IH_RB_CNTL via PSP */
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
			return -ETIMEDOUT;
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (enable) {
		ih->enabled = true;
	} else {
		/* set rptr, wptr to 0 */
		WREG32(ih_regs->ih_rb_rptr, 0);
		WREG32(ih_regs->ih_rb_wptr, 0);
		ih->enabled = false;
		ih->rptr = 0;
	}

	return 0;
}
187 :
188 : /**
189 : * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
190 : *
191 : * @adev: amdgpu_device pointer
192 : * @enable: enable or disable interrupt ring buffers
193 : *
194 : * Toggle all the available interrupt ring buffers (NAVI10).
195 : */
196 0 : static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
197 : {
198 0 : struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
199 : int i;
200 : int r;
201 :
202 0 : for (i = 0; i < ARRAY_SIZE(ih); i++) {
203 0 : if (ih[i]->ring_size) {
204 0 : r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
205 0 : if (r)
206 : return r;
207 : }
208 : }
209 :
210 : return 0;
211 : }
212 :
/* Build the IH_RB_CNTL value for @ih on top of the current register value.
 * Returns the updated register value; does not write any register itself.
 */
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	/* RB_SIZE is encoded as log2 of the ring size in dwords */
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	/* MC_SPACE: 1 when the ring lives at a bus address, 4 otherwise
	 * (see navi10_ih_sw_init for how use_bus_addr is chosen)
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}
235 :
236 : static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
237 : {
238 0 : u32 ih_doorbell_rtpr = 0;
239 :
240 0 : if (ih->use_doorbell) {
241 0 : ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
242 : IH_DOORBELL_RPTR, OFFSET,
243 : ih->doorbell_index);
244 0 : ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
245 : IH_DOORBELL_RPTR,
246 : ENABLE, 1);
247 : } else {
248 : ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
249 : IH_DOORBELL_RPTR,
250 : ENABLE, 0);
251 : }
252 : return ih_doorbell_rtpr;
253 : }
254 :
/**
 * navi10_ih_enable_ring - enable an ih ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Enable an ih ring buffer (NAVI10)
 *
 * Programs the ring base address, control register, writeback address
 * (ring 0 only), resets rptr/wptr and sets up the doorbell.
 * Returns 0 on success, -ETIMEDOUT if PSP-indirect programming fails.
 */
static int navi10_ih_enable_ring(struct amdgpu_device *adev,
				 struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = navi10_ih_rb_cntl(ih, tmp);
	/* ring 0 only: rearm the rptr interrupt when MSIs are in use */
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
	/* ring 1 only: drain the ring when it fills up */
	if (ih == &adev->irq.ih1)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);

	/* SR-IOV with indirect IH access must program IH_RB_CNTL via PSP */
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (ih == &adev->irq.ih) {
		/* set the ih ring 0 writeback address whether it's enabled or not */
		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
	}

	/* set rptr, wptr to 0 */
	WREG32(ih_regs->ih_rb_wptr, 0);
	WREG32(ih_regs->ih_rb_rptr, 0);

	WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));

	return 0;
}
305 :
/**
 * navi10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (NAVI).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
	u32 ih_chicken;
	int ret;
	int i;

	/* disable irqs */
	ret = navi10_ih_toggle_interrupts(adev, false);
	if (ret)
		return ret;

	adev->nbio.funcs->ih_control(adev);

	if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
		if (ih[0]->use_bus_addr) {
			/* With direct firmware loading and a bus-addressed ring,
			 * set MC_SPACE_GPA_ENABLE in IH_CHICKEN ourselves
			 * (presumably the PSP bootloader does this otherwise —
			 * see the note in navi10_ih_sw_init). Sienna Cichlid and
			 * newer OSSSYS 5.x parts use a different IH_CHICKEN offset.
			 */
			switch (adev->ip_versions[OSSSYS_HWIP][0]) {
			case IP_VERSION(5, 0, 3):
			case IP_VERSION(5, 2, 0):
			case IP_VERSION(5, 2, 1):
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid, ih_chicken);
				break;
			default:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
				break;
			}
		}
	}

	/* program every allocated ring */
	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			ret = navi10_ih_enable_ring(adev, ih[i]);
			if (ret)
				return ret;
		}
	}

	/* update doorbell range for ih ring 0*/
	adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
					    ih[0]->doorbell_index);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	ret = navi10_ih_toggle_interrupts(adev, true);
	if (ret)
		return ret;
	/* enable wptr force update for self int */
	force_update_wptr_for_self_int(adev, 0, 8, true);

	/* the software ring has no hardware to enable; just flag it usable */
	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;

	return 0;
}
378 :
/**
 * navi10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (NAVI10).
 * Turns off the self-interrupt wptr flush first, then disables all
 * IH rings and waits briefly for in-flight interrupts to settle.
 */
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
	force_update_wptr_for_self_int(adev, 0, 8, false);
	navi10_ih_toggle_interrupts(adev, false);

	/* Wait and acknowledge irq */
	mdelay(1);
}
394 :
/**
 * navi10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (NAVI10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
		/* Only ring0 supports writeback. On other rings fall back
		 * to register-based code with overflow checking below.
		 * ih_soft ring doesn't have any backing hardware registers,
		 * update wptr and return.
		 */
		wptr = le32_to_cpu(*ih->wptr_cpu);

		if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
			goto out;
	}

	ih_regs = &ih->ih_regs;

	/* Double check that the overflow wasn't already cleared. */
	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	/* strip the overflow bit so the masked wptr returned below is clean */
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	/* set WPTR_OVERFLOW_CLEAR to acknowledge the overflow */
	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
	return (wptr & ih->ptr_mask);
}
448 :
449 : /**
450 : * navi10_ih_irq_rearm - rearm IRQ if lost
451 : *
452 : * @adev: amdgpu_device pointer
453 : * @ih: IH ring to match
454 : *
455 : */
456 0 : static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
457 : struct amdgpu_ih_ring *ih)
458 : {
459 0 : uint32_t v = 0;
460 0 : uint32_t i = 0;
461 : struct amdgpu_ih_regs *ih_regs;
462 :
463 0 : ih_regs = &ih->ih_regs;
464 :
465 : /* Rearm IRQ / re-write doorbell if doorbell write is lost */
466 0 : for (i = 0; i < MAX_REARM_RETRY; i++) {
467 0 : v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
468 0 : if ((v < ih->ring_size) && (v != ih->rptr))
469 0 : WDOORBELL32(ih->doorbell_index, ih->rptr);
470 : else
471 : break;
472 : }
473 0 : }
474 :
/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr
 *
 * Set the IH ring buffer rptr.
 */
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;

	/* the software ring has no backing registers or doorbell */
	if (ih == &adev->irq.ih_soft)
		return;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		/* doorbell writes can be lost under SR-IOV; re-arm if needed */
		if (amdgpu_sriov_vf(adev))
			navi10_ih_irq_rearm(adev, ih);
	} else {
		ih_regs = &ih->ih_regs;
		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
	}
}
503 :
504 : /**
505 : * navi10_ih_self_irq - dispatch work for ring 1 and 2
506 : *
507 : * @adev: amdgpu_device pointer
508 : * @source: irq source
509 : * @entry: IV with WPTR update
510 : *
511 : * Update the WPTR from the IV and schedule work to handle the entries.
512 : */
513 0 : static int navi10_ih_self_irq(struct amdgpu_device *adev,
514 : struct amdgpu_irq_src *source,
515 : struct amdgpu_iv_entry *entry)
516 : {
517 0 : switch (entry->ring_id) {
518 : case 1:
519 0 : schedule_work(&adev->irq.ih1_work);
520 : break;
521 : case 2:
522 0 : schedule_work(&adev->irq.ih2_work);
523 : break;
524 : default: break;
525 : }
526 0 : return 0;
527 : }
528 :
/* IRQ source callbacks for IH self interrupts (ring 1/2 wptr updates) */
static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
	.process = navi10_ih_self_irq,
};
532 :
533 : static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
534 : {
535 0 : adev->irq.self_irq.num_types = 0;
536 0 : adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
537 : }
538 :
/* Install the IH and self-irq callback tables before any hw touch. */
static int navi10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	navi10_ih_set_self_irq_funcs(adev);
	navi10_ih_set_interrupt_funcs(adev);
	return 0;
}
547 :
/* Software-side IH setup: register the self-irq source, allocate the
 * ring buffers, pick the ring address space and initialize register
 * offsets. Returns 0 on success or a negative error code.
 */
static int navi10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);

	if (r)
		return r;

	/* use gpu virtual address for ih ring
	 * until ih_chicken is programmed to allow
	 * use bus address for ih ring by psp bl */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
		use_bus_addr = false;
	else
		use_bus_addr = true;
	/* ring 0: 256 KB */
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	/* rings 1 and 2 are not used on this generation */
	adev->irq.ih1.ring_size = 0;
	adev->irq.ih2.ring_size = 0;

	/* initialize ih control registers offset */
	navi10_ih_init_register_offset(adev);

	/* software ring used to re-route IVs; one page is enough */
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
	if (r)
		return r;

	r = amdgpu_irq_init(adev);

	return r;
}
589 :
/* Tear down the software irq state created in sw_init. */
static int navi10_ih_sw_fini(void *handle)
{
	amdgpu_irq_fini_sw((struct amdgpu_device *)handle);

	return 0;
}
598 :
/* Hardware init entry point: program and enable the IH rings. */
static int navi10_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	return navi10_ih_irq_init(adev);
}
605 :
/* Hardware teardown: disable all IH interrupts. Always succeeds. */
static int navi10_ih_hw_fini(void *handle)
{
	navi10_ih_irq_disable((struct amdgpu_device *)handle);

	return 0;
}
614 :
/* Suspend is identical to hardware teardown for the IH block. */
static int navi10_ih_suspend(void *handle)
{
	return navi10_ih_hw_fini(handle);
}
621 :
/* Resume re-runs the full hardware init sequence. */
static int navi10_ih_resume(void *handle)
{
	return navi10_ih_hw_init(handle);
}
628 :
/* Idle check is not implemented; unconditionally reports idle. */
static bool navi10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}
634 :
/* Wait-for-idle is not implemented; unconditionally reports timeout. */
static int navi10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}
640 :
/* Soft reset is not implemented; unconditionally reports success. */
static int navi10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}
646 :
647 0 : static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
648 : bool enable)
649 : {
650 : uint32_t data, def, field_val;
651 :
652 0 : if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
653 0 : def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
654 0 : field_val = enable ? 0 : 1;
655 0 : data = REG_SET_FIELD(data, IH_CLK_CTRL,
656 : DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
657 0 : data = REG_SET_FIELD(data, IH_CLK_CTRL,
658 : OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
659 0 : data = REG_SET_FIELD(data, IH_CLK_CTRL,
660 : LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
661 0 : data = REG_SET_FIELD(data, IH_CLK_CTRL,
662 : DYN_CLK_SOFT_OVERRIDE, field_val);
663 0 : data = REG_SET_FIELD(data, IH_CLK_CTRL,
664 : REG_CLK_SOFT_OVERRIDE, field_val);
665 0 : if (def != data)
666 0 : WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
667 : }
668 :
669 0 : return;
670 : }
671 :
672 0 : static int navi10_ih_set_clockgating_state(void *handle,
673 : enum amd_clockgating_state state)
674 : {
675 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
676 :
677 0 : navi10_ih_update_clockgating_state(adev,
678 : state == AMD_CG_STATE_GATE);
679 0 : return 0;
680 : }
681 :
/* No powergating programming is done for IH; accept any state as a no-op. */
static int navi10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
687 :
688 0 : static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
689 : {
690 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
691 :
692 0 : if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
693 0 : *flags |= AMD_CG_SUPPORT_IH_CG;
694 :
695 0 : return;
696 : }
697 :
/* IP-block lifecycle callbacks for the NAVI10 IH block */
static const struct amd_ip_funcs navi10_ih_ip_funcs = {
	.name = "navi10_ih",
	.early_init = navi10_ih_early_init,
	.late_init = NULL,
	.sw_init = navi10_ih_sw_init,
	.sw_fini = navi10_ih_sw_fini,
	.hw_init = navi10_ih_hw_init,
	.hw_fini = navi10_ih_hw_fini,
	.suspend = navi10_ih_suspend,
	.resume = navi10_ih_resume,
	.is_idle = navi10_ih_is_idle,
	.wait_for_idle = navi10_ih_wait_for_idle,
	.soft_reset = navi10_ih_soft_reset,
	.set_clockgating_state = navi10_ih_set_clockgating_state,
	.set_powergating_state = navi10_ih_set_powergating_state,
	.get_clockgating_state = navi10_ih_get_clockgating_state,
};
715 :
/* IH ring operations; IV decoding uses the generic helpers */
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
	.get_wptr = navi10_ih_get_wptr,
	.decode_iv = amdgpu_ih_decode_iv_helper,
	.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
	.set_rptr = navi10_ih_set_rptr
};
722 :
723 : static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
724 : {
725 0 : if (adev->irq.ih_funcs == NULL)
726 0 : adev->irq.ih_funcs = &navi10_ih_funcs;
727 : }
728 :
/* Exported IP-block descriptor: IH v5.0.0 for NAVI10-class parts */
const struct amdgpu_ip_block_version navi10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &navi10_ih_ip_funcs,
};
|