LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - gmc_v11_0.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 353 0.0 %
Date: 2022-12-09 01:23:36 Functions: 0 31 0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2021 Advanced Micro Devices, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  */
      23             : #include <linux/firmware.h>
      24             : #include <linux/pci.h>
      25             : 
      26             : #include <drm/drm_cache.h>
      27             : 
      28             : #include "amdgpu.h"
      29             : #include "amdgpu_atomfirmware.h"
      30             : #include "gmc_v11_0.h"
      31             : #include "umc_v8_10.h"
      32             : #include "athub/athub_3_0_0_sh_mask.h"
      33             : #include "athub/athub_3_0_0_offset.h"
      34             : #include "oss/osssys_6_0_0_offset.h"
      35             : #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
      36             : #include "navi10_enum.h"
      37             : #include "soc15.h"
      38             : #include "soc15d.h"
      39             : #include "soc15_common.h"
      40             : #include "nbio_v4_3.h"
      41             : #include "gfxhub_v3_0.h"
      42             : #include "gfxhub_v3_0_3.h"
      43             : #include "mmhub_v3_0.h"
      44             : #include "mmhub_v3_0_1.h"
      45             : #include "mmhub_v3_0_2.h"
      46             : #include "athub_v3_0.h"
      47             : 
      48             : 
/*
 * gmc_v11_0_ecc_interrupt_state - .set callback for the ECC IRQ source
 *
 * No per-state hardware programming is needed for the ECC interrupt on
 * GMC v11, so this is a no-op stub kept to satisfy the amdgpu_irq_src
 * interface (wired up via gmc_v11_0_ecc_funcs below).
 */
static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}
      56             : 
      57             : static int
      58           0 : gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
      59             :                                    struct amdgpu_irq_src *src, unsigned type,
      60             :                                    enum amdgpu_interrupt_state state)
      61             : {
      62           0 :         switch (state) {
      63             :         case AMDGPU_IRQ_STATE_DISABLE:
      64             :                 /* MM HUB */
      65           0 :                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
      66             :                 /* GFX HUB */
      67           0 :                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
      68           0 :                 break;
      69             :         case AMDGPU_IRQ_STATE_ENABLE:
      70             :                 /* MM HUB */
      71           0 :                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
      72             :                 /* GFX HUB */
      73           0 :                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
      74           0 :                 break;
      75             :         default:
      76             :                 break;
      77             :         }
      78             : 
      79           0 :         return 0;
      80             : }
      81             : 
/*
 * gmc_v11_0_process_interrupt - handle a VM protection fault interrupt
 *
 * Decodes the faulting page address from the IV ring entry, reads the
 * L2 protection-fault status register on bare metal, and emits a
 * rate-limited log of the fault with the owning task's info.
 * Always returns 0 (fault is considered handled).
 */
static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        uint32_t status = 0;
        u64 addr;

        /* Faulting page address: bits 43:12 from src_data[0], the top
         * nibble (bits 47:44) from src_data[1]. */
        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (!amdgpu_sriov_vf(adev)) {
                /*
                 * Issue a dummy read to wait for the status register to
                 * be updated to avoid reading an incorrect value due to
                 * the new fast GRBM interface.
                 */
                if (entry->vmid_src == AMDGPU_GFXHUB_0)
                        RREG32(hub->vm_l2_pro_fault_status);

                status = RREG32(hub->vm_l2_pro_fault_status);
                /* Set bit 0 of fault_cntl — presumably clears the latched
                 * status for the next fault; confirm against register spec. */
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
        }

        if (printk_ratelimit()) {
                struct amdgpu_task_info task_info;

                memset(&task_info, 0, sizeof(struct amdgpu_task_info));
                amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

                dev_err(adev->dev,
                        "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
                        "for process %s pid %d thread %s pid %d)\n",
                        entry->vmid_src ? "mmhub" : "gfxhub",
                        entry->src_id, entry->ring_id, entry->vmid,
                        entry->pasid, task_info.process_name, task_info.tgid,
                        task_info.task_name, task_info.pid);
                dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
                        addr, entry->client_id);
                /* status was only read on bare metal; skip decode under SRIOV */
                if (!amdgpu_sriov_vf(adev))
                        hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
        }

        return 0;
}
     127             : 
/* VM protection-fault IRQ source: per-hub mask control + fault logging. */
static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
        .set = gmc_v11_0_vm_fault_interrupt_state,
        .process = gmc_v11_0_process_interrupt,
};
     132             : 
/* UMC ECC IRQ source: .set is a stub; processing uses the common UMC handler. */
static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
        .set = gmc_v11_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};
     137             : 
     138             : static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
     139             : {
     140           0 :         adev->gmc.vm_fault.num_types = 1;
     141           0 :         adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;
     142             : 
     143           0 :         if (!amdgpu_sriov_vf(adev)) {
     144           0 :                 adev->gmc.ecc_irq.num_types = 1;
     145           0 :                 adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
     146             :         }
     147             : }
     148             : 
     149             : /**
     150             :  * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore
     151             :  *
     152             :  * @adev: amdgpu_device pointer
     153             :  * @vmhub: vmhub type
     154             :  *
     155             :  */
     156             : static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
     157             :                                        uint32_t vmhub)
     158             : {
     159           0 :         return ((vmhub == AMDGPU_MMHUB_0) &&
     160           0 :                 (!amdgpu_sriov_vf(adev)));
     161             : }
     162             : 
     163             : static bool gmc_v11_0_get_vmid_pasid_mapping_info(
     164             :                                         struct amdgpu_device *adev,
     165             :                                         uint8_t vmid, uint16_t *p_pasid)
     166             : {
     167           0 :         *p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
     168             : 
     169             :         return !!(*p_pasid);
     170             : }
     171             : 
     172             : /*
     173             :  * GART
     174             :  * VMID 0 is the physical GPU addresses as used by the kernel.
     175             :  * VMIDs 1-15 are used for userspace clients and are handled
     176             :  * by the amdgpu vm/hsa code.
     177             :  */
     178             : 
/*
 * gmc_v11_0_flush_vm_hub - flush one hub's TLB for a vmid via MMIO
 *
 * Programs invalidation engine 17 of @vmhub to invalidate @vmid's TLB
 * entries, bracketing the request with the invalidation semaphore when
 * gmc_v11_0_use_invalidate_semaphore() says so, then polls for the
 * per-vmid ACK bit. For the MM hub on bare metal an additional private
 * invalidation is issued afterwards.
 */
static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                                   unsigned int vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
        struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
        u32 tmp;
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned int i;

        spin_lock(&adev->gmc.invalidate_lock);
        /*
         * It may lose gpuvm invalidate acknowledge state across power-gating
         * off cycle, add semaphore acquire before invalidation and semaphore
         * release after invalidation to avoid entering power gated state
         * to WA the Issue
         */

        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
        if (use_semaphore) {
                for (i = 0; i < adev->usec_timeout; i++) {
                        /* a read return value of 1 means semaphore acquire */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                                            hub->eng_distance * eng);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (i >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        /* Kick off the invalidation request. */
        WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

        /* Wait for ACK with a delay.*/
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
                                    hub->eng_distance * eng);
                /* each vmid has its own ACK bit */
                tmp &= 1 << vmid;
                if (tmp)
                        break;

                udelay(1);
        }

        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                              hub->eng_distance * eng, 0);

        /* Issue additional private vm invalidation to MMHUB */
        if ((vmhub != AMDGPU_GFXHUB_0) &&
            (hub->vm_l2_bank_select_reserved_cid2) &&
                !amdgpu_sriov_vf(adev)) {
                inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
                /* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
                inv_req |= (1 << 25);
                /* Issue private invalidation */
                WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
                /* Read back to ensure invalidation is done*/
                RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
        }

        spin_unlock(&adev->gmc.invalidate_lock);

        /* i still holds the ACK-poll index; timeout means no ACK was seen. */
        if (i < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
     255             : 
     256             : /**
     257             :  * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
     258             :  *
     259             :  * @adev: amdgpu_device pointer
     260             :  * @vmid: vm instance to flush
     261             :  *
     262             :  * Flush the TLB for the requested page table.
     263             :  */
     264           0 : static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
     265             :                                         uint32_t vmhub, uint32_t flush_type)
     266             : {
     267           0 :         if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
     268             :                 return;
     269             : 
     270             :         /* flush hdp cache */
     271           0 :         adev->hdp.funcs->flush_hdp(adev, NULL);
     272             : 
     273             :         /* For SRIOV run time, driver shouldn't access the register through MMIO
     274             :          * Directly use kiq to do the vm invalidation instead
     275             :          */
     276           0 :         if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
     277           0 :             (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
     278           0 :                 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
     279           0 :                 const unsigned eng = 17;
     280           0 :                 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
     281           0 :                 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
     282           0 :                 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
     283             : 
     284           0 :                 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
     285           0 :                                 1 << vmid);
     286           0 :                 return;
     287             :         }
     288             : 
     289           0 :         mutex_lock(&adev->mman.gtt_window_lock);
     290           0 :         gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
     291           0 :         mutex_unlock(&adev->mman.gtt_window_lock);
     292           0 :         return;
     293             : }
     294             : 
/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flush
 * @flush_type: the flush type passed to the invalidation request
 * @all_hub: flush every vmhub instead of only the GFX hub
 *
 * Flush the TLB for the requested pasid. Returns 0 on success or
 * -ETIME if the KIQ submission/fence times out.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                        uint16_t pasid, uint32_t flush_type,
                                        bool all_hub)
{
        int vmid, i;
        signed long r;
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        /* Fast path: on real hardware with a live KIQ ring, let the KIQ
         * packet manager invalidate directly by pasid. */
        if (amdgpu_emu_mode == 0 && ring->sched.ready) {
                spin_lock(&adev->gfx.kiq.ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        /* roll back the space allocated above */
                        amdgpu_ring_undo(ring);
                        spin_unlock(&adev->gfx.kiq.ring_lock);
                        return -ETIME;
                }

                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        return -ETIME;
                }

                return 0;
        }

        /* Fallback: scan the user VMIDs (1-15) for ones mapped to this
         * pasid and flush each hit through the MMIO path. */
        for (vmid = 1; vmid < 16; vmid++) {

                ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
                                &queried_pasid);
                if (ret && queried_pasid == pasid) {
                        if (all_hub) {
                                for (i = 0; i < adev->num_vmhubs; i++)
                                        gmc_v11_0_flush_gpu_tlb(adev, vmid,
                                                        i, flush_type);
                        } else {
                                gmc_v11_0_flush_gpu_tlb(adev, vmid,
                                                AMDGPU_GFXHUB_0, flush_type);
                        }
                }
        }

        return 0;
}
     357             : 
/*
 * gmc_v11_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * Emits ring packets that (optionally) acquire the invalidation
 * semaphore, program the page-directory base for @vmid on the ring's
 * hub, issue the invalidation request on the ring's own engine and
 * wait for its ACK, then release the semaphore. Returns @pd_addr so
 * the caller can chain it.
 */
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                             unsigned vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        /*
         * It may lose gpuvm invalidate acknowledge state across power-gating
         * off cycle, add semaphore acquire before invalidation and semaphore
         * release after invalidation to avoid entering power gated state
         * to WA the Issue
         */

        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
        if (use_semaphore)
                /* a read return value of 1 means semaphore acquire */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        /* program the page-directory base, low dword then high dword */
        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        /* request invalidation and wait for this vmid's ACK bit */
        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);

        return pd_addr;
}
     405             : 
     406           0 : static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
     407             :                                          unsigned pasid)
     408             : {
     409           0 :         struct amdgpu_device *adev = ring->adev;
     410             :         uint32_t reg;
     411             : 
     412             :         /* MES fw manages IH_VMID_x_LUT updating */
     413           0 :         if (ring->is_mes_queue)
     414             :                 return;
     415             : 
     416           0 :         if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
     417           0 :                 reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
     418             :         else
     419           0 :                 reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
     420             : 
     421           0 :         amdgpu_ring_emit_wreg(ring, reg, pasid);
     422             : }
     423             : 
     424             : /*
     425             :  * PTE format:
     426             :  * 63:59 reserved
     427             :  * 58:57 reserved
     428             :  * 56 F
     429             :  * 55 L
     430             :  * 54 reserved
     431             :  * 53:52 SW
     432             :  * 51 T
     433             :  * 50:48 mtype
     434             :  * 47:12 4k physical page base address
     435             :  * 11:7 fragment
     436             :  * 6 write
     437             :  * 5 read
     438             :  * 4 exe
     439             :  * 3 Z
     440             :  * 2 snooped
     441             :  * 1 system
     442             :  * 0 valid
     443             :  *
     444             :  * PDE format:
     445             :  * 63:59 block fragment size
     446             :  * 58:55 reserved
     447             :  * 54 P
     448             :  * 53:48 reserved
     449             :  * 47:6 physical base address of PD or PTE
     450             :  * 5:3 reserved
     451             :  * 2 C
     452             :  * 1 system
     453             :  * 0 valid
     454             :  */
     455             : 
     456           0 : static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
     457             : {
     458           0 :         switch (flags) {
     459             :         case AMDGPU_VM_MTYPE_DEFAULT:
     460             :                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
     461             :         case AMDGPU_VM_MTYPE_NC:
     462             :                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
     463             :         case AMDGPU_VM_MTYPE_WC:
     464           0 :                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
     465             :         case AMDGPU_VM_MTYPE_CC:
     466           0 :                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
     467             :         case AMDGPU_VM_MTYPE_UC:
     468           0 :                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
     469             :         default:
     470             :                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
     471             :         }
     472             : }
     473             : 
     474           0 : static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
     475             :                                  uint64_t *addr, uint64_t *flags)
     476             : {
     477           0 :         if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
     478           0 :                 *addr = adev->vm_manager.vram_base_offset + *addr -
     479           0 :                         adev->gmc.vram_start;
     480           0 :         BUG_ON(*addr & 0xFFFF00000000003FULL);
     481             : 
     482           0 :         if (!adev->gmc.translate_further)
     483             :                 return;
     484             : 
     485           0 :         if (level == AMDGPU_VM_PDB1) {
     486             :                 /* Set the block fragment size */
     487           0 :                 if (!(*flags & AMDGPU_PDE_PTE))
     488           0 :                         *flags |= AMDGPU_PDE_BFS(0x9);
     489             : 
     490           0 :         } else if (level == AMDGPU_VM_PDB0) {
     491           0 :                 if (*flags & AMDGPU_PDE_PTE)
     492           0 :                         *flags &= ~AMDGPU_PDE_PTE;
     493             :                 else
     494           0 :                         *flags |= AMDGPU_PTE_TF;
     495             :         }
     496             : }
     497             : 
     498           0 : static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
     499             :                                  struct amdgpu_bo_va_mapping *mapping,
     500             :                                  uint64_t *flags)
     501             : {
     502           0 :         *flags &= ~AMDGPU_PTE_EXECUTABLE;
     503           0 :         *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
     504             : 
     505           0 :         *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
     506           0 :         *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
     507             : 
     508           0 :         *flags &= ~AMDGPU_PTE_NOALLOC;
     509           0 :         *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
     510             : 
     511           0 :         if (mapping->flags & AMDGPU_PTE_PRT) {
     512           0 :                 *flags |= AMDGPU_PTE_PRT;
     513           0 :                 *flags |= AMDGPU_PTE_SNOOPED;
     514           0 :                 *flags |= AMDGPU_PTE_LOG;
     515           0 :                 *flags |= AMDGPU_PTE_SYSTEM;
     516           0 :                 *flags &= ~AMDGPU_PTE_VALID;
     517             :         }
     518           0 : }
     519             : 
/* Size of the VBIOS-reserved framebuffer region; always reports 0 here
 * (no VBIOS FB area is accounted for on GMC v11 — TODO confirm). */
static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	return 0;
}
     524             : 
/* GMC callback table for v11: TLB flushing, PTE/PDE construction and
 * related helpers, installed by gmc_v11_0_set_gmc_funcs(). */
static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
        .map_mtype = gmc_v11_0_map_mtype,
        .get_vm_pde = gmc_v11_0_get_vm_pde,
        .get_vm_pte = gmc_v11_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};
     535             : 
/* Install the GMC v11 callback table on the device. */
static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}
     540             : 
/*
 * Select UMC (memory controller) parameters and RAS support based on the
 * UMC IP version, then wire the chosen RAS block into the common RAS core.
 * Versions without an entry here (e.g. 8.11.0) get no UMC RAS support.
 */
static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.node_inst_num = adev->gmc.num_umc;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}

	/* Only set when the switch above selected a RAS implementation. */
	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no IP-specific ras_late_init is provided, use the default one */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no IP-specific ras_cb is provided, use the default one */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}
     576             : 
     577             : 
     578             : static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
     579             : {
     580           0 :         switch (adev->ip_versions[MMHUB_HWIP][0]) {
     581             :         case IP_VERSION(3, 0, 1):
     582           0 :                 adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
     583             :                 break;
     584             :         case IP_VERSION(3, 0, 2):
     585           0 :                 adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
     586             :                 break;
     587             :         default:
     588           0 :                 adev->mmhub.funcs = &mmhub_v3_0_funcs;
     589             :                 break;
     590             :         }
     591             : }
     592             : 
     593             : static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
     594             : {
     595           0 :         switch (adev->ip_versions[GC_HWIP][0]) {
     596             :         case IP_VERSION(11, 0, 3):
     597           0 :                 adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
     598             :                 break;
     599             :         default:
     600           0 :                 adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
     601             :                 break;
     602             :         }
     603             : }
     604             : 
     605           0 : static int gmc_v11_0_early_init(void *handle)
     606             : {
     607           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     608             : 
     609           0 :         gmc_v11_0_set_gfxhub_funcs(adev);
     610           0 :         gmc_v11_0_set_mmhub_funcs(adev);
     611           0 :         gmc_v11_0_set_gmc_funcs(adev);
     612           0 :         gmc_v11_0_set_irq_funcs(adev);
     613           0 :         gmc_v11_0_set_umc_funcs(adev);
     614             : 
     615           0 :         adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
     616           0 :         adev->gmc.shared_aperture_end =
     617             :                 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
     618           0 :         adev->gmc.private_aperture_start = 0x1000000000000000ULL;
     619           0 :         adev->gmc.private_aperture_end =
     620             :                 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
     621             : 
     622           0 :         return 0;
     623             : }
     624             : 
/* Late init: run common GMC late-init helpers, then enable the VM fault IRQ. */
static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
     640             : 
     641           0 : static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
     642             :                                         struct amdgpu_gmc *mc)
     643             : {
     644           0 :         u64 base = 0;
     645             : 
     646           0 :         base = adev->mmhub.funcs->get_fb_location(adev);
     647             : 
     648           0 :         amdgpu_gmc_vram_location(adev, &adev->gmc, base);
     649           0 :         amdgpu_gmc_gart_location(adev, mc);
     650             : 
     651             :         /* base offset of vram pages */
     652           0 :         if (amdgpu_sriov_vf(adev))
     653           0 :                 adev->vm_manager.vram_base_offset = 0;
     654             :         else
     655           0 :                 adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
     656           0 : }
     657             : 
     658             : /**
     659             :  * gmc_v11_0_mc_init - initialize the memory controller driver params
     660             :  *
     661             :  * @adev: amdgpu_device pointer
     662             :  *
     663             :  * Look up the amount of vram, vram width, and decide how to place
     664             :  * vram and gart within the GPU's physical address space.
     665             :  * Returns 0 for success.
     666             :  */
     667           0 : static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
     668             : {
     669             :         int r;
     670             : 
     671             :         /* size in MB on si */
     672           0 :         adev->gmc.mc_vram_size =
     673           0 :                 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
     674           0 :         adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
     675             : 
     676           0 :         if (!(adev->flags & AMD_IS_APU)) {
     677           0 :                 r = amdgpu_device_resize_fb_bar(adev);
     678           0 :                 if (r)
     679             :                         return r;
     680             :         }
     681           0 :         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
     682           0 :         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
     683             : 
     684             : #ifdef CONFIG_X86_64
     685           0 :         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
     686           0 :                 adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
     687           0 :                 adev->gmc.aper_size = adev->gmc.real_vram_size;
     688             :         }
     689             : #endif
     690             :         /* In case the PCI BAR is larger than the actual amount of vram */
     691           0 :         adev->gmc.visible_vram_size = adev->gmc.aper_size;
     692           0 :         if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
     693           0 :                 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
     694             : 
     695             :         /* set the gart size */
     696           0 :         if (amdgpu_gart_size == -1) {
     697           0 :                 adev->gmc.gart_size = 512ULL << 20;
     698             :         } else
     699           0 :                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
     700             : 
     701           0 :         gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
     702             : 
     703           0 :         return 0;
     704             : }
     705             : 
/*
 * Initialize the GART structures and allocate the page table in VRAM.
 * Returns 0 on success (including the already-initialized case), or a
 * negative error code.
 */
static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	/* 8 bytes per PTE */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}
     726             : 
/*
 * Software init: query VRAM info, size the VM space, register interrupt
 * sources, set up the DMA mask, and initialize the memory manager, GART
 * and VM manager. Returns 0 on success or a negative error code.
 */
static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	/* NOTE(review): return value r is overwritten below without being
	 * checked — confirm whether a failure here should be fatal. */
	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	/* UTCL2 faults from the GFX client share the same vm_fault source */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}
     823             : 
     824             : /**
     825             :  * gmc_v11_0_gart_fini - vm fini callback
     826             :  *
     827             :  * @adev: amdgpu_device pointer
     828             :  *
     829             :  * Tears down the driver GART/VM setup (CIK).
     830             :  */
     831             : static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
     832             : {
     833           0 :         amdgpu_gart_table_vram_free(adev);
     834             : }
     835             : 
/* Software fini: tear down in reverse order of gmc_v11_0_sw_init(). */
static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
     847             : 
/* No golden register settings are applied for GMC v11; intentionally empty. */
static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
}
     851             : 
     852             : /**
     853             :  * gmc_v11_0_gart_enable - gart enable
     854             :  *
     855             :  * @adev: amdgpu_device pointer
     856             :  */
     857           0 : static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
     858             : {
     859             :         int r;
     860             :         bool value;
     861             : 
     862           0 :         if (adev->gart.bo == NULL) {
     863           0 :                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
     864           0 :                 return -EINVAL;
     865             :         }
     866             : 
     867           0 :         amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     868             : 
     869           0 :         r = adev->mmhub.funcs->gart_enable(adev);
     870           0 :         if (r)
     871             :                 return r;
     872             : 
     873             :         /* Flush HDP after it is initialized */
     874           0 :         adev->hdp.funcs->flush_hdp(adev, NULL);
     875             : 
     876           0 :         value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
     877           0 :                 false : true;
     878             : 
     879           0 :         adev->mmhub.funcs->set_fault_enable_default(adev, value);
     880           0 :         gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
     881             : 
     882           0 :         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
     883             :                  (unsigned)(adev->gmc.gart_size >> 20),
     884             :                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
     885             : 
     886           0 :         return 0;
     887             : }
     888             : 
/* Hardware init: golden registers first, then GART, then UMC registers. */
static int gmc_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}
     906             : 
     907             : /**
     908             :  * gmc_v11_0_gart_disable - gart disable
     909             :  *
     910             :  * @adev: amdgpu_device pointer
     911             :  *
     912             :  * This disables all VM page table.
     913             :  */
     914             : static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
     915             : {
     916           0 :         adev->mmhub.funcs->gart_disable(adev);
     917             : }
     918             : 
/* Hardware fini: release interrupts and disable the GART (bare metal only). */
static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v11_0_gart_disable(adev);

	return 0;
}
     935             : 
/* Suspend is just hw_fini; there is no extra state to save. */
static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}
     944             : 
/* Resume: re-run hw_init, then invalidate all VMIDs so mappings rebuild. */
static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
     958             : 
static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11.*/
	return true;
}
     964             : 
static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11.*/
	return 0;
}
     970             : 
/* Soft reset is not implemented for GMC v11; always succeeds. */
static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}
     975             : 
/* Forward clockgating state to the MMHUB, then to the ATHUB. */
static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}
     988             : 
/* Collect clockgating flags from the MMHUB and ATHUB into *flags. */
static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}
     997             : 
/* Powergating is not handled at the GMC level; nothing to do. */
static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
    1003             : 
/* IP-level lifecycle hooks invoked by the amdgpu IP block framework. */
const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};
    1021             : 
/* IP block descriptor registered with the device's IP block list. */
const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};

Generated by: LCOV version 1.14